Compare commits

...

108 Commits

Author SHA1 Message Date
32ced35130 more metadata in reports
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-29 12:27:08 +11:00
ff783fb45a still working on creation/deletion times
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-28 15:19:10 +11:00
49484900ac sql fix
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-28 13:49:41 +11:00
aa6abb8cb2 bugfix hourly totals
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-28 13:27:05 +11:00
1f2783fc86 fix
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-28 13:14:05 +11:00
b9eae50f69 updated snapshots logic
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-28 09:47:51 +11:00
c566456ebd add configuration for monthly aggregation job timing
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-28 09:04:16 +11:00
ee01d8deac improve lifecycle data
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-28 08:49:04 +11:00
93b5769145 improve logging for pro-rata
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-27 21:40:41 +11:00
Nathan Coad
38480e52c0 improve vm deletion detection
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-27 14:20:30 +11:00
Nathan Coad
6981bd9994 even more diagnostics
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-27 11:21:47 +11:00
Nathan Coad
fe96172253 add diagnostic endpoint
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-27 11:02:39 +11:00
Nathan Coad
35b4a50cf6 try to fix pro-rata yet again
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-27 09:09:24 +11:00
73ec80bb6f update monthly aggregation and docs
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 15:35:10 +11:00
0d509179aa update daily aggregation to use hourly intervals
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 14:33:22 +11:00
e6c7596239 extreme logging
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 13:51:03 +11:00
b39865325a more logging
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 13:44:50 +11:00
b4a3c0fb3a in depth fix of deletion/creation data
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 13:02:58 +11:00
2caf2763f6 improve aggregation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 12:19:28 +11:00
25564efa54 more accurate resource pool data in aggregation reports
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 11:59:52 +11:00
871d7c2024 more logging
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 11:02:30 +11:00
3671860b7d another fix to aggregation reports
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 10:11:14 +11:00
3e2d95d3b9 fix aggregation logic
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 09:38:08 +11:00
8a3481b966 fix creationtime in aggregations
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-23 07:29:59 +11:00
13adc159a2 more accurate deletion times in aggregations
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 20:50:29 +11:00
c8f04efd51 add more documentation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 20:30:02 +11:00
Nathan Coad
68ee2838e4 fix deletiontime from event
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 15:13:40 +11:00
Nathan Coad
b0592a2539 fix daily aggregation sample count
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 14:27:27 +11:00
Nathan Coad
baea0cc85c update aggregation calculations
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 13:30:53 +11:00
Nathan Coad
ceadf42048 update godoc
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 12:52:28 +11:00
Nathan Coad
374d4921e1 update aggregation jobs
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 12:04:41 +11:00
Nathan Coad
7dc8f598c3 more logging in daily aggregation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 10:50:03 +11:00
Nathan Coad
148df38219 fix daily aggregation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-22 10:20:18 +11:00
0a2c529111 code refactor
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-21 14:40:37 +11:00
3cdf368bc4 re-apply minimum snapshot interval
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-21 14:17:40 +11:00
32d4a352dc reduced the places where we probe hourly tables
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-21 11:44:13 +11:00
b77f8671da improve concurrency handling for inventory job
Some checks failed
continuous-integration/drone/push Build encountered an error
2026-01-21 11:21:51 +11:00
715b293894 [CI SKIP] add cache for docker hub images 2026-01-21 10:55:13 +11:00
2483091861 improve logging and concurrent vcenter inventory
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-21 10:25:04 +11:00
00805513c9 fix new-vm detection interval
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-21 09:36:19 +11:00
fd9cc185ce code re-org and bugfix hanging hourly snapshot
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-21 09:12:25 +11:00
c7c7fd3dc9 code cleanup
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-21 08:45:46 +11:00
d683d23bfc use 0 instead of start of aggregation window for creationtime in xlsx
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-20 20:02:33 +11:00
c8bb30c788 better handle skipped inventories
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-20 17:18:43 +11:00
7ea02be91a refactor code and improve daily cache handling of deleted VMs
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-20 16:46:07 +11:00
0517ef88c3 [CI SKIP] bugfixes for vm deletion tracking 2026-01-20 16:33:31 +11:00
a9e522cc84 improve scheduler
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-19 14:04:01 +11:00
e186644db7 add repair functionality
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-17 12:51:11 +11:00
22fa250a43 bugfixes for monthly aggregation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-17 08:48:18 +11:00
1874b2c621 ensure we logout, fix aggregations
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 20:29:40 +11:00
a12fe5cad0 bugfixes
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 17:53:24 +11:00
1cd1046433 progress on go based aggregation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 17:37:55 +11:00
6af49471b2 Merge branch 'main' of https://git.coadcorp.com/nathan/vctp2 into dev
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 16:56:11 +11:00
b4c52e296c fix aggregation for disk size changes
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone Build is passing
2026-01-16 16:33:39 +11:00
7b7afbf1d5 start work on dev branch [CI SKIP] 2026-01-16 16:28:19 +11:00
0820cbb65e give up auto semver
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 16:09:42 +11:00
f171c7f0eb fix ci-cd
Some checks failed
continuous-integration/drone/push Build encountered an error
2026-01-16 16:08:26 +11:00
7c76825813 delay daily aggregation job by 10 minutes
Some checks failed
continuous-integration/drone/push Build encountered an error
2026-01-16 16:07:35 +11:00
9dc94bd405 try auto bumping semver
Some checks failed
continuous-integration/drone/push Build is failing
2026-01-16 16:01:48 +11:00
6ee848edb5 further improve log noise
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 15:41:21 +11:00
63794be38d improve log noise
Some checks failed
continuous-integration/drone/push Build is failing
2026-01-16 15:34:06 +11:00
7273961cfc enhance vm trace page
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 14:52:15 +11:00
d55916766b derive encryption key from hardware uuid
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 14:35:31 +11:00
ab01c0fc4d enhance database logging
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 14:28:26 +11:00
588a552e4c fixes for line graph
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 12:55:36 +11:00
871904f63e add vcenter totals line graph
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 12:36:53 +11:00
268919219e update aggregation logic
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-16 08:32:12 +11:00
f0bacab729 postgres optimisations and daily sqlite vacuum
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 19:43:20 +11:00
75a5f31a2f fix hanging manual snapshot task
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 17:49:42 +11:00
1b91c73a18 redact vcenter pw from logs
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 16:42:01 +11:00
2ea0f937c5 add endpoint to manually trigger inventory collection
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 16:37:58 +11:00
e5e5be37a3 handle crashes better
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 16:02:58 +11:00
96567f6211 fix aggregation sql
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 15:53:39 +11:00
7971098caf enhance deletiontime detection
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 15:41:28 +11:00
645a20829f nil deref fix
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 15:38:12 +11:00
debac1f684 sort snapshot reports
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 15:25:56 +11:00
8dee30ea97 improve tracking of VM deletions
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 14:25:51 +11:00
bba308ad28 fix daily aggregation report
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 12:38:11 +11:00
3f985dcd4d tidy up forrmatting on pages
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 12:11:01 +11:00
0beafb5b00 readme updates [CI SKIP] 2026-01-15 11:51:23 +11:00
ea68331208 add prometheus instrumentation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 11:43:29 +11:00
4d754ee263 add regenerate endpoint
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 11:13:00 +11:00
11f7d36bfc fix averages in aggregation jobs
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 10:57:27 +11:00
50e9921955 improve aggregations
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 09:57:05 +11:00
457d9395f0 refactor aggregate jobs
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 09:04:52 +11:00
8b2c8ae85d generate excel worksheets when data is available instead of on-demand
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-15 08:43:31 +11:00
434c7136e9 more optimisation
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 21:30:10 +11:00
877b65f10b optimise vcenter collection [CI SKIP] 2026-01-14 21:18:45 +11:00
8df1d145f8 add record size to hourly snapshot page
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 19:43:32 +11:00
9be3a3d807 more sql consolidation [CI SKIP] 2026-01-14 18:01:57 +11:00
1fca81a7b3 consolidate raw sql queries [CI_SKIP]
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 17:39:48 +11:00
56f021590d work on optimising vcenter queries
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 17:00:40 +11:00
44ae2094f3 enhance hourly snapshots
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 16:09:13 +11:00
417c7c8127 fix cert generation logic
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 15:18:54 +11:00
7fac6e3920 fix legacy references to tftp
Some checks failed
continuous-integration/drone/push Build was killed
2026-01-14 15:16:31 +11:00
98899e306f fix sql migrations
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 15:08:59 +11:00
cfc4efee0e improve aggregation logic
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 14:10:28 +11:00
b9ab34db0a bugfix reports
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 13:51:30 +11:00
013ae4568e fix charts
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 11:14:46 +11:00
5c34a9eacd add charts
Some checks failed
continuous-integration/drone/push Build is failing
2026-01-14 11:08:01 +11:00
13af853c45 fixes to index page
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 10:30:29 +11:00
5130d37632 ensure we dont collect hourly snapshot too soon after startup
Some checks failed
continuous-integration/drone/push Build was killed
2026-01-14 10:27:24 +11:00
b297b8293c adjustments to reporting
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 10:23:25 +11:00
7b600b2359 updates
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 10:07:06 +11:00
aa4567d7c1 add regenerate endpoint
Some checks failed
continuous-integration/drone/push Build was killed
2026-01-14 10:06:26 +11:00
ca8b39ba0e improve rollup reporting
Some checks failed
continuous-integration/drone/push Build was killed
2026-01-14 10:03:04 +11:00
7400e08c54 updates
All checks were successful
continuous-integration/drone/push Build is passing
2026-01-14 09:28:30 +11:00
ffe0c01fd7 improvements to inventory processing 2026-01-14 09:28:25 +11:00
93 changed files with 14873 additions and 1552 deletions

View File

@@ -4,7 +4,7 @@ name: default
steps:
- name: restore-cache-with-filesystem
image: meltwater/drone-cache
image: cache.coadcorp.com/meltwater/drone-cache
pull: true
settings:
backend: "filesystem"
@@ -23,7 +23,7 @@ steps:
path: /go
- name: build
image: golang
image: cache.coadcorp.com/library/golang
environment:
CGO_ENABLED: 0
GOMODCACHE: '/drone/src/pkg.mod'
@@ -60,7 +60,7 @@ steps:
- ls -lah ./build/
- name: dell-sftp-deploy
image: hypervtechnics/drone-sftp
image: cache.coadcorp.com/hypervtechnics/drone-sftp
settings:
host: deft.dell.com
username:
@@ -76,7 +76,7 @@ steps:
verbose: true
- name: rebuild-cache-with-filesystem
image: meltwater/drone-cache
image: cache.coadcorp.com/meltwater/drone-cache
pull: true
#when:
# event:

5
.gitignore vendored
View File

@@ -10,6 +10,9 @@
*.dylib
vctp
build/
reports/
reports/*.xlsx
settings.yaml
# Certificates
*.pem
@@ -42,7 +45,7 @@ appengine-generated/
tmp/
pb_data/
# General
# Generalis
.DS_Store
.AppleDouble
.LSOverride

154
README.md
View File

@@ -1,4 +1,125 @@
# Initial setup
# Overview
vCTP is a vSphere Chargeback Tracking Platform, designed for a specific customer, so some decisions may not be applicable for your use case.
## Snapshots and Reports
- Hourly snapshots capture inventory per vCenter (concurrency via `hourly_snapshot_concurrency`).
- Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month (or hourly snapshots if configured).
- Snapshots are registered in `snapshot_registry` so regeneration via `/api/snapshots/aggregate` can locate the correct tables (fallback scanning is also supported).
- Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.
- Hourly totals in reports are interval-based: each row represents `[HH:00, HH+1:00)` and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence by creation/deletion overlap.
- Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (`YYYY-MM-DD to YYYY-MM-DD`) and prorated totals derived from daily summaries.
- Prometheus metrics are exposed at `/metrics`:
- Snapshots/aggregations: `vctp_hourly_snapshots_total`, `vctp_hourly_snapshots_failed_total`, `vctp_hourly_snapshot_last_unix`, `vctp_hourly_snapshot_last_rows`, `vctp_daily_aggregations_total`, `vctp_daily_aggregations_failed_total`, `vctp_daily_aggregation_duration_seconds`, `vctp_monthly_aggregations_total`, `vctp_monthly_aggregations_failed_total`, `vctp_monthly_aggregation_duration_seconds`, `vctp_reports_available`
- vCenter health/perf: `vctp_vcenter_connect_failures_total{vcenter}`, `vctp_vcenter_snapshot_duration_seconds{vcenter}`, `vctp_vcenter_inventory_size{vcenter}`
## Prorating and Aggregation Logic
Daily aggregation runs per VM using sample counts for the day:
- `SamplesPresent`: count of snapshot samples in which the VM appears.
- `TotalSamples`: count of unique snapshot timestamps for the vCenter in the day.
- `AvgIsPresent`: `SamplesPresent / TotalSamples` (0 when `TotalSamples` is 0).
- `AvgVcpuCount`, `AvgRamGB`, `AvgProvisionedDisk` (daily): `sum(values_per_sample) / TotalSamples` to time-weight config changes and prorate partial-day VMs.
- `PoolTinPct`, `PoolBronzePct`, `PoolSilverPct`, `PoolGoldPct` (daily): `(pool_hits / SamplesPresent) * 100`, so pool percentages reflect only the time the VM existed.
- `CreationTime`: only set when vCenter provides it; otherwise it remains `0`.
Monthly aggregation builds on daily summaries (or the daily rollup cache):
- For each VM, daily averages are converted to weighted sums: `daily_avg * daily_total_samples`.
- Monthly averages are `sum(weighted_sums) / monthly_total_samples` (per vCenter).
- Pool percentages are weighted the same way: `(daily_pool_pct / 100) * daily_total_samples`, summed, then divided by `monthly_total_samples` and multiplied by 100.
## RPM Layout (summary)
The RPM installs the service and defaults under `/usr/bin`, config under `/etc/dtms`, and data under `/var/lib/vctp`:
- Binary: `/usr/bin/vctp-linux-amd64`
- Systemd unit: `/etc/systemd/system/vctp.service`
- Defaults/env: `/etc/dtms/vctp.yml` (override with `-settings`), `/etc/default/vctp` (environment)
- TLS cert/key: `/etc/dtms/vctp.crt` and `/etc/dtms/vctp.key` (generated if absent)
- Data: SQLite DB and reports default to `/var/lib/vctp` (reports under `/var/lib/vctp/reports`)
- Scripts: preinstall/postinstall handle directory creation and permissions.
# Settings File
Configuration now lives in the YAML settings file. By default the service reads
`/etc/dtms/vctp.yml`, or you can override it with the `-settings` flag.
```shell
vctp -settings /path/to/vctp.yml
```
If you just want to run a single inventory snapshot across all configured vCenters and
exit (no scheduler/server), use:
```shell
vctp -settings /path/to/vctp.yml -run-inventory
```
## Database Configuration
By default the app uses SQLite and creates/opens `db.sqlite3`. You can opt into PostgreSQL
by updating the settings file:
- `settings.database_driver`: `sqlite` (default) or `postgres`
- `settings.database_url`: SQLite file path/DSN or PostgreSQL DSN
Examples:
```yaml
settings:
database_driver: sqlite
database_url: ./db.sqlite3
settings:
database_driver: postgres
database_url: postgres://user:pass@localhost:5432/vctp?sslmode=disable
```
PostgreSQL migrations live in `db/migrations_postgres`, while SQLite migrations remain in
`db/migrations`.
## Snapshot Retention
Hourly and daily snapshot table retention can be configured in the settings file:
- `settings.hourly_snapshot_max_age_days` (default: 60)
- `settings.daily_snapshot_max_age_months` (default: 12)
## Settings Reference
All configuration lives under the top-level `settings:` key in `vctp.yml`.
General:
- `settings.log_level`: logging verbosity (e.g., `debug`, `info`, `warn`, `error`)
- `settings.log_output`: log format, `text` or `json`
Database:
- `settings.database_driver`: `sqlite` or `postgres`
- `settings.database_url`: SQLite file path/DSN or PostgreSQL DSN
HTTP/TLS:
- `settings.bind_ip`: IP address to bind the HTTP server
- `settings.bind_port`: TCP port to bind the HTTP server
- `settings.bind_disable_tls`: `true` to serve plain HTTP (no TLS)
- `settings.tls_cert_filename`: PEM certificate path (TLS mode)
- `settings.tls_key_filename`: PEM private key path (TLS mode)
vCenter:
- `settings.vcenter_username`: vCenter username
- `settings.vcenter_password`: vCenter password (encrypted at startup)
- `settings.vcenter_insecure`: `true` to skip TLS verification
- `settings.vcenter_event_polling_seconds`: event polling interval (0 disables)
- `settings.vcenter_inventory_polling_seconds`: inventory polling interval (0 disables)
- `settings.vcenter_inventory_snapshot_seconds`: hourly snapshot cadence (seconds)
- `settings.vcenter_inventory_aggregate_seconds`: daily aggregation cadence (seconds)
- `settings.vcenter_addresses`: list of vCenter SDK URLs to monitor
Snapshots:
- `settings.hourly_snapshot_concurrency`: max concurrent vCenter snapshots (0 = unlimited)
- `settings.hourly_snapshot_max_age_days`: retention for hourly tables
- `settings.daily_snapshot_max_age_months`: retention for daily tables
- `settings.snapshot_cleanup_cron`: cron expression for cleanup job
- `settings.reports_dir`: directory to store generated XLSX reports (default: `/var/lib/vctp/reports`)
- `settings.hourly_snapshot_retry_seconds`: interval for retrying failed hourly snapshots (default: 300 seconds)
- `settings.hourly_snapshot_max_retries`: maximum retry attempts per vCenter snapshot (default: 3)
Filters/chargeback:
- `settings.tenants_to_filter`: list of tenant name patterns to exclude
- `settings.node_charge_clusters`: list of cluster name patterns for node chargeback
- `settings.srm_activeactive_vms`: list of SRM Active/Active VM name patterns
# Developer setup
## Pre-requisite tools
@@ -28,27 +149,10 @@ Run `templ generate -path ./components` to generate code based on template files
## Documentation
Run `swag init --exclude "pkg.mod,pkg.build,pkg.tools" -o server/router/docs`
#### Database Configuration
By default the app uses SQLite and creates/opens `db.sqlite3`. You can opt into PostgreSQL
by setting environment variables:
- `DB_DRIVER`: `sqlite` (default) or `postgres`
- `DB_URL`: SQLite file path/DSN or PostgreSQL DSN
Examples:
```shell
# SQLite (default)
DB_DRIVER=sqlite DB_URL=./db.sqlite3
# PostgreSQL
DB_DRIVER=postgres DB_URL=postgres://user:pass@localhost:5432/vctp?sslmode=disable
```
PostgreSQL migrations live in `db/migrations_postgres`, while SQLite migrations remain in
`db/migrations`.
#### Snapshot Retention
Hourly and daily snapshot table retention can be configured with environment variables:
- `HOURLY_SNAPSHOT_MAX_AGE_DAYS` (default: 60)
- `DAILY_SNAPSHOT_MAX_AGE_MONTHS` (default: 12)
## CI/CD (Drone)
- `.drone.yml` defines a Docker pipeline:
- Restore/build caches for Go modules/tools.
- Build step installs generators (`templ`, `sqlc`, `swag`), regenerates code/docs, runs project scripts, and produces the `vctp-linux-amd64` binary.
- RPM step packages via `nfpm` using `vctp.yml`, emits RPMs into `./build/`.
- Optional SFTP deploy step uploads build artifacts (e.g., `vctp*`) to a remote host.
- Cache rebuild step preserves Go caches across runs.

1
components/core/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.go

View File

@@ -8,80 +8,11 @@ templ Header() {
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<meta name="description" content="vCTP API endpoint"/>
<title>vCTP API</title>
<link rel="icon" href="/favicon.ico"/>
<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png"/>
<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"/>
<script src="/assets/js/htmx@v2.0.2.min.js"></script>
<link href={ "/assets/css/output@" + version.Value + ".css" } rel="stylesheet"/>
<style>
:root {
--web2-blue: #3b82f6;
--web2-cyan: #22d3ee;
--web2-slate: #0f172a;
--web2-card: #ffffff;
--web2-shadow: 0 20px 40px rgba(15, 23, 42, 0.15);
--web2-soft-shadow: 0 8px 20px rgba(15, 23, 42, 0.12);
}
body {
font-family: "Trebuchet MS", "Lucida Grande", "Verdana", sans-serif;
color: var(--web2-slate);
}
.web2-bg {
background: radial-gradient(circle at top left, #e0f2fe 0%, #f8fafc 45%, #e2e8f0 100%);
}
.web2-shell {
max-width: 1100px;
margin: 0 auto;
padding: 2rem 1.5rem 4rem;
}
.web2-header {
background: linear-gradient(135deg, #0ea5e9 0%, #6366f1 100%);
color: #fff;
border-radius: 22px;
box-shadow: var(--web2-shadow);
padding: 1.5rem 2rem;
}
.web2-card {
background: var(--web2-card);
border-radius: 18px;
box-shadow: var(--web2-soft-shadow);
padding: 1.5rem 1.75rem;
}
.web2-pill {
display: inline-flex;
align-items: center;
gap: 0.4rem;
background: rgba(255, 255, 255, 0.2);
border: 1px solid rgba(255, 255, 255, 0.35);
padding: 0.35rem 0.8rem;
border-radius: 999px;
font-size: 0.85rem;
letter-spacing: 0.02em;
backdrop-filter: blur(6px);
}
.web2-link {
color: var(--web2-blue);
text-decoration: none;
font-weight: 600;
}
.web2-link:hover {
text-decoration: underline;
}
.web2-button {
background: linear-gradient(180deg, #60a5fa 0%, #2563eb 100%);
color: #fff;
padding: 0.5rem 1rem;
border-radius: 999px;
box-shadow: 0 6px 16px rgba(37, 99, 235, 0.35);
font-weight: 600;
text-decoration: none;
}
.web2-button:hover {
filter: brightness(1.05);
}
.web2-list li {
background: rgba(255, 255, 255, 0.9);
border-radius: 14px;
padding: 0.85rem 1.1rem;
box-shadow: 0 6px 16px rgba(15, 23, 42, 0.08);
}
</style>
<link href="/assets/css/web3.css" rel="stylesheet"/>
</head>
}

View File

@@ -31,20 +31,20 @@ func Header() templ.Component {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<head><meta charset=\"UTF-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"><meta name=\"description\" content=\"vCTP API endpoint\"><title>vCTP API</title><script src=\"/assets/js/htmx@v2.0.2.min.js\"></script><link href=\"")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<head><meta charset=\"UTF-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"><meta name=\"description\" content=\"vCTP API endpoint\"><title>vCTP API</title><link rel=\"icon\" href=\"/favicon.ico\"><link rel=\"icon\" type=\"image/png\" sizes=\"16x16\" href=\"/favicon-16x16.png\"><link rel=\"icon\" type=\"image/png\" sizes=\"32x32\" href=\"/favicon-32x32.png\"><script src=\"/assets/js/htmx@v2.0.2.min.js\"></script><link href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 templ.SafeURL
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinURLErrs("/assets/css/output@" + version.Value + ".css")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/core/header.templ`, Line: 12, Col: 61}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 15, Col: 61}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "\" rel=\"stylesheet\"><style>\n\t\t\t:root {\n\t\t\t\t--web2-blue: #3b82f6;\n\t\t\t\t--web2-cyan: #22d3ee;\n\t\t\t\t--web2-slate: #0f172a;\n\t\t\t\t--web2-card: #ffffff;\n\t\t\t\t--web2-shadow: 0 20px 40px rgba(15, 23, 42, 0.15);\n\t\t\t\t--web2-soft-shadow: 0 8px 20px rgba(15, 23, 42, 0.12);\n\t\t\t}\n\t\t\tbody {\n\t\t\t\tfont-family: \"Trebuchet MS\", \"Lucida Grande\", \"Verdana\", sans-serif;\n\t\t\t\tcolor: var(--web2-slate);\n\t\t\t}\n\t\t\t.web2-bg {\n\t\t\t\tbackground: radial-gradient(circle at top left, #e0f2fe 0%, #f8fafc 45%, #e2e8f0 100%);\n\t\t\t}\n\t\t\t.web2-shell {\n\t\t\t\tmax-width: 1100px;\n\t\t\t\tmargin: 0 auto;\n\t\t\t\tpadding: 2rem 1.5rem 4rem;\n\t\t\t}\n\t\t\t.web2-header {\n\t\t\t\tbackground: linear-gradient(135deg, #0ea5e9 0%, #6366f1 100%);\n\t\t\t\tcolor: #fff;\n\t\t\t\tborder-radius: 22px;\n\t\t\t\tbox-shadow: var(--web2-shadow);\n\t\t\t\tpadding: 1.5rem 2rem;\n\t\t\t}\n\t\t\t.web2-card {\n\t\t\t\tbackground: var(--web2-card);\n\t\t\t\tborder-radius: 18px;\n\t\t\t\tbox-shadow: var(--web2-soft-shadow);\n\t\t\t\tpadding: 1.5rem 1.75rem;\n\t\t\t}\n\t\t\t.web2-pill {\n\t\t\t\tdisplay: inline-flex;\n\t\t\t\talign-items: center;\n\t\t\t\tgap: 0.4rem;\n\t\t\t\tbackground: rgba(255, 255, 255, 0.2);\n\t\t\t\tborder: 1px solid rgba(255, 255, 255, 0.35);\n\t\t\t\tpadding: 0.35rem 0.8rem;\n\t\t\t\tborder-radius: 999px;\n\t\t\t\tfont-size: 0.85rem;\n\t\t\t\tletter-spacing: 0.02em;\n\t\t\t\tbackdrop-filter: blur(6px);\n\t\t\t}\n\t\t\t.web2-link {\n\t\t\t\tcolor: var(--web2-blue);\n\t\t\t\ttext-decoration: none;\n\t\t\t\tfont-weight: 600;\n\t\t\t}\n\t\t\t.web2-link:hover {\n\t\t\t\ttext-decoration: underline;\n\t\t\t}\n\t\t\t.web2-button {\n\t\t\t\tbackground: linear-gradient(180deg, #60a5fa 0%, #2563eb 100%);\n\t\t\t\tcolor: #fff;\n\t\t\t\tpadding: 0.5rem 1rem;\n\t\t\t\tborder-radius: 999px;\n\t\t\t\tbox-shadow: 0 6px 16px rgba(37, 99, 235, 0.35);\n\t\t\t\tfont-weight: 
600;\n\t\t\t\ttext-decoration: none;\n\t\t\t}\n\t\t\t.web2-button:hover {\n\t\t\t\tfilter: brightness(1.05);\n\t\t\t}\n\t\t\t.web2-list li {\n\t\t\t\tbackground: rgba(255, 255, 255, 0.9);\n\t\t\t\tborder-radius: 14px;\n\t\t\t\tpadding: 0.85rem 1.1rem;\n\t\t\t\tbox-shadow: 0 6px 16px rgba(15, 23, 42, 0.08);\n\t\t\t}\n\t\t</style></head>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "\" rel=\"stylesheet\"><link href=\"/assets/css/web3.css\" rel=\"stylesheet\"></head>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}

1
components/views/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
*.go

View File

@@ -20,13 +20,15 @@ templ Index(info BuildInfo) {
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">vCTP Console</div>
<h1 class="mt-3 text-4xl font-bold">Build Intelligence Dashboard</h1>
<p class="mt-2 text-sm opacity-90">A glossy, snapshot-ready view of what is running.</p>
<h1 class="mt-3 text-4xl font-bold">Chargeback Intelligence Dashboard</h1>
<p class="mt-2 text-sm text-slate-600">Point in time snapshots of consumption.</p>
</div>
<div class="flex flex-wrap gap-3">
<div class="web2-button-group">
<a class="web2-button" href="/snapshots/hourly">Hourly Snapshots</a>
<a class="web2-button" href="/snapshots/daily">Daily Snapshots</a>
<a class="web2-button" href="/snapshots/monthly">Monthly Snapshots</a>
<a class="web2-button" href="/vm/trace">VM Trace</a>
<a class="web2-button" href="/vcenters">vCenters</a>
<a class="web2-button" href="/swagger/">Swagger UI</a>
</div>
</div>
@@ -46,6 +48,37 @@ templ Index(info BuildInfo) {
<p class="mt-3 text-xl font-semibold">{info.GoVersion}</p>
</div>
</section>
<section class="grid gap-6 lg:grid-cols-3">
<div class="web2-card">
<h2 class="text-lg font-semibold mb-2">Overview</h2>
<p class="mt-2 text-sm text-slate-600">
vCTP is a vSphere Chargeback Tracking Platform.
</p>
</div>
<div class="web2-card">
<h2 class="text-lg font-semibold mb-2">Snapshots and Reports</h2>
<div class="mt-3 text-sm text-slate-600 web2-paragraphs">
<p>Hourly snapshots capture inventory per vCenter (concurrency via <code class="web2-code">hourly_snapshot_concurrency</code>).</p>
<p>Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month (or hourly snapshots if configured).</p>
<p>Snapshots are registered in <code class="web2-code">snapshot_registry</code> so regeneration via <code class="web2-code">/api/snapshots/aggregate</code> can locate the correct tables (fallback scanning is also supported).</p>
<p>Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.</p>
<p>Hourly totals are interval-based: each row represents <code class="web2-code">[HH:00, HH+1:00)</code> and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence.</p>
<p>Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (YYYY-MM-DD to YYYY-MM-DD) and prorated totals.</p>
</div>
</div>
<div class="web2-card">
<h2 class="text-lg font-semibold mb-2">Prorating and Aggregation</h2>
<div class="mt-3 space-y-2 text-sm text-slate-600 web2-paragraphs">
<p>SamplesPresent is the count of snapshots in which the VM appears; TotalSamples is the count of unique snapshot times for the vCenter.</p>
<p>AvgIsPresent = SamplesPresent / TotalSamples (0 when TotalSamples is 0).</p>
<p>Daily AvgVcpuCount/AvgRamGB/AvgProvisionedDisk = sum of per-sample values divided by TotalSamples (time-weighted).</p>
<p>Daily pool percentages use pool hits divided by SamplesPresent, so they reflect only the time the VM existed.</p>
<p>Monthly aggregation weights daily averages by daily total samples, then divides by monthly total samples.</p>
<p>CreationTime is only set when vCenter provides it; otherwise it remains 0.</p>
</div>
</div>
</section>
</main>
</body>
@core.Footer()

View File

@@ -47,14 +47,14 @@ func Index(info BuildInfo) templ.Component {
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">vCTP Console</div><h1 class=\"mt-3 text-4xl font-bold\">Build Intelligence Dashboard</h1><p class=\"mt-2 text-sm opacity-90\">A glossy, snapshot-ready view of what is running.</p></div><div class=\"flex flex-wrap gap-3\"><a class=\"web2-button\" href=\"/snapshots/hourly\">Hourly Snapshots</a> <a class=\"web2-button\" href=\"/snapshots/daily\">Daily Snapshots</a> <a class=\"web2-button\" href=\"/snapshots/monthly\">Monthly Snapshots</a> <a class=\"web2-button\" href=\"/swagger/\">Swagger UI</a></div></div></section><section class=\"grid gap-6 md:grid-cols-3\"><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.2em] text-slate-400\">Build Time</p><p class=\"mt-3 text-xl font-semibold\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">vCTP Console</div><h1 class=\"mt-3 text-4xl font-bold\">Chargeback Intelligence Dashboard</h1><p class=\"mt-2 text-sm text-slate-600\">Point in time snapshots of consumption.</p></div><div class=\"web2-button-group\"><a class=\"web2-button\" href=\"/snapshots/hourly\">Hourly Snapshots</a> <a class=\"web2-button\" href=\"/snapshots/daily\">Daily Snapshots</a> <a class=\"web2-button\" href=\"/snapshots/monthly\">Monthly Snapshots</a> <a class=\"web2-button\" href=\"/vm/trace\">VM Trace</a> <a class=\"web2-button\" href=\"/vcenters\">vCenters</a> <a class=\"web2-button\" href=\"/swagger/\">Swagger UI</a></div></div></section><section class=\"grid gap-6 md:grid-cols-3\"><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.2em] text-slate-400\">Build Time</p><p class=\"mt-3 text-xl font-semibold\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(info.BuildTime)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/index.templ`, Line: 38, Col: 59}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 40, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
@@ -67,7 +67,7 @@ func Index(info BuildInfo) templ.Component {
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(info.SHA1Ver)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/index.templ`, Line: 42, Col: 57}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 44, Col: 57}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
@@ -80,13 +80,13 @@ func Index(info BuildInfo) templ.Component {
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(info.GoVersion)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/index.templ`, Line: 46, Col: 59}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 48, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</p></div></section></main></body>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</p></div></section><section class=\"grid gap-6 lg:grid-cols-3\"><div class=\"web2-card\"><h2 class=\"text-lg font-semibold mb-2\">Overview</h2><p class=\"mt-2 text-sm text-slate-600\">vCTP is a vSphere Chargeback Tracking Platform.</p></div><div class=\"web2-card\"><h2 class=\"text-lg font-semibold mb-2\">Snapshots and Reports</h2><div class=\"mt-3 text-sm text-slate-600 web2-paragraphs\"><p>Hourly snapshots capture inventory per vCenter (concurrency via <code class=\"web2-code\">hourly_snapshot_concurrency</code>).</p><p>Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month (or hourly snapshots if configured).</p><p>Snapshots are registered in <code class=\"web2-code\">snapshot_registry</code> so regeneration via <code class=\"web2-code\">/api/snapshots/aggregate</code> can locate the correct tables (fallback scanning is also supported).</p><p>Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.</p><p>Hourly totals are interval-based: each row represents <code class=\"web2-code\">[HH:00, HH+1:00)</code> and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence.</p><p>Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (YYYY-MM-DD to YYYY-MM-DD) and prorated totals.</p></div></div><div class=\"web2-card\"><h2 class=\"text-lg font-semibold mb-2\">Prorating and Aggregation</h2><div class=\"mt-3 space-y-2 text-sm text-slate-600 web2-paragraphs\"><p>SamplesPresent is the count of snapshots in which the VM appears; TotalSamples is the count of unique snapshot times for the vCenter.</p><p>AvgIsPresent = SamplesPresent / TotalSamples (0 when TotalSamples is 0).</p><p>Daily AvgVcpuCount/AvgRamGB/AvgProvisionedDisk = sum of per-sample values divided by TotalSamples 
(time-weighted).</p><p>Daily pool percentages use pool hits divided by SamplesPresent, so they reflect only the time the VM existed.</p><p>Monthly aggregation weights daily averages by daily total samples, then divides by monthly total samples.</p><p>CreationTime is only set when vCenter provides it; otherwise it remains 0.</p></div></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}

View File

@@ -1,12 +1,56 @@
package views
import (
"fmt"
"vctp/components/core"
)
type SnapshotEntry struct {
Label string
Link string
Count int64
Group string
}
type VcenterLink struct {
Name string
Link string
}
type VcenterTotalsEntry struct {
Snapshot string
RawTime int64
VmCount int64
VcpuTotal int64
RamTotalGB int64
}
type VcenterTotalsMeta struct {
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
MonthlyLink string
HourlyClass string
DailyClass string
MonthlyClass string
}
type VcenterChartData struct {
PointsVm string
PointsVcpu string
PointsRam string
Width int
Height int
GridX []float64
GridY []float64
YTicks []ChartTick
XTicks []ChartTick
}
type ChartTick struct {
Pos float64
Label string
}
templ SnapshotHourlyList(entries []SnapshotEntry) {
@@ -32,25 +76,207 @@ templ SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) {
<div>
<div class="web2-pill">Snapshot Library</div>
<h1 class="mt-3 text-4xl font-bold">{title}</h1>
<p class="mt-2 text-sm opacity-90">{subtitle}</p>
<p class="mt-2 text-sm text-slate-600">{subtitle}</p>
</div>
<a class="web2-button" href="/">Back to Dashboard</a>
</div>
</section>
<section class="web2-card">
<div class="flex items-center justify-between">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">Available Exports</h2>
<span class="text-xs uppercase tracking-[0.2em] text-slate-400">{len(entries)} files</span>
<span class="web2-badge">{len(entries)} files</span>
</div>
<div class="overflow-hidden border border-slate-200 rounded">
<table class="web2-table">
<thead>
<tr>
<th>Snapshot</th>
<th>Records</th>
<th class="text-right">Download</th>
</tr>
</thead>
<tbody>
for i, entry := range entries {
if entry.Group != "" && (i == 0 || entries[i-1].Group != entry.Group) {
<tr class="web2-group-row">
<td colspan="3" class="font-semibold text-slate-700">{entry.Group}</td>
</tr>
}
<tr>
<td>
<div class="flex flex-col">
<span class="text-sm font-semibold text-slate-700">{entry.Label}</span>
</div>
</td>
<td>
<span class="web2-badge">{entry.Count} records</span>
</td>
<td class="text-right">
<a class="web2-link" href={entry.Link}>Download XLSX</a>
</td>
</tr>
}
</tbody>
</table>
</div>
</section>
</main>
</body>
@core.Footer()
</html>
}
templ VcenterList(links []VcenterLink) {
<!DOCTYPE html>
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">vCenter Inventory</div>
<h1 class="mt-3 text-4xl font-bold">Monitored vCenters</h1>
<p class="mt-2 text-sm text-slate-600">Select a vCenter to view snapshot totals over time.</p>
</div>
<a class="web2-button" href="/">Back to Dashboard</a>
</div>
</section>
<section class="web2-card">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">vCenters</h2>
<span class="web2-badge">{len(links)} total</span>
</div>
<div class="overflow-hidden border border-slate-200 rounded">
<table class="web2-table">
<thead>
<tr>
<th>vCenter</th>
<th class="text-right">Totals</th>
</tr>
</thead>
<tbody>
for _, link := range links {
<tr>
<td class="font-semibold text-slate-700">{link.Name}</td>
<td class="text-right">
<a class="web2-link" href={link.Link}>View Totals</a>
</td>
</tr>
}
</tbody>
</table>
</div>
</section>
</main>
</body>
@core.Footer()
</html>
}
templ VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart VcenterChartData, meta VcenterTotalsMeta) {
<!DOCTYPE html>
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto" style="max-width: 1400px;">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">vCenter Totals</div>
<h1 class="mt-3 text-4xl font-bold">Totals for {vcenter}</h1>
<p class="mt-2 text-sm text-slate-600">{meta.TypeLabel} snapshots of VM count, vCPU, and RAM over time.</p>
</div>
<div class="flex gap-3">
<a class="web2-button secondary" href="/vcenters">All vCenters</a>
<a class="web2-button" href="/">Dashboard</a>
</div>
</div>
<div class="web3-button-group mt-8 mb-3">
<a class={meta.HourlyClass} href={meta.HourlyLink}>Hourly</a>
<a class={meta.DailyClass} href={meta.DailyLink}>Daily</a>
<a class={meta.MonthlyClass} href={meta.MonthlyLink}>Monthly</a>
</div>
</section>
<section class="web2-card">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">{meta.TypeLabel} Snapshots</h2>
<span class="web2-badge">{len(entries)} records</span>
</div>
if chart.PointsVm != "" {
<div class="mb-6 overflow-auto">
<svg width="100%" height={fmt.Sprintf("%d", chart.Height+80)} viewBox={"0 0 " + fmt.Sprintf("%d", chart.Width) + " " + fmt.Sprintf("%d", chart.Height+70)} role="img" aria-label="Totals over time">
<defs>
<linearGradient id="grid" x1="0" y1="0" x2="0" y2="1">
<stop offset="0%" stop-color="#e2e8f0" stop-opacity="0.6"></stop>
</linearGradient>
</defs>
<rect x="40" y="10" width={fmt.Sprintf("%d", chart.Width-60)} height={fmt.Sprintf("%d", chart.Height)} fill="white" stroke="#e2e8f0"></rect>
<!-- grid lines -->
<g stroke="#e2e8f0" stroke-width="1" stroke-dasharray="2,4">
for _, y := range chart.GridY {
<line x1="40" y1={fmt.Sprintf("%.1f", y)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%.1f", y)} />
}
for _, x := range chart.GridX {
<line x1={fmt.Sprintf("%.1f", x)} y1="10" x2={fmt.Sprintf("%.1f", x)} y2={fmt.Sprintf("%d", chart.Height+10)} />
}
</g>
<!-- axes -->
<line x1="40" y1={fmt.Sprintf("%d", chart.Height+10)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<line x1="40" y1="10" x2="40" y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<!-- data -->
<polyline points={chart.PointsVm} fill="none" stroke="#2563eb" stroke-width="2.5"></polyline>
<polyline points={chart.PointsVcpu} fill="none" stroke="#16a34a" stroke-width="2.5"></polyline>
<polyline points={chart.PointsRam} fill="none" stroke="#ea580c" stroke-width="2.5"></polyline>
<!-- tick labels -->
<g font-size="10" fill="#475569" text-anchor="end">
for _, tick := range chart.YTicks {
<text x="36" y={fmt.Sprintf("%.1f", tick.Pos+3)}>{tick.Label}</text>
}
</g>
<g font-size="10" fill="#475569" text-anchor="middle">
for _, tick := range chart.XTicks {
<text x={fmt.Sprintf("%.1f", tick.Pos)} y={fmt.Sprintf("%d", chart.Height+24)}>{tick.Label}</text>
}
</g>
<!-- legend -->
<g font-size="12" fill="#475569" transform={"translate(40 " + fmt.Sprintf("%d", chart.Height+54) + ")"}>
<rect x="0" y="0" width="14" height="8" fill="#2563eb"></rect><text x="22" y="12">VMs</text>
<rect x="90" y="0" width="14" height="8" fill="#16a34a"></rect><text x="112" y="12">vCPU</text>
<rect x="180" y="0" width="14" height="8" fill="#ea580c"></rect><text x="202" y="12">RAM (GB)</text>
</g>
<!-- axis labels -->
<text x="15" y="20" transform={"rotate(-90 15 20)"} font-size="12" fill="#475569">Totals</text>
<text x={fmt.Sprintf("%d", chart.Width/2)} y={fmt.Sprintf("%d", chart.Height+70)} font-size="12" fill="#475569">Snapshot sequence (newest right)</text>
</svg>
</div>
}
<div class="overflow-hidden border border-slate-200 rounded">
<table class="web2-table">
<thead>
<tr>
<th>Snapshot Time</th>
<th class="text-right">VMs</th>
<th class="text-right">vCPUs</th>
<th class="text-right">RAM (GB)</th>
</tr>
</thead>
<tbody>
for _, entry := range entries {
<tr>
<td>{entry.Snapshot}</td>
<td class="text-right">{entry.VmCount}</td>
<td class="text-right">{entry.VcpuTotal}</td>
<td class="text-right">{entry.RamTotalGB}</td>
</tr>
}
</tbody>
</table>
</div>
<ul class="mt-6 space-y-3 web2-list">
for _, entry := range entries {
<li class="flex items-center justify-between gap-4">
<span class="text-sm font-semibold text-slate-700">{entry.Label}</span>
<a class="web2-link" href={entry.Link}>Download XLSX</a>
</li>
}
</ul>
</section>
</main>
</body>

View File

@@ -9,12 +9,56 @@ import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"fmt"
"vctp/components/core"
)
type SnapshotEntry struct {
Label string
Link string
Count int64
Group string
}
type VcenterLink struct {
Name string
Link string
}
type VcenterTotalsEntry struct {
Snapshot string
RawTime int64
VmCount int64
VcpuTotal int64
RamTotalGB int64
}
type VcenterTotalsMeta struct {
ViewType string
TypeLabel string
HourlyLink string
DailyLink string
MonthlyLink string
HourlyClass string
DailyClass string
MonthlyClass string
}
type VcenterChartData struct {
PointsVm string
PointsVcpu string
PointsRam string
Width int
Height int
GridX []float64
GridY []float64
YTicks []ChartTick
XTicks []ChartTick
}
type ChartTick struct {
Pos float64
Label string
}
func SnapshotHourlyList(entries []SnapshotEntry) templ.Component {
@@ -140,75 +184,107 @@ func SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) te
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(title)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/snapshots.templ`, Line: 34, Col: 49}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 78, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h1><p class=\"mt-2 text-sm opacity-90\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h1><p class=\"mt-2 text-sm text-slate-600\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(subtitle)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/snapshots.templ`, Line: 35, Col: 51}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 79, Col: 55}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</p></div><a class=\"web2-button\" href=\"/\">Back to Dashboard</a></div></section><section class=\"web2-card\"><div class=\"flex items-center justify-between\"><h2 class=\"text-lg font-semibold\">Available Exports</h2><span class=\"text-xs uppercase tracking-[0.2em] text-slate-400\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</p></div><a class=\"web2-button\" href=\"/\">Back to Dashboard</a></div></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">Available Exports</h2><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs(len(entries))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/snapshots.templ`, Line: 44, Col: 83}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 88, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " files</span></div><ul class=\"mt-6 space-y-3 web2-list\">")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, " files</span></div><div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>Snapshot</th><th>Records</th><th class=\"text-right\">Download</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, entry := range entries {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<li class=\"flex items-center justify-between gap-4\"><span class=\"text-sm font-semibold text-slate-700\">")
for i, entry := range entries {
if entry.Group != "" && (i == 0 || entries[i-1].Group != entry.Group) {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "<tr class=\"web2-group-row\"><td colspan=\"3\" class=\"font-semibold text-slate-700\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Group)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 103, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, " <tr><td><div class=\"flex flex-col\"><span class=\"text-sm font-semibold text-slate-700\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Label)
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/snapshots.templ`, Line: 49, Col: 71}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, "</span> <a class=\"web2-link\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 templ.SafeURL
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinURLErrs(entry.Link)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/snapshots.templ`, Line: 50, Col: 45}
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 109, Col: 75}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "\">Download XLSX</a></li>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "</span></div></td><td><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Count)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 113, Col: 48}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, " records</span></td><td class=\"text-right\"><a class=\"web2-link\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 templ.SafeURL
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinURLErrs(entry.Link)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 116, Col: 48}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "\">Download XLSX</a></td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "</ul></section></main></body>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "</tbody></table></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
@@ -216,7 +292,749 @@ func SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) te
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "</html>")
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "</html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
func VcenterList(links []VcenterLink) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var12 := templ.GetChildren(ctx)
if templ_7745c5c3_Var12 == nil {
templ_7745c5c3_Var12 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "<!doctype html><html lang=\"en\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.Header().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">vCenter Inventory</div><h1 class=\"mt-3 text-4xl font-bold\">Monitored vCenters</h1><p class=\"mt-2 text-sm text-slate-600\">Select a vCenter to view snapshot totals over time.</p></div><a class=\"web2-button\" href=\"/\">Back to Dashboard</a></div></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">vCenters</h2><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(len(links))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 150, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, " total</span></div><div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>vCenter</th><th class=\"text-right\">Totals</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, link := range links {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "<tr><td class=\"font-semibold text-slate-700\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(link.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 163, Col: 61}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "</td><td class=\"text-right\"><a class=\"web2-link\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 templ.SafeURL
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinURLErrs(link.Link)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 165, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "\">View Totals</a></td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</tbody></table></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.Footer().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "</html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
func VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart VcenterChartData, meta VcenterTotalsMeta) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var16 := templ.GetChildren(ctx)
if templ_7745c5c3_Var16 == nil {
templ_7745c5c3_Var16 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "<!doctype html><html lang=\"en\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.Header().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto\" style=\"max-width: 1400px;\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">vCenter Totals</div><h1 class=\"mt-3 text-4xl font-bold\">Totals for ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(vcenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 189, Col: 63}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "</h1><p class=\"mt-2 text-sm text-slate-600\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(meta.TypeLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 190, Col: 62}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, " snapshots of VM count, vCPU, and RAM over time.</p></div><div class=\"flex gap-3\"><a class=\"web2-button secondary\" href=\"/vcenters\">All vCenters</a> <a class=\"web2-button\" href=\"/\">Dashboard</a></div></div><div class=\"web3-button-group mt-8 mb-3\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 = []any{meta.HourlyClass}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var19...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "<a class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var19).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 templ.SafeURL
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinURLErrs(meta.HourlyLink)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 198, Col: 56}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "\">Hourly</a> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 = []any{meta.DailyClass}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var22...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "<a class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var22).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 templ.SafeURL
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinURLErrs(meta.DailyLink)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 199, Col: 54}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "\">Daily</a> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 = []any{meta.MonthlyClass}
templ_7745c5c3_Err = templ.RenderCSSItems(ctx, templ_7745c5c3_Buffer, templ_7745c5c3_Var25...)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "<a class=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(templ.CSSClasses(templ_7745c5c3_Var25).String())
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 1, Col: 0}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "\" href=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 templ.SafeURL
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinURLErrs(meta.MonthlyLink)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 200, Col: 58}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "\">Monthly</a></div></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(meta.TypeLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 206, Col: 56}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, " Snapshots</h2><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var29 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(len(entries))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 207, Col: 45}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, " records</span></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if chart.PointsVm != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "<div class=\"mb-6 overflow-auto\"><svg width=\"100%\" height=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var30 string
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+80))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 211, Col: 67}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "\" viewBox=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var31 string
templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs("0 0 " + fmt.Sprintf("%d", chart.Width) + " " + fmt.Sprintf("%d", chart.Height+70))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 211, Col: 160}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "\" role=\"img\" aria-label=\"Totals over time\"><defs><linearGradient id=\"grid\" x1=\"0\" y1=\"0\" x2=\"0\" y2=\"1\"><stop offset=\"0%\" stop-color=\"#e2e8f0\" stop-opacity=\"0.6\"></stop></linearGradient></defs> <rect x=\"40\" y=\"10\" width=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var32 string
templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-60))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 217, Col: 68}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "\" height=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var33 string
templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 217, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "\" fill=\"white\" stroke=\"#e2e8f0\"></rect><!-- grid lines --><g stroke=\"#e2e8f0\" stroke-width=\"1\" stroke-dasharray=\"2,4\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, y := range chart.GridY {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "<line x1=\"40\" y1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var34 string
templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 221, Col: 51}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var35 string
templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 221, Col: 90}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var36 string
templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 221, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "\"></line> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
for _, x := range chart.GridX {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<line x1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var37 string
templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 224, Col: 43}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "\" y1=\"10\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var38 string
templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 224, Col: 79}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var39 string
templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 224, Col: 119}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "\"></line>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "</g><!-- axes --><line x1=\"40\" y1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var40 string
templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 228, Col: 61}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var41 string
templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 228, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var42 string
templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 228, Col: 140}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <line x1=\"40\" y1=\"10\" x2=\"40\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var43 string
templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 229, Col: 77}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line><!-- data --><polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var44 string
templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsVm)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 231, Col: 40}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "\" fill=\"none\" stroke=\"#2563eb\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var45 string
templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsVcpu)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 232, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "\" fill=\"none\" stroke=\"#16a34a\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var46 string
templ_7745c5c3_Var46, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsRam)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 233, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var46))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "\" fill=\"none\" stroke=\"#ea580c\" stroke-width=\"2.5\"></polyline><!-- tick labels --><g font-size=\"10\" fill=\"#475569\" text-anchor=\"end\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.YTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "<text x=\"36\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var47 string
templ_7745c5c3_Var47, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos+3))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 237, Col: 58}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var47))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var48 string
templ_7745c5c3_Var48, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 237, Col: 71}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var48))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 61, "</g> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"middle\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.XTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 62, "<text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var49 string
templ_7745c5c3_Var49, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 242, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var49))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 63, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var50 string
templ_7745c5c3_Var50, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+24))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 242, Col: 88}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var50))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 64, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var51 string
templ_7745c5c3_Var51, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 242, Col: 101}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var51))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 65, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 66, "</g><!-- legend --><g font-size=\"12\" fill=\"#475569\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var52 string
templ_7745c5c3_Var52, templ_7745c5c3_Err = templ.JoinStringErrs("translate(40 " + fmt.Sprintf("%d", chart.Height+54) + ")")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 246, Col: 111}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var52))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 67, "\"><rect x=\"0\" y=\"0\" width=\"14\" height=\"8\" fill=\"#2563eb\"></rect><text x=\"22\" y=\"12\">VMs</text> <rect x=\"90\" y=\"0\" width=\"14\" height=\"8\" fill=\"#16a34a\"></rect><text x=\"112\" y=\"12\">vCPU</text> <rect x=\"180\" y=\"0\" width=\"14\" height=\"8\" fill=\"#ea580c\"></rect><text x=\"202\" y=\"12\">RAM (GB)</text></g><!-- axis labels --><text x=\"15\" y=\"20\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var53 string
templ_7745c5c3_Var53, templ_7745c5c3_Err = templ.JoinStringErrs("rotate(-90 15 20)")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 252, Col: 59}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var53))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 68, "\" font-size=\"12\" fill=\"#475569\">Totals</text> <text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var54 string
templ_7745c5c3_Var54, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width/2))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 253, Col: 50}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var54))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 69, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var55 string
templ_7745c5c3_Var55, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+70))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 253, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var55))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 70, "\" font-size=\"12\" fill=\"#475569\">Snapshot sequence (newest right)</text></svg></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 71, "<div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>Snapshot Time</th><th class=\"text-right\">VMs</th><th class=\"text-right\">vCPUs</th><th class=\"text-right\">RAM (GB)</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, entry := range entries {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 72, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var56 string
templ_7745c5c3_Var56, templ_7745c5c3_Err = templ.JoinStringErrs(entry.Snapshot)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 271, Col: 29}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var56))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 73, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var57 string
templ_7745c5c3_Var57, templ_7745c5c3_Err = templ.JoinStringErrs(entry.VmCount)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 272, Col: 47}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var57))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 74, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var58 string
templ_7745c5c3_Var58, templ_7745c5c3_Err = templ.JoinStringErrs(entry.VcpuTotal)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 273, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var58))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 75, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var59 string
templ_7745c5c3_Var59, templ_7745c5c3_Err = templ.JoinStringErrs(entry.RamTotalGB)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/snapshots.templ`, Line: 274, Col: 50}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var59))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 76, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 77, "</tbody></table></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.Footer().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 78, "</html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}

View File

@@ -0,0 +1,173 @@
package views
import (
"fmt"
"vctp/components/core"
)
// VmTraceEntry is one row of a VM's snapshot timeline: the VM's identity and
// resource allocation as recorded in a single inventory snapshot. Rendered by
// the VmTracePage table and chart.
type VmTraceEntry struct {
	Snapshot        string  // snapshot label for this sample (display string; presumably the snapshot time — confirm against producer)
	RawTime         int64   // numeric snapshot time for ordering (presumably a Unix timestamp — TODO confirm units)
	Name            string  // VM display name at the time of the snapshot
	VmId            string  // vCenter managed-object ID (e.g. "vm-12345" per the form placeholder)
	VmUuid          string  // VM UUID
	Vcenter         string  // owning vCenter identifier
	ResourcePool    string  // resource pool the VM belonged to in this snapshot
	VcpuCount       int64   // allocated vCPU count
	RamGB           int64   // allocated RAM in GB
	ProvisionedDisk float64 // provisioned disk size (units not visible here — presumably GB; verify against caller)
	CreationTime    string  // pre-formatted VM creation time (may be empty/approximate)
	DeletionTime    string  // pre-formatted VM deletion time; empty if the VM still exists
}
// VmTraceChart holds precomputed geometry for the inline SVG timeline chart on
// the VM trace page. Points* fields are SVG polyline "points" attribute strings
// ("x1,y1 x2,y2 ..."); an empty PointsVcpu means there is no data to chart.
type VmTraceChart struct {
	PointsVcpu   string // vCPU series polyline points; "" suppresses the chart
	PointsRam    string // RAM series polyline points
	PointsTin    string // NOTE(review): presumably per-tier series (service tiers) — not used in the visible template lines; confirm
	PointsBronze string // bronze-tier series polyline points (assumed — see PointsTin)
	PointsSilver string // silver-tier series polyline points (assumed — see PointsTin)
	PointsGold   string // gold-tier series polyline points (assumed — see PointsTin)
	Width        int    // SVG viewBox width in px (interpolated into viewBox)
	Height       int    // SVG plot-area height in px
	GridX        []float64 // x positions of vertical grid lines
	GridY        []float64 // y positions of horizontal grid lines
	XTicks       []ChartTick // x-axis tick positions and labels (ChartTick: Pos, Label)
	YTicks       []ChartTick // y-axis tick positions and labels
}
// VmTracePage renders the VM trace page: a lookup form (vm_id / vm_uuid /
// name), an optional SVG timeline chart (drawn only when chart.PointsVcpu is
// non-empty), creation/deletion summary cards, and a per-snapshot table built
// from entries. display_query is appended to the page heading; creationApprox
// adds an "Approximate (earliest snapshot)" note under the creation time.
// NOTE(review): the query parameter is accepted but never referenced in this
// template's body — confirm whether it is still needed by callers.
templ VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, creationApprox bool, entries []VmTraceEntry, chart VmTraceChart) {
<!DOCTYPE html>
<html lang="en">
@core.Header()
<body class="flex flex-col min-h-screen web2-bg">
<main class="flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto" style="max-width: 1400px;">
<section class="web2-header">
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
<div>
<div class="web2-pill">VM Trace</div>
<h1 class="mt-3 text-4xl font-bold">Snapshot history{display_query}</h1>
<p class="mt-2 text-sm text-slate-600">Timeline of vCPU, RAM, and resource pool changes across snapshots.</p>
</div>
<div class="flex gap-3 flex-wrap">
<a class="web2-button" href="/">Dashboard</a>
</div>
</div>
<form method="get" action="/vm/trace" class="mt-4 grid gap-3 md:grid-cols-3">
<div class="flex flex-col gap-1">
<label class="text-sm text-slate-600" for="vm_id">VM ID</label>
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="vm_id" name="vm_id" value={vm_id} placeholder="vm-12345"/>
</div>
<div class="flex flex-col gap-1">
<label class="text-sm text-slate-600" for="vm_uuid">VM UUID</label>
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="vm_uuid" name="vm_uuid" value={vm_uuid} placeholder="uuid..."/>
</div>
<div class="flex flex-col gap-1">
<label class="text-sm text-slate-600" for="name">Name</label>
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="name" name="name" value={vm_name} placeholder="VM name"/>
</div>
<div class="md:col-span-3 flex gap-2">
<button class="web3-button active" type="submit">Load VM Trace</button>
<a class="web3-button" href="/vm/trace">Clear</a>
</div>
</form>
</section>
<section class="web2-card">
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
<h2 class="text-lg font-semibold">Snapshot Timeline</h2>
<span class="web2-badge">{len(entries)} samples</span>
</div>
if chart.PointsVcpu != "" {
<div class="mb-6 overflow-auto">
<svg width="100%" height="360" viewBox={"0 0 " + fmt.Sprintf("%d", chart.Width) + " 320"} role="img" aria-label="VM timeline">
<rect x="40" y="10" width={fmt.Sprintf("%d", chart.Width-60)} height={fmt.Sprintf("%d", chart.Height)} fill="white" stroke="#e2e8f0"></rect>
<g stroke="#e2e8f0" stroke-width="1" stroke-dasharray="2,4">
for _, y := range chart.GridY {
<line x1="40" y1={fmt.Sprintf("%.1f", y)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%.1f", y)} />
}
for _, x := range chart.GridX {
<line x1={fmt.Sprintf("%.1f", x)} y1="10" x2={fmt.Sprintf("%.1f", x)} y2={fmt.Sprintf("%d", chart.Height+10)} />
}
</g>
<line x1="40" y1={fmt.Sprintf("%d", chart.Height+10)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<line x1="40" y1="10" x2="40" y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
<polyline points={chart.PointsVcpu} fill="none" stroke="#2563eb" stroke-width="2.5"></polyline>
<polyline points={chart.PointsRam} fill="none" stroke="#16a34a" stroke-width="2.5"></polyline>
<polyline points={chart.PointsTin} fill="none" stroke="#0ea5e9" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<polyline points={chart.PointsBronze} fill="none" stroke="#a855f7" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<polyline points={chart.PointsSilver} fill="none" stroke="#94a3b8" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<polyline points={chart.PointsGold} fill="none" stroke="#f59e0b" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
<g font-size="10" fill="#475569" text-anchor="end">
for _, tick := range chart.YTicks {
<text x="36" y={fmt.Sprintf("%.1f", tick.Pos+3)}>{tick.Label}</text>
}
</g>
<g font-size="10" fill="#475569" text-anchor="middle">
for _, tick := range chart.XTicks {
<text x={fmt.Sprintf("%.1f", tick.Pos)} y={fmt.Sprintf("%d", chart.Height+24)}>{tick.Label}</text>
}
</g>
<g font-size="12" fill="#475569" transform={"translate(40 " + fmt.Sprintf("%d", chart.Height+50) + ")"}>
<rect x="0" y="0" width="14" height="8" fill="#2563eb"></rect><text x="22" y="12">vCPU</text>
<rect x="90" y="0" width="14" height="8" fill="#16a34a"></rect><text x="112" y="12">RAM (GB)</text>
<rect x="200" y="0" width="14" height="8" fill="#0ea5e9"></rect><text x="222" y="12">Tin</text>
<rect x="260" y="0" width="14" height="8" fill="#a855f7"></rect><text x="282" y="12">Bronze</text>
<rect x="340" y="0" width="14" height="8" fill="#94a3b8"></rect><text x="362" y="12">Silver</text>
<rect x="420" y="0" width="14" height="8" fill="#f59e0b"></rect><text x="442" y="12">Gold</text>
</g>
<text x="15" y="20" transform={"rotate(-90 15 20)"} font-size="12" fill="#475569">Resources / Pool</text>
<text x={fmt.Sprintf("%d", chart.Width/2)} y={fmt.Sprintf("%d", chart.Height+70)} font-size="12" fill="#475569">Snapshots (oldest left, newest right)</text>
</svg>
</div>
}
<div class="grid gap-3 md:grid-cols-2 mb-4">
<div class="web2-card">
<p class="text-xs uppercase tracking-[0.15em] text-slate-500">Creation time</p>
<p class="mt-2 text-base font-semibold text-slate-800">{creationLabel}</p>
if creationApprox {
<p class="text-xs text-slate-500 mt-1">Approximate (earliest snapshot)</p>
}
</div>
<div class="web2-card">
<p class="text-xs uppercase tracking-[0.15em] text-slate-500">Deletion time</p>
<p class="mt-2 text-base font-semibold text-slate-800">{deletionLabel}</p>
</div>
</div>
<div class="overflow-hidden border border-slate-200 rounded">
<table class="web2-table">
<thead>
<tr>
<th>Snapshot</th>
<th>VM Name</th>
<th>VmId</th>
<th>VmUuid</th>
<th>Vcenter</th>
<th>Resource Pool</th>
<th class="text-right">vCPUs</th>
<th class="text-right">RAM (GB)</th>
<th class="text-right">Disk</th>
</tr>
</thead>
<tbody>
for _, e := range entries {
<tr>
<td>{e.Snapshot}</td>
<td>{e.Name}</td>
<td>{e.VmId}</td>
<td>{e.VmUuid}</td>
<td>{e.Vcenter}</td>
<td>{e.ResourcePool}</td>
<td class="text-right">{e.VcpuCount}</td>
<td class="text-right">{e.RamGB}</td>
<td class="text-right">{fmt.Sprintf("%.1f", e.ProvisionedDisk)}</td>
</tr>
}
</tbody>
</table>
</div>
</section>
</main>
</body>
@core.Footer()
</html>
}

View File

@@ -0,0 +1,729 @@
// Code generated by templ - DO NOT EDIT.
// templ: version: v0.3.977
package views
//lint:file-ignore SA4006 This context is only used if a nested component is present.
import "github.com/a-h/templ"
import templruntime "github.com/a-h/templ/runtime"
import (
"fmt"
"vctp/components/core"
)
// VmTraceEntry is one row of the VM trace table.
// NOTE(review): this is the templ-generated copy — the authoritative
// definition lives in views/vm_trace.templ; change it there and regenerate
// rather than editing this file.
type VmTraceEntry struct {
Snapshot string
RawTime int64
Name string
VmId string
VmUuid string
Vcenter string
ResourcePool string
VcpuCount int64
RamGB int64
ProvisionedDisk float64
CreationTime string
DeletionTime string
}
// VmTraceChart carries precomputed SVG polyline data and geometry for the
// snapshot-timeline chart.
// NOTE(review): this is the templ-generated copy — the authoritative
// definition lives in views/vm_trace.templ; change it there and regenerate
// rather than editing this file.
type VmTraceChart struct {
PointsVcpu string
PointsRam string
PointsTin string
PointsBronze string
PointsSilver string
PointsGold string
Width int
Height int
GridX []float64
GridY []float64
XTicks []ChartTick
YTicks []ChartTick
}
func VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, creationApprox bool, entries []VmTraceEntry, chart VmTraceChart) templ.Component {
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
return templ_7745c5c3_CtxErr
}
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
if !templ_7745c5c3_IsBuffer {
defer func() {
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
if templ_7745c5c3_Err == nil {
templ_7745c5c3_Err = templ_7745c5c3_BufErr
}
}()
}
ctx = templ.InitializeContext(ctx)
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
if templ_7745c5c3_Var1 == nil {
templ_7745c5c3_Var1 = templ.NopComponent
}
ctx = templ.ClearChildren(ctx)
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<!doctype html><html lang=\"en\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.Header().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto\" style=\"max-width: 1400px;\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">VM Trace</div><h1 class=\"mt-3 text-4xl font-bold\">Snapshot history")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var2 string
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(display_query)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 48, Col: 74}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h1><p class=\"mt-2 text-sm text-slate-600\">Timeline of vCPU, RAM, and resource pool changes across snapshots.</p></div><div class=\"flex gap-3 flex-wrap\"><a class=\"web2-button\" href=\"/\">Dashboard</a></div></div><form method=\"get\" action=\"/vm/trace\" class=\"mt-4 grid gap-3 md:grid-cols-3\"><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"vm_id\">VM ID</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"vm_id\" name=\"vm_id\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var3 string
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(vm_id)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 58, Col: 123}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\" placeholder=\"vm-12345\"></div><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"vm_uuid\">VM UUID</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"vm_uuid\" name=\"vm_uuid\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var4 string
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(vm_uuid)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 62, Col: 129}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\" placeholder=\"uuid...\"></div><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"name\">Name</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"name\" name=\"name\" value=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var5 string
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(vm_name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 66, Col: 123}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" placeholder=\"VM name\"></div><div class=\"md:col-span-3 flex gap-2\"><button class=\"web3-button active\" type=\"submit\">Load VM Trace</button> <a class=\"web3-button\" href=\"/vm/trace\">Clear</a></div></form></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">Snapshot Timeline</h2><span class=\"web2-badge\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var6 string
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(len(entries))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 78, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " samples</span></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if chart.PointsVcpu != "" {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<div class=\"mb-6 overflow-auto\"><svg width=\"100%\" height=\"360\" viewBox=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var7 string
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs("0 0 " + fmt.Sprintf("%d", chart.Width) + " 320")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 82, Col: 95}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" role=\"img\" aria-label=\"VM timeline\"><rect x=\"40\" y=\"10\" width=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var8 string
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-60))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 83, Col: 68}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" height=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var9 string
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 83, Col: 109}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "\" fill=\"white\" stroke=\"#e2e8f0\"></rect> <g stroke=\"#e2e8f0\" stroke-width=\"1\" stroke-dasharray=\"2,4\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, y := range chart.GridY {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "<line x1=\"40\" y1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var10 string
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 50}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var11 string
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 89}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var12 string
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 117}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "\"></line> ")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
for _, x := range chart.GridX {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<line x1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var13 string
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "\" y1=\"10\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var14 string
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 78}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var15 string
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 118}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "\"></line>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</g> <line x1=\"40\" y1=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var16 string
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 60}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "\" x2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var17 string
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 99}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var18 string
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 139}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <line x1=\"40\" y1=\"10\" x2=\"40\" y2=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var19 string
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 93, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var20 string
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsVcpu)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 94, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "\" fill=\"none\" stroke=\"#2563eb\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var21 string
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsRam)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 95, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "\" fill=\"none\" stroke=\"#16a34a\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var22 string
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsTin)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 96, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\" fill=\"none\" stroke=\"#0ea5e9\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var23 string
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsBronze)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 97, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "\" fill=\"none\" stroke=\"#a855f7\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var24 string
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsSilver)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 98, Col: 44}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "\" fill=\"none\" stroke=\"#94a3b8\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var25 string
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsGold)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 99, Col: 42}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\" fill=\"none\" stroke=\"#f59e0b\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"end\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.YTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<text x=\"36\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var26 string
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos+3))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 102, Col: 57}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var27 string
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 102, Col: 70}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</g> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"middle\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, tick := range chart.XTicks {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var28 string
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 48}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var29 string
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+24))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 87}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var30 string
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 100}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "</text>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</g> <g font-size=\"12\" fill=\"#475569\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var31 string
templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs("translate(40 " + fmt.Sprintf("%d", chart.Height+50) + ")")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 110, Col: 110}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "\"><rect x=\"0\" y=\"0\" width=\"14\" height=\"8\" fill=\"#2563eb\"></rect><text x=\"22\" y=\"12\">vCPU</text> <rect x=\"90\" y=\"0\" width=\"14\" height=\"8\" fill=\"#16a34a\"></rect><text x=\"112\" y=\"12\">RAM (GB)</text> <rect x=\"200\" y=\"0\" width=\"14\" height=\"8\" fill=\"#0ea5e9\"></rect><text x=\"222\" y=\"12\">Tin</text> <rect x=\"260\" y=\"0\" width=\"14\" height=\"8\" fill=\"#a855f7\"></rect><text x=\"282\" y=\"12\">Bronze</text> <rect x=\"340\" y=\"0\" width=\"14\" height=\"8\" fill=\"#94a3b8\"></rect><text x=\"362\" y=\"12\">Silver</text> <rect x=\"420\" y=\"0\" width=\"14\" height=\"8\" fill=\"#f59e0b\"></rect><text x=\"442\" y=\"12\">Gold</text></g> <text x=\"15\" y=\"20\" transform=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var32 string
templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs("rotate(-90 15 20)")
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 118, Col: 58}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "\" font-size=\"12\" fill=\"#475569\">Resources / Pool</text> <text x=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var33 string
templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width/2))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 119, Col: 49}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "\" y=\"")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var34 string
templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+70))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 119, Col: 88}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\" font-size=\"12\" fill=\"#475569\">Snapshots (oldest left, newest right)</text></svg></div>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<div class=\"grid gap-3 md:grid-cols-2 mb-4\"><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.15em] text-slate-500\">Creation time</p><p class=\"mt-2 text-base font-semibold text-slate-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var35 string
templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(creationLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 126, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</p>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
if creationApprox {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<p class=\"text-xs text-slate-500 mt-1\">Approximate (earliest snapshot)</p>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</div><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.15em] text-slate-500\">Deletion time</p><p class=\"mt-2 text-base font-semibold text-slate-800\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var36 string
templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(deletionLabel)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 133, Col: 76}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "</p></div></div><div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>Snapshot</th><th>VM Name</th><th>VmId</th><th>VmUuid</th><th>Vcenter</th><th>Resource Pool</th><th class=\"text-right\">vCPUs</th><th class=\"text-right\">RAM (GB)</th><th class=\"text-right\">Disk</th></tr></thead> <tbody>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
for _, e := range entries {
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<tr><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var37 string
templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(e.Snapshot)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 154, Col: 25}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var38 string
templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(e.Name)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 155, Col: 21}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var39 string
templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmId)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 156, Col: 21}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var40 string
templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmUuid)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 157, Col: 23}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var41 string
templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(e.Vcenter)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 158, Col: 24}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</td><td>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var42 string
templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(e.ResourcePool)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 159, Col: 29}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var43 string
templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(e.VcpuCount)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 160, Col: 45}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var44 string
templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(e.RamGB)
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 161, Col: 41}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</td><td class=\"text-right\">")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
var templ_7745c5c3_Var45 string
templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", e.ProvisionedDisk))
if templ_7745c5c3_Err != nil {
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 162, Col: 72}
}
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45))
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</td></tr>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</tbody></table></div></section></main></body>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = core.Footer().Render(ctx, templ_7745c5c3_Buffer)
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</html>")
if templ_7745c5c3_Err != nil {
return templ_7745c5c3_Err
}
return nil
})
}
var _ = templruntime.GeneratedTemplate

2140
db/helpers.go Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -2,12 +2,10 @@ package db
import (
"database/sql"
"fmt"
"log/slog"
"strings"
"vctp/db/queries"
//_ "github.com/tursodatabase/libsql-client-go/libsql"
"github.com/jmoiron/sqlx"
_ "modernc.org/sqlite"
)
@@ -38,8 +36,8 @@ func (d *LocalDB) Logger() *slog.Logger {
}
func (d *LocalDB) Close() error {
fmt.Println("Shutting database")
d.logger.Debug("test")
//fmt.Println("Shutting database")
d.logger.Debug("Shutting database")
return d.db.Close()
}

View File

@@ -10,7 +10,6 @@ CREATE TABLE IF NOT EXISTS "Inventory" (
"CreationTime" INTEGER,
"DeletionTime" INTEGER,
"ResourcePool" TEXT,
"VmType" TEXT,
"Datacenter" TEXT,
"Cluster" TEXT,
"Folder" TEXT,

View File

@@ -0,0 +1,14 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS snapshot_registry (
id INTEGER PRIMARY KEY AUTOINCREMENT,
snapshot_type TEXT NOT NULL,
table_name TEXT NOT NULL UNIQUE,
snapshot_time BIGINT NOT NULL
);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE snapshot_registry;
-- +goose StatementEnd

View File

@@ -0,0 +1,48 @@
-- +goose Up
-- +goose StatementBegin
PRAGMA foreign_keys=OFF;
ALTER TABLE "Inventory" RENAME TO "Inventory_old";
CREATE TABLE IF NOT EXISTS "Inventory" (
"Iid" INTEGER PRIMARY KEY AUTOINCREMENT,
"Name" TEXT NOT NULL,
"Vcenter" TEXT NOT NULL,
"VmId" TEXT,
"EventKey" TEXT,
"CloudId" TEXT,
"CreationTime" INTEGER,
"DeletionTime" INTEGER,
"ResourcePool" TEXT,
"Datacenter" TEXT,
"Cluster" TEXT,
"Folder" TEXT,
"ProvisionedDisk" REAL,
"InitialVcpus" INTEGER,
"InitialRam" INTEGER,
"IsTemplate" TEXT NOT NULL DEFAULT "FALSE",
"PoweredOn" TEXT NOT NULL DEFAULT "FALSE",
"SrmPlaceholder" TEXT NOT NULL DEFAULT "FALSE",
"VmUuid" TEXT
);
INSERT INTO "Inventory" (
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
)
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM "Inventory_old";
DROP TABLE "Inventory_old";
PRAGMA foreign_keys=ON;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN "VmType" TEXT;
-- +goose StatementEnd

View File

@@ -0,0 +1,5 @@
-- +goose Up
ALTER TABLE snapshot_registry ADD COLUMN snapshot_count BIGINT NOT NULL DEFAULT 0;
-- +goose Down
ALTER TABLE snapshot_registry DROP COLUMN snapshot_count;

View File

@@ -0,0 +1,5 @@
-- +goose Up
CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time);
-- +goose Down
DROP INDEX IF EXISTS idx_snapshot_registry_type_time;

View File

@@ -10,7 +10,6 @@ CREATE TABLE IF NOT EXISTS "Inventory" (
"CreationTime" BIGINT,
"DeletionTime" BIGINT,
"ResourcePool" TEXT,
"VmType" TEXT,
"Datacenter" TEXT,
"Cluster" TEXT,
"Folder" TEXT,

View File

@@ -0,0 +1,14 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS snapshot_registry (
id BIGSERIAL PRIMARY KEY,
snapshot_type TEXT NOT NULL,
table_name TEXT NOT NULL UNIQUE,
snapshot_time BIGINT NOT NULL
);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE snapshot_registry;
-- +goose StatementEnd

View File

@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" DROP COLUMN IF EXISTS "VmType";
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN "VmType" TEXT;
-- +goose StatementEnd

View File

@@ -0,0 +1,5 @@
-- +goose Up
ALTER TABLE snapshot_registry ADD COLUMN IF NOT EXISTS snapshot_count BIGINT NOT NULL DEFAULT 0;
-- +goose Down
ALTER TABLE snapshot_registry DROP COLUMN IF EXISTS snapshot_count;

View File

@@ -0,0 +1,5 @@
-- +goose Up
CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time);
-- +goose Down
DROP INDEX IF EXISTS idx_snapshot_registry_type_time;

View File

@@ -9,70 +9,94 @@ import (
)
type Event struct {
Eid int64
CloudId string
Source string
EventTime sql.NullInt64
ChainId string
VmId sql.NullString
EventKey sql.NullString
DatacenterName sql.NullString
ComputeResourceName sql.NullString
UserName sql.NullString
Processed int64
DatacenterId sql.NullString
ComputeResourceId sql.NullString
VmName sql.NullString
EventType sql.NullString
Eid int64 `db:"Eid" json:"Eid"`
CloudId string `db:"CloudId" json:"CloudId"`
Source string `db:"Source" json:"Source"`
EventTime sql.NullInt64 `db:"EventTime" json:"EventTime"`
ChainId string `db:"ChainId" json:"ChainId"`
VmId sql.NullString `db:"VmId" json:"VmId"`
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
DatacenterName sql.NullString `db:"DatacenterName" json:"DatacenterName"`
ComputeResourceName sql.NullString `db:"ComputeResourceName" json:"ComputeResourceName"`
UserName sql.NullString `db:"UserName" json:"UserName"`
Processed int64 `db:"Processed" json:"Processed"`
DatacenterId sql.NullString `db:"DatacenterId" json:"DatacenterId"`
ComputeResourceId sql.NullString `db:"ComputeResourceId" json:"ComputeResourceId"`
VmName sql.NullString `db:"VmName" json:"VmName"`
EventType sql.NullString `db:"EventType" json:"EventType"`
}
type Inventory struct {
Iid int64
Name string
Vcenter string
VmId sql.NullString
EventKey sql.NullString
CloudId sql.NullString
CreationTime sql.NullInt64
DeletionTime sql.NullInt64
ResourcePool sql.NullString
VmType sql.NullString
Datacenter sql.NullString
Cluster sql.NullString
Folder sql.NullString
ProvisionedDisk sql.NullFloat64
InitialVcpus sql.NullInt64
InitialRam sql.NullInt64
IsTemplate interface{}
PoweredOn interface{}
SrmPlaceholder interface{}
VmUuid sql.NullString
Iid int64 `db:"Iid" json:"Iid"`
Name string `db:"Name" json:"Name"`
Vcenter string `db:"Vcenter" json:"Vcenter"`
VmId sql.NullString `db:"VmId" json:"VmId"`
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
CloudId sql.NullString `db:"CloudId" json:"CloudId"`
CreationTime sql.NullInt64 `db:"CreationTime" json:"CreationTime"`
DeletionTime sql.NullInt64 `db:"DeletionTime" json:"DeletionTime"`
ResourcePool sql.NullString `db:"ResourcePool" json:"ResourcePool"`
Datacenter sql.NullString `db:"Datacenter" json:"Datacenter"`
Cluster sql.NullString `db:"Cluster" json:"Cluster"`
Folder sql.NullString `db:"Folder" json:"Folder"`
ProvisionedDisk sql.NullFloat64 `db:"ProvisionedDisk" json:"ProvisionedDisk"`
InitialVcpus sql.NullInt64 `db:"InitialVcpus" json:"InitialVcpus"`
InitialRam sql.NullInt64 `db:"InitialRam" json:"InitialRam"`
IsTemplate interface{} `db:"IsTemplate" json:"IsTemplate"`
PoweredOn interface{} `db:"PoweredOn" json:"PoweredOn"`
SrmPlaceholder interface{} `db:"SrmPlaceholder" json:"SrmPlaceholder"`
VmUuid sql.NullString `db:"VmUuid" json:"VmUuid"`
}
type InventoryHistory struct {
Hid int64
InventoryId sql.NullInt64
ReportDate sql.NullInt64
UpdateTime sql.NullInt64
PreviousVcpus sql.NullInt64
PreviousRam sql.NullInt64
PreviousResourcePool sql.NullString
PreviousProvisionedDisk sql.NullFloat64
Hid int64 `db:"Hid" json:"Hid"`
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
ReportDate sql.NullInt64 `db:"ReportDate" json:"ReportDate"`
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
PreviousVcpus sql.NullInt64 `db:"PreviousVcpus" json:"PreviousVcpus"`
PreviousRam sql.NullInt64 `db:"PreviousRam" json:"PreviousRam"`
PreviousResourcePool sql.NullString `db:"PreviousResourcePool" json:"PreviousResourcePool"`
PreviousProvisionedDisk sql.NullFloat64 `db:"PreviousProvisionedDisk" json:"PreviousProvisionedDisk"`
}
type PragmaTableInfo struct {
Cid sql.NullInt64 `db:"cid" json:"cid"`
Name sql.NullString `db:"name" json:"name"`
Type sql.NullString `db:"type" json:"type"`
Notnull sql.NullInt64 `db:"notnull" json:"notnull"`
DfltValue sql.NullString `db:"dflt_value" json:"dflt_value"`
Pk sql.NullInt64 `db:"pk" json:"pk"`
}
type SnapshotRegistry struct {
ID int64 `db:"id" json:"id"`
SnapshotType string `db:"snapshot_type" json:"snapshot_type"`
TableName string `db:"table_name" json:"table_name"`
SnapshotTime int64 `db:"snapshot_time" json:"snapshot_time"`
SnapshotCount int64 `db:"snapshot_count" json:"snapshot_count"`
}
type SqliteMaster struct {
Type sql.NullString `db:"type" json:"type"`
Name sql.NullString `db:"name" json:"name"`
TblName sql.NullString `db:"tbl_name" json:"tbl_name"`
Rootpage sql.NullInt64 `db:"rootpage" json:"rootpage"`
Sql sql.NullString `db:"sql" json:"sql"`
}
type Update struct {
Uid int64
InventoryId sql.NullInt64
UpdateTime sql.NullInt64
UpdateType string
NewVcpus sql.NullInt64
NewRam sql.NullInt64
NewResourcePool sql.NullString
EventKey sql.NullString
EventId sql.NullString
NewProvisionedDisk sql.NullFloat64
UserName sql.NullString
PlaceholderChange sql.NullString
Name sql.NullString
RawChangeString []byte
Uid int64 `db:"Uid" json:"Uid"`
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
UpdateType string `db:"UpdateType" json:"UpdateType"`
NewVcpus sql.NullInt64 `db:"NewVcpus" json:"NewVcpus"`
NewRam sql.NullInt64 `db:"NewRam" json:"NewRam"`
NewResourcePool sql.NullString `db:"NewResourcePool" json:"NewResourcePool"`
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
EventId sql.NullString `db:"EventId" json:"EventId"`
NewProvisionedDisk sql.NullFloat64 `db:"NewProvisionedDisk" json:"NewProvisionedDisk"`
UserName sql.NullString `db:"UserName" json:"UserName"`
PlaceholderChange sql.NullString `db:"PlaceholderChange" json:"PlaceholderChange"`
Name sql.NullString `db:"Name" json:"Name"`
RawChangeString []byte `db:"RawChangeString" json:"RawChangeString"`
}

View File

@@ -32,9 +32,9 @@ WHERE "CloudId" = ? LIMIT 1;
-- name: CreateInventory :one
INSERT INTO inventory (
"Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "VmType", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn"
"Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn"
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING *;
@@ -119,3 +119,13 @@ INSERT INTO inventory_history (
?, ?, ?, ?, ?, ?, ?
)
RETURNING *;
-- name: SqliteTableExists :one
SELECT COUNT(1) AS count
FROM sqlite_master
WHERE type = 'table' AND name = sqlc.arg('table_name');
-- name: SqliteColumnExists :one
SELECT COUNT(1) AS count
FROM pragma_table_info
WHERE name = sqlc.arg('column_name');

View File

@@ -17,8 +17,8 @@ RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourc
`
type CleanupUpdatesParams struct {
UpdateType string
UpdateTime sql.NullInt64
UpdateType string `db:"updateType" json:"updateType"`
UpdateTime sql.NullInt64 `db:"updateTime" json:"updateTime"`
}
func (q *Queries) CleanupUpdates(ctx context.Context, arg CleanupUpdatesParams) error {
@@ -47,19 +47,19 @@ RETURNING Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterNa
`
type CreateEventParams struct {
CloudId string
Source string
EventTime sql.NullInt64
ChainId string
VmId sql.NullString
VmName sql.NullString
EventType sql.NullString
EventKey sql.NullString
DatacenterId sql.NullString
DatacenterName sql.NullString
ComputeResourceId sql.NullString
ComputeResourceName sql.NullString
UserName sql.NullString
CloudId string `db:"CloudId" json:"CloudId"`
Source string `db:"Source" json:"Source"`
EventTime sql.NullInt64 `db:"EventTime" json:"EventTime"`
ChainId string `db:"ChainId" json:"ChainId"`
VmId sql.NullString `db:"VmId" json:"VmId"`
VmName sql.NullString `db:"VmName" json:"VmName"`
EventType sql.NullString `db:"EventType" json:"EventType"`
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
DatacenterId sql.NullString `db:"DatacenterId" json:"DatacenterId"`
DatacenterName sql.NullString `db:"DatacenterName" json:"DatacenterName"`
ComputeResourceId sql.NullString `db:"ComputeResourceId" json:"ComputeResourceId"`
ComputeResourceName sql.NullString `db:"ComputeResourceName" json:"ComputeResourceName"`
UserName sql.NullString `db:"UserName" json:"UserName"`
}
func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event, error) {
@@ -101,32 +101,31 @@ func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event
const createInventory = `-- name: CreateInventory :one
INSERT INTO inventory (
"Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "VmType", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn"
"Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn"
) VALUES(
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
`
type CreateInventoryParams struct {
Name string
Vcenter string
VmId sql.NullString
VmUuid sql.NullString
EventKey sql.NullString
CloudId sql.NullString
CreationTime sql.NullInt64
ResourcePool sql.NullString
VmType sql.NullString
IsTemplate interface{}
Datacenter sql.NullString
Cluster sql.NullString
Folder sql.NullString
ProvisionedDisk sql.NullFloat64
InitialVcpus sql.NullInt64
InitialRam sql.NullInt64
SrmPlaceholder interface{}
PoweredOn interface{}
Name string `db:"Name" json:"Name"`
Vcenter string `db:"Vcenter" json:"Vcenter"`
VmId sql.NullString `db:"VmId" json:"VmId"`
VmUuid sql.NullString `db:"VmUuid" json:"VmUuid"`
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
CloudId sql.NullString `db:"CloudId" json:"CloudId"`
CreationTime sql.NullInt64 `db:"CreationTime" json:"CreationTime"`
ResourcePool sql.NullString `db:"ResourcePool" json:"ResourcePool"`
IsTemplate interface{} `db:"IsTemplate" json:"IsTemplate"`
Datacenter sql.NullString `db:"Datacenter" json:"Datacenter"`
Cluster sql.NullString `db:"Cluster" json:"Cluster"`
Folder sql.NullString `db:"Folder" json:"Folder"`
ProvisionedDisk sql.NullFloat64 `db:"ProvisionedDisk" json:"ProvisionedDisk"`
InitialVcpus sql.NullInt64 `db:"InitialVcpus" json:"InitialVcpus"`
InitialRam sql.NullInt64 `db:"InitialRam" json:"InitialRam"`
SrmPlaceholder interface{} `db:"SrmPlaceholder" json:"SrmPlaceholder"`
PoweredOn interface{} `db:"PoweredOn" json:"PoweredOn"`
}
func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams) (Inventory, error) {
@@ -139,7 +138,6 @@ func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams
arg.CloudId,
arg.CreationTime,
arg.ResourcePool,
arg.VmType,
arg.IsTemplate,
arg.Datacenter,
arg.Cluster,
@@ -161,7 +159,6 @@ func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -186,13 +183,13 @@ RETURNING Hid, InventoryId, ReportDate, UpdateTime, PreviousVcpus, PreviousRam,
`
type CreateInventoryHistoryParams struct {
InventoryId sql.NullInt64
ReportDate sql.NullInt64
UpdateTime sql.NullInt64
PreviousVcpus sql.NullInt64
PreviousRam sql.NullInt64
PreviousResourcePool sql.NullString
PreviousProvisionedDisk sql.NullFloat64
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
ReportDate sql.NullInt64 `db:"ReportDate" json:"ReportDate"`
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
PreviousVcpus sql.NullInt64 `db:"PreviousVcpus" json:"PreviousVcpus"`
PreviousRam sql.NullInt64 `db:"PreviousRam" json:"PreviousRam"`
PreviousResourcePool sql.NullString `db:"PreviousResourcePool" json:"PreviousResourcePool"`
PreviousProvisionedDisk sql.NullFloat64 `db:"PreviousProvisionedDisk" json:"PreviousProvisionedDisk"`
}
func (q *Queries) CreateInventoryHistory(ctx context.Context, arg CreateInventoryHistoryParams) (InventoryHistory, error) {
@@ -229,19 +226,19 @@ RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourc
`
type CreateUpdateParams struct {
InventoryId sql.NullInt64
Name sql.NullString
EventKey sql.NullString
EventId sql.NullString
UpdateTime sql.NullInt64
UpdateType string
NewVcpus sql.NullInt64
NewRam sql.NullInt64
NewResourcePool sql.NullString
NewProvisionedDisk sql.NullFloat64
UserName sql.NullString
PlaceholderChange sql.NullString
RawChangeString []byte
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
Name sql.NullString `db:"Name" json:"Name"`
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
EventId sql.NullString `db:"EventId" json:"EventId"`
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
UpdateType string `db:"UpdateType" json:"UpdateType"`
NewVcpus sql.NullInt64 `db:"NewVcpus" json:"NewVcpus"`
NewRam sql.NullInt64 `db:"NewRam" json:"NewRam"`
NewResourcePool sql.NullString `db:"NewResourcePool" json:"NewResourcePool"`
NewProvisionedDisk sql.NullFloat64 `db:"NewProvisionedDisk" json:"NewProvisionedDisk"`
UserName sql.NullString `db:"UserName" json:"UserName"`
PlaceholderChange sql.NullString `db:"PlaceholderChange" json:"PlaceholderChange"`
RawChangeString []byte `db:"RawChangeString" json:"RawChangeString"`
}
func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Update, error) {
@@ -281,7 +278,7 @@ func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Upd
}
const getInventoryByName = `-- name: GetInventoryByName :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
WHERE "Name" = ?
`
@@ -304,7 +301,6 @@ func (q *Queries) GetInventoryByName(ctx context.Context, name string) ([]Invent
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -330,7 +326,7 @@ func (q *Queries) GetInventoryByName(ctx context.Context, name string) ([]Invent
}
const getInventoryByVcenter = `-- name: GetInventoryByVcenter :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
WHERE "Vcenter" = ?
`
@@ -353,7 +349,6 @@ func (q *Queries) GetInventoryByVcenter(ctx context.Context, vcenter string) ([]
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -379,7 +374,7 @@ func (q *Queries) GetInventoryByVcenter(ctx context.Context, vcenter string) ([]
}
const getInventoryEventId = `-- name: GetInventoryEventId :one
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
WHERE "CloudId" = ? LIMIT 1
`
@@ -396,7 +391,6 @@ func (q *Queries) GetInventoryEventId(ctx context.Context, cloudid sql.NullStrin
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -412,7 +406,7 @@ func (q *Queries) GetInventoryEventId(ctx context.Context, cloudid sql.NullStrin
}
const getInventoryVcUrl = `-- name: GetInventoryVcUrl :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
WHERE "Vcenter" = ?1
`
@@ -435,7 +429,6 @@ func (q *Queries) GetInventoryVcUrl(ctx context.Context, vc string) ([]Inventory
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -461,13 +454,13 @@ func (q *Queries) GetInventoryVcUrl(ctx context.Context, vc string) ([]Inventory
}
const getInventoryVmId = `-- name: GetInventoryVmId :one
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
WHERE "VmId" = ?1 AND "Datacenter" = ?2
`
type GetInventoryVmIdParams struct {
VmId sql.NullString
DatacenterName sql.NullString
VmId sql.NullString `db:"vmId" json:"vmId"`
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
}
func (q *Queries) GetInventoryVmId(ctx context.Context, arg GetInventoryVmIdParams) (Inventory, error) {
@@ -483,7 +476,6 @@ func (q *Queries) GetInventoryVmId(ctx context.Context, arg GetInventoryVmIdPara
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -499,13 +491,13 @@ func (q *Queries) GetInventoryVmId(ctx context.Context, arg GetInventoryVmIdPara
}
const getInventoryVmUuid = `-- name: GetInventoryVmUuid :one
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
WHERE "VmUuid" = ?1 AND "Datacenter" = ?2
`
type GetInventoryVmUuidParams struct {
VmUuid sql.NullString
DatacenterName sql.NullString
VmUuid sql.NullString `db:"vmUuid" json:"vmUuid"`
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
}
func (q *Queries) GetInventoryVmUuid(ctx context.Context, arg GetInventoryVmUuidParams) (Inventory, error) {
@@ -521,7 +513,6 @@ func (q *Queries) GetInventoryVmUuid(ctx context.Context, arg GetInventoryVmUuid
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -537,7 +528,7 @@ func (q *Queries) GetInventoryVmUuid(ctx context.Context, arg GetInventoryVmUuid
}
const getReportInventory = `-- name: GetReportInventory :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
ORDER BY "CreationTime"
`
@@ -560,7 +551,6 @@ func (q *Queries) GetReportInventory(ctx context.Context) ([]Inventory, error) {
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -634,8 +624,8 @@ WHERE "UpdateType" = ?1 AND "InventoryId" = ?2
`
type GetVmUpdatesParams struct {
UpdateType string
InventoryId sql.NullInt64
UpdateType string `db:"updateType" json:"updateType"`
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
}
func (q *Queries) GetVmUpdates(ctx context.Context, arg GetVmUpdatesParams) ([]Update, error) {
@@ -679,12 +669,12 @@ func (q *Queries) GetVmUpdates(ctx context.Context, arg GetVmUpdatesParams) ([]U
const inventoryCleanup = `-- name: InventoryCleanup :exec
DELETE FROM inventory
WHERE "VmId" = ?1 AND "Datacenter" = ?2
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
`
type InventoryCleanupParams struct {
VmId sql.NullString
DatacenterName sql.NullString
VmId sql.NullString `db:"vmId" json:"vmId"`
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
}
func (q *Queries) InventoryCleanup(ctx context.Context, arg InventoryCleanupParams) error {
@@ -695,7 +685,7 @@ func (q *Queries) InventoryCleanup(ctx context.Context, arg InventoryCleanupPara
const inventoryCleanupTemplates = `-- name: InventoryCleanupTemplates :exec
DELETE FROM inventory
WHERE "IsTemplate" = 'TRUE'
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
`
func (q *Queries) InventoryCleanupTemplates(ctx context.Context) error {
@@ -706,7 +696,7 @@ func (q *Queries) InventoryCleanupTemplates(ctx context.Context) error {
const inventoryCleanupVcenter = `-- name: InventoryCleanupVcenter :exec
DELETE FROM inventory
WHERE "Vcenter" = ?1
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
`
func (q *Queries) InventoryCleanupVcenter(ctx context.Context, vc string) error {
@@ -721,9 +711,9 @@ WHERE "VmId" = ?2 AND "Datacenter" = ?3
`
type InventoryMarkDeletedParams struct {
DeletionTime sql.NullInt64
VmId sql.NullString
DatacenterName sql.NullString
DeletionTime sql.NullInt64 `db:"deletionTime" json:"deletionTime"`
VmId sql.NullString `db:"vmId" json:"vmId"`
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
}
func (q *Queries) InventoryMarkDeleted(ctx context.Context, arg InventoryMarkDeletedParams) error {
@@ -738,9 +728,9 @@ WHERE "Iid" = ?3
`
type InventoryUpdateParams struct {
Uuid sql.NullString
SrmPlaceholder interface{}
Iid int64
Uuid sql.NullString `db:"uuid" json:"uuid"`
SrmPlaceholder interface{} `db:"srmPlaceholder" json:"srmPlaceholder"`
Iid int64 `db:"iid" json:"iid"`
}
func (q *Queries) InventoryUpdate(ctx context.Context, arg InventoryUpdateParams) error {
@@ -793,7 +783,7 @@ func (q *Queries) ListEvents(ctx context.Context) ([]Event, error) {
}
const listInventory = `-- name: ListInventory :many
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
ORDER BY "Name"
`
@@ -816,7 +806,6 @@ func (q *Queries) ListInventory(ctx context.Context) ([]Inventory, error) {
&i.CreationTime,
&i.DeletionTime,
&i.ResourcePool,
&i.VmType,
&i.Datacenter,
&i.Cluster,
&i.Folder,
@@ -887,6 +876,32 @@ func (q *Queries) ListUnprocessedEvents(ctx context.Context, eventtime sql.NullI
return items, nil
}
const sqliteColumnExists = `-- name: SqliteColumnExists :one
SELECT COUNT(1) AS count
FROM sqlite_master AS m, pragma_table_info(m.name) AS ti
WHERE m.type = 'table' AND ti.name = ?1
`

// SqliteColumnExists reports how many table columns named columnName exist
// in the connected SQLite database (0 when the column appears nowhere).
//
// Bug fixed: the previous query read `FROM pragma_table_info` without a
// table argument. pragma_table_info is a table-valued function whose
// table-name argument is required, so SQLite rejected the query at runtime.
// Joining sqlite_master supplies the argument once per user table.
// NOTE(review): the result now counts matches across ALL tables — confirm
// callers only test the count for zero/non-zero.
func (q *Queries) SqliteColumnExists(ctx context.Context, columnName sql.NullString) (int64, error) {
	row := q.db.QueryRowContext(ctx, sqliteColumnExists, columnName)
	var count int64
	err := row.Scan(&count)
	return count, err
}
const sqliteTableExists = `-- name: SqliteTableExists :one
SELECT COUNT(1) AS count
FROM sqlite_master
WHERE type = 'table' AND name = ?1
`

// SqliteTableExists returns the number of entries in sqlite_master that are
// tables named tableName — effectively 0 when the table is absent and 1 when
// it exists. Errors from the underlying query are surfaced via Scan.
func (q *Queries) SqliteTableExists(ctx context.Context, tableName sql.NullString) (int64, error) {
	var count int64
	scanErr := q.db.QueryRowContext(ctx, sqliteTableExists, tableName).Scan(&count)
	return count, scanErr
}
const updateEventsProcessed = `-- name: UpdateEventsProcessed :exec
UPDATE events
SET "Processed" = 1

View File

@@ -8,7 +8,6 @@ CREATE TABLE IF NOT EXISTS inventory (
"CreationTime" INTEGER,
"DeletionTime" INTEGER,
"ResourcePool" TEXT,
"VmType" TEXT,
"Datacenter" TEXT,
"Cluster" TEXT,
"Folder" TEXT,
@@ -66,3 +65,31 @@ CREATE TABLE IF NOT EXISTS inventory_history (
"PreviousResourcePool" TEXT,
"PreviousProvisionedDisk" REAL
);
-- Registry of materialized snapshot tables: one row per snapshot table,
-- recording its type, unix timestamp, and row count.
CREATE TABLE IF NOT EXISTS snapshot_registry (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"snapshot_type" TEXT NOT NULL,
-- Name of the snapshot table this row describes; UNIQUE so each
-- materialized table is registered at most once.
"table_name" TEXT NOT NULL UNIQUE,
-- Unix timestamp (seconds, presumably — TODO confirm against writers).
"snapshot_time" INTEGER NOT NULL,
"snapshot_count" BIGINT NOT NULL DEFAULT 0
);
-- Supports lookups/pruning of snapshots by type ordered by time.
CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time);
-- The following tables are declared for sqlc type-checking only.
-- Do not apply this file as a migration.
-- Mirrors SQLite's built-in sqlite_master catalog schema.
CREATE TABLE sqlite_master (
"type" TEXT,
"name" TEXT,
"tbl_name" TEXT,
"rootpage" INTEGER,
"sql" TEXT
);
-- Mirrors the column set returned by SQLite's pragma_table_info().
CREATE TABLE pragma_table_info (
"cid" INTEGER,
"name" TEXT,
"type" TEXT,
"notnull" INTEGER,
"dflt_value" TEXT,
"pk" INTEGER
);

178
dist/assets/css/web3.css vendored Normal file
View File

@@ -0,0 +1,178 @@
/* Design tokens shared by the rules below.
   NOTE(review): the file is named web3.css but variables and most classes
   use a "web2-" prefix, while the button variants further down use
   "web3-" — presumably historical; confirm the intended naming. */
:root {
--web2-blue: #1d9bf0;
--web2-slate: #0f172a;
--web2-muted: #64748b;
--web2-card: #ffffff;
--web2-border: #e5e7eb;
}
/* Base typography and page background. */
body {
font-family: "Segoe UI", "Helvetica Neue", Arial, sans-serif;
color: var(--web2-slate);
}
.web2-bg {
background: #ffffff;
}
/* Centered content column with page-level padding. */
.web2-shell {
max-width: 1100px;
margin: 0 auto;
padding: 2rem 1.5rem 4rem;
}
.web2-header {
background: var(--web2-card);
border: 1px solid var(--web2-border);
border-radius: 4px;
padding: 1.5rem 2rem;
}
/* Card container; headings inside get an accent bar via ::before. */
.web2-card {
background: var(--web2-card);
border: 1px solid var(--web2-border);
border-radius: 4px;
padding: 1.5rem 1.75rem;
}
.web2-card h2 {
position: relative;
padding-left: 0.75rem;
font-size: 1.05rem;
font-weight: 700;
letter-spacing: 0.02em;
color: #0b1220;
}
/* Vertical accent bar to the left of card headings. */
.web2-card h2::before {
content: "";
position: absolute;
left: 0;
top: 50%;
transform: translateY(-50%);
width: 4px;
height: 70%;
background: var(--web2-blue);
border-radius: 2px;
box-shadow: 0 0 0 1px rgba(29, 155, 240, 0.18);
}
/* Small inline label/chip. */
.web2-pill {
display: inline-flex;
align-items: center;
gap: 0.4rem;
background: #f8fafc;
border: 1px solid var(--web2-border);
color: var(--web2-muted);
padding: 0.2rem 0.6rem;
border-radius: 3px;
font-size: 0.85rem;
letter-spacing: 0.02em;
}
/* Inline code spans. */
.web2-code {
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
background: #f1f5f9;
border: 1px solid var(--web2-border);
border-radius: 3px;
padding: 0.1rem 0.35rem;
font-size: 0.85em;
color: #0f172a;
}
/* Space between consecutive paragraphs only (not before the first). */
.web2-paragraphs p + p {
margin-top: 0.85rem;
}
.web2-link {
color: var(--web2-blue);
text-decoration: none;
font-weight: 600;
}
.web2-link:hover {
text-decoration: underline;
}
/* Primary (filled) button. */
.web2-button {
background: var(--web2-blue);
color: #fff;
padding: 0.45rem 0.9rem;
border-radius: 3px;
border: 1px solid #1482d0;
box-shadow: none;
font-weight: 600;
text-decoration: none;
}
.web2-button:hover {
background: #1787d4;
}
.web2-button-group {
display: flex;
flex-wrap: wrap;
}
/* Spacing via per-button margins rather than gap on the group. */
.web2-button-group .web2-button {
margin: 0 0.5rem 0.5rem 0;
}
/* Secondary (subtle) button variant — note the "web3-" prefix. */
.web3-button {
background: #f3f4f6;
color: #0f172a;
padding: 0.5rem 1rem;
border-radius: 6px;
border: 1px solid #e5e7eb;
text-decoration: none;
font-weight: 600;
transition: background 0.15s ease, border-color 0.15s ease, color 0.15s ease, box-shadow 0.15s ease;
display: inline-flex;
align-items: center;
gap: 0.35rem;
}
.web3-button:hover {
background: #e2e8f0;
border-color: #cbd5e1;
}
/* Selected state; must stay after .web3-button/.web3-button:hover so the
   later rule wins on equal-or-higher specificity. */
.web3-button.active {
background: #dbeafe;
border-color: #93c5fd;
color: #1d4ed8;
box-shadow: 0 0 0 2px rgba(147, 197, 253, 0.35);
}
.web3-button-group {
display: flex;
gap: 0.75rem;
flex-wrap: wrap;
margin-top: 4px;
}
.web2-list li {
background: #ffffff;
border: 1px solid var(--web2-border);
border-radius: 3px;
padding: 0.75rem 1rem;
box-shadow: none;
}
/* Data table: striped rows, bordered cells, muted header. */
.web2-table {
width: 100%;
border-collapse: collapse;
font-size: 0.95rem;
}
.web2-table thead th {
text-align: left;
padding: 0.75rem 0.5rem;
font-weight: 700;
color: var(--web2-muted);
border-bottom: 1px solid var(--web2-border);
}
.web2-table tbody td {
padding: 0.9rem 0.5rem;
border-bottom: 1px solid var(--web2-border);
}
.web2-table tbody tr:nth-child(odd) {
background: #f8fafc;
}
.web2-table tbody tr:nth-child(even) {
background: #ffffff;
}
/* Group/section header rows inside the table. */
.web2-group-row td {
background: #e8eef5;
color: #0f172a;
border-bottom: 1px solid var(--web2-border);
padding: 0.65rem 0.5rem;
}
.web2-badge {
display: inline-flex;
align-items: center;
gap: 0.25rem;
border: 1px solid var(--web2-border);
padding: 0.15rem 0.45rem;
border-radius: 3px;
font-size: 0.8rem;
color: var(--web2-muted);
background: #f8fafc;
}

2
dist/dist.go vendored
View File

@@ -4,5 +4,5 @@ import (
"embed"
)
//go:embed all:assets
//go:embed all:assets favicon.ico favicon-16x16.png favicon-32x32.png
var AssetsDir embed.FS

BIN
dist/favicon-16x16.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 520 B

BIN
dist/favicon-32x32.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 KiB

BIN
dist/favicon.ico vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

View File

@@ -95,14 +95,33 @@ func beforeAll() {
func startApp() error {
port := getPort()
app = exec.Command("go", "run", "main.go")
settingsPath := "./test-settings.yml"
settingsBody := fmt.Sprintf(`settings:
log_level: "debug"
log_output: "text"
database_driver: "sqlite"
database_url: "./test-db.sqlite3"
bind_ip: "127.0.0.1"
bind_port: %d
bind_disable_tls: true
tls_cert_filename:
tls_key_filename:
vcenter_username: "test"
vcenter_password: "test"
vcenter_insecure: true
vcenter_event_polling_seconds: 60
vcenter_inventory_polling_seconds: 7200
vcenter_inventory_snapshot_seconds: 3600
vcenter_inventory_aggregate_seconds: 86400
hourly_snapshot_max_age_days: 1
daily_snapshot_max_age_months: 1
`, port)
if err := os.WriteFile("../"+settingsPath, []byte(settingsBody), 0o600); err != nil {
return err
}
app = exec.Command("go", "run", "main.go", "-settings", settingsPath)
app.Dir = "../"
app.Env = append(
os.Environ(),
"DB_URL=./test-db.sqlite3",
fmt.Sprintf("PORT=%d", port),
"LOG_LEVEL=DEBUG",
)
app.Env = os.Environ()
var err error
baseUrL, err = url.Parse(fmt.Sprintf("http://localhost:%d", port))
@@ -188,6 +207,9 @@ func afterAll() {
if err := os.Remove("../test-db.sqlite3"); err != nil {
log.Fatalf("could not remove test-db.sqlite3: %v", err)
}
if err := os.Remove("../test-settings.yml"); err != nil {
log.Fatalf("could not remove test-settings.yml: %v", err)
}
}
// beforeEach creates a new context and page for each test,

26
go.mod
View File

@@ -1,33 +1,48 @@
module vctp
go 1.25.0
go 1.25.5
require (
github.com/a-h/templ v0.3.977
github.com/go-co-op/gocron/v2 v2.19.0
github.com/jackc/pgx/v5 v5.8.0
github.com/jmoiron/sqlx v1.4.0
github.com/joho/godotenv v1.5.1
github.com/pressly/goose/v3 v3.26.0
github.com/prometheus/client_golang v1.19.0
github.com/swaggo/swag v1.16.6
github.com/vmware/govmomi v0.52.0
github.com/xuri/excelize/v2 v2.10.0
gopkg.in/yaml.v2 v2.4.0
modernc.org/sqlite v1.43.0
modernc.org/sqlite v1.44.0
)
require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/swag v0.19.15 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mfridman/interpolate v0.0.2 // indirect
github.com/ncruces/go-strftime v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/richardlehane/mscfb v1.0.6 // indirect
github.com/richardlehane/msoleps v1.0.5 // indirect
github.com/richardlehane/msoleps v1.0.6 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sethvargo/go-retry v0.3.0 // indirect
github.com/tiendc/go-deepcopy v1.7.2 // indirect
@@ -36,10 +51,13 @@ require (
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/tools v0.41.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
modernc.org/libc v1.67.4 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect

65
go.sum
View File

@@ -1,7 +1,18 @@
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/a-h/templ v0.3.977 h1:kiKAPXTZE2Iaf8JbtM21r54A8bCNsncrfnokZZSrSDg=
github.com/a-h/templ v0.3.977/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -9,6 +20,16 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU=
github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
@@ -30,16 +51,23 @@ github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
@@ -48,16 +76,25 @@ github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6B
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.26.0 h1:KJakav68jdH0WDvoAcj8+n61WqOIaPGgH0bJWS6jpmM=
github.com/pressly/goose/v3 v3.26.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/richardlehane/mscfb v1.0.6 h1:eN3bvvZCp00bs7Zf52bxNwAx5lJDBK1tCuH19qq5aC8=
github.com/richardlehane/mscfb v1.0.6/go.mod h1:pe0+IUIc0AHh0+teNzBlJCtSyZdFOGgV4ZK9bsoV+Jo=
github.com/richardlehane/msoleps v1.0.5 h1:kNlmACZuwC8ZWPLoJtD+HtZOsKJgYn7gXgUIcRB7dbo=
github.com/richardlehane/msoleps v1.0.5/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/richardlehane/msoleps v1.0.6 h1:9BvkpjvD+iUBalUY4esMwv6uBkfOip/Lzvd93jvR9gg=
github.com/richardlehane/msoleps v1.0.6/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
@@ -66,9 +103,12 @@ github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
github.com/tiendc/go-deepcopy v1.7.2 h1:Ut2yYR7W9tWjTQitganoIue4UGxZwCcJy3orjrrIj44=
github.com/tiendc/go-deepcopy v1.7.2/go.mod h1:4bKjNC2r7boYOkD2IOuZpYjmlDdzjbpTRyCx+goBCJQ=
github.com/vmware/govmomi v0.52.0 h1:JyxQ1IQdllrY7PJbv2am9mRsv3p9xWlIQ66bv+XnyLw=
@@ -91,23 +131,36 @@ golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ=
golang.org/x/image v0.25.0/go.mod h1:tCAmOEGthTtkalusGp1g3xa2gke8J6c2N565dTyl9Rs=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
@@ -132,8 +185,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.43.0 h1:8YqiFx3G1VhHTXO2Q00bl1Wz9KhS9Q5okwfp9Y97VnA=
modernc.org/sqlite v1.43.0/go.mod h1:+VkC6v3pLOAE0A0uVucQEcbVW0I5nHCeDaBf+DpsQT8=
modernc.org/sqlite v1.44.0 h1:YjCKJnzZde2mLVy0cMKTSL4PxCmbIguOq9lGp8ZvGOc=
modernc.org/sqlite v1.44.0/go.mod h1:2Dq41ir5/qri7QJJJKNZcP4UF7TsX/KNeykYgPDtGhE=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=

125
internal/metrics/metrics.go Normal file
View File

@@ -0,0 +1,125 @@
package metrics
import (
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Package-level collectors. A dedicated (non-default) registry is used so
// that Handler() exposes exactly the collectors registered in init() below
// and nothing else.
var (
registry = prometheus.NewRegistry()
// Hourly snapshot job: completion/failure counters plus gauges for the
// last run's start time and row count.
HourlySnapshotTotal = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_hourly_snapshots_total", Help: "Total number of hourly snapshot jobs completed."})
HourlySnapshotFailures = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_hourly_snapshots_failed_total", Help: "Hourly snapshot jobs that failed."})
HourlySnapshotLast = prometheus.NewGauge(prometheus.GaugeOpts{Name: "vctp_hourly_snapshot_last_unix", Help: "Unix timestamp of the last hourly snapshot start time."})
HourlySnapshotRows = prometheus.NewGauge(prometheus.GaugeOpts{Name: "vctp_hourly_snapshot_last_rows", Help: "Row count of the last hourly snapshot table."})
// Daily aggregation job: counters plus a duration histogram with
// exponential buckets from 1s doubling over 10 buckets.
DailyAggregationsTotal = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_daily_aggregations_total", Help: "Total number of daily aggregation jobs completed."})
DailyAggregationFailures = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_daily_aggregations_failed_total", Help: "Daily aggregation jobs that failed."})
DailyAggregationDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "vctp_daily_aggregation_duration_seconds",
Help: "Duration of daily aggregation jobs.",
Buckets: prometheus.ExponentialBuckets(1, 2, 10),
})
// Monthly aggregation job: same shape as the daily metrics above.
MonthlyAggregationsTotal = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_monthly_aggregations_total", Help: "Total number of monthly aggregation jobs completed."})
MonthlyAggregationFailures = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_monthly_aggregations_failed_total", Help: "Monthly aggregation jobs that failed."})
MonthlyAggregationDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "vctp_monthly_aggregation_duration_seconds",
Help: "Duration of monthly aggregation jobs.",
Buckets: prometheus.ExponentialBuckets(1, 2, 10),
})
ReportsAvailable = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "vctp_reports_available",
Help: "Number of downloadable reports present on disk.",
})
// Per-vCenter metrics, labelled by vCenter name.
// NOTE(review): RecordVcenterSnapshot increments this on any error, not
// only connection failures — the metric name may overstate its scope.
VcenterConnectFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "vctp_vcenter_connect_failures_total",
Help: "Failed connections to vCenter during snapshot runs.",
}, []string{"vcenter"})
// Per-vCenter snapshot durations; buckets start at 0.5s, doubling.
VcenterSnapshotDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "vctp_vcenter_snapshot_duration_seconds",
Help: "Duration of per-vCenter hourly snapshot jobs.",
Buckets: prometheus.ExponentialBuckets(0.5, 2, 10),
}, []string{"vcenter"})
VcenterInventorySize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "vctp_vcenter_inventory_size",
Help: "Number of VMs seen in the last successful snapshot per vCenter.",
}, []string{"vcenter"})
)
// init registers every collector above with the private registry.
// MustRegister panics on duplicate registration, which would indicate a
// programming error in this file.
func init() {
registry.MustRegister(
HourlySnapshotTotal,
HourlySnapshotFailures,
HourlySnapshotLast,
HourlySnapshotRows,
DailyAggregationsTotal,
DailyAggregationFailures,
DailyAggregationDuration,
MonthlyAggregationsTotal,
MonthlyAggregationFailures,
MonthlyAggregationDuration,
ReportsAvailable,
VcenterConnectFailures,
VcenterSnapshotDuration,
VcenterInventorySize,
)
}
// Handler returns an http.Handler that serves Prometheus metrics.
// It exposes only the collectors registered on this package's private
// registry in init() — no default Go runtime or process collectors.
func Handler() http.Handler {
return promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
}
// RecordVcenterSnapshot logs per-vCenter snapshot metrics.
//
// The snapshot duration is observed unconditionally. On success the
// inventory-size gauge for this vCenter is set to vmCount; on error the
// failure counter is incremented instead and the gauge is left untouched.
func RecordVcenterSnapshot(vcenter string, duration time.Duration, vmCount int64, err error) {
	VcenterSnapshotDuration.WithLabelValues(vcenter).Observe(duration.Seconds())
	if err == nil {
		VcenterInventorySize.WithLabelValues(vcenter).Set(float64(vmCount))
	} else {
		VcenterConnectFailures.WithLabelValues(vcenter).Inc()
	}
}
// RecordHourlySnapshot logs aggregate hourly snapshot results.
//
// The last-start-time and row-count gauges are updated on every call —
// including failed runs — while exactly one of the success/failure
// counters is incremented depending on err.
func RecordHourlySnapshot(start time.Time, rows int64, err error) {
	HourlySnapshotLast.Set(float64(start.Unix()))
	HourlySnapshotRows.Set(float64(rows))
	if err == nil {
		HourlySnapshotTotal.Inc()
	} else {
		HourlySnapshotFailures.Inc()
	}
}
// RecordDailyAggregation logs daily aggregation metrics.
// The duration histogram is always observed; exactly one of the
// success/failure counters is incremented depending on err.
func RecordDailyAggregation(duration time.Duration, err error) {
	DailyAggregationDuration.Observe(duration.Seconds())
	if err == nil {
		DailyAggregationsTotal.Inc()
	} else {
		DailyAggregationFailures.Inc()
	}
}
// RecordMonthlyAggregation logs monthly aggregation metrics.
// The duration histogram is always observed; exactly one of the
// success/failure counters is incremented depending on err.
func RecordMonthlyAggregation(duration time.Duration, err error) {
	MonthlyAggregationDuration.Observe(duration.Seconds())
	if err == nil {
		MonthlyAggregationsTotal.Inc()
	} else {
		MonthlyAggregationFailures.Inc()
	}
}
// SetReportsAvailable updates the gauge for report files found on disk.
// Callers pass the current on-disk count; the gauge is set absolutely,
// not incremented.
func SetReportsAvailable(count int) {
	ReportsAvailable.Set(float64(count))
}

View File

@@ -113,12 +113,10 @@ func CreateInventoryReport(logger *slog.Logger, Database db.Database, ctx contex
}
// Set column autowidth
/*
err = SetColAutoWidth(xlsx, sheetName)
if err != nil {
fmt.Printf("Error setting auto width : '%s'\n", err)
}
*/
err = SetColAutoWidth(xlsx, sheetName)
if err != nil {
logger.Error("Error setting auto width", "error", err)
}
// Save the Excel file into a byte buffer
if err := xlsx.Write(&buffer); err != nil {
@@ -226,12 +224,10 @@ func CreateUpdatesReport(logger *slog.Logger, Database db.Database, ctx context.
}
// Set column autowidth
/*
err = SetColAutoWidth(xlsx, sheetName)
if err != nil {
fmt.Printf("Error setting auto width : '%s'\n", err)
}
*/
err = SetColAutoWidth(xlsx, sheetName)
if err != nil {
logger.Error("Error setting auto width", "error", err)
}
// Save the Excel file into a byte buffer
if err := xlsx.Write(&buffer); err != nil {

File diff suppressed because it is too large Load Diff

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"log/slog"
"os"
"path/filepath"
"vctp/internal/utils"
"gopkg.in/yaml.v2"
@@ -19,10 +20,41 @@ type Settings struct {
// SettingsYML struct holds various runtime data that is too cumbersome to specify via command line, eg replacement properties
type SettingsYML struct {
Settings struct {
TenantsToFilter []string `yaml:"tenants_to_filter"`
NodeChargeClusters []string `yaml:"node_charge_clusters"`
SrmActiveActiveVms []string `yaml:"srm_activeactive_vms"`
VcenterAddresses []string `yaml:"vcenter_addresses"`
LogLevel string `yaml:"log_level"`
LogOutput string `yaml:"log_output"`
DatabaseDriver string `yaml:"database_driver"`
DatabaseURL string `yaml:"database_url"`
BindIP string `yaml:"bind_ip"`
BindPort int `yaml:"bind_port"`
BindDisableTLS bool `yaml:"bind_disable_tls"`
TLSCertFilename string `yaml:"tls_cert_filename"`
TLSKeyFilename string `yaml:"tls_key_filename"`
VcenterUsername string `yaml:"vcenter_username"`
VcenterPassword string `yaml:"vcenter_password"`
VcenterInsecure bool `yaml:"vcenter_insecure"`
VcenterEventPollingSeconds int `yaml:"vcenter_event_polling_seconds"`
VcenterInventoryPollingSeconds int `yaml:"vcenter_inventory_polling_seconds"`
VcenterInventorySnapshotSeconds int `yaml:"vcenter_inventory_snapshot_seconds"`
VcenterInventoryAggregateSeconds int `yaml:"vcenter_inventory_aggregate_seconds"`
HourlySnapshotConcurrency int `yaml:"hourly_snapshot_concurrency"`
HourlySnapshotMaxAgeDays int `yaml:"hourly_snapshot_max_age_days"`
DailySnapshotMaxAgeMonths int `yaml:"daily_snapshot_max_age_months"`
SnapshotCleanupCron string `yaml:"snapshot_cleanup_cron"`
ReportsDir string `yaml:"reports_dir"`
HourlyJobTimeoutSeconds int `yaml:"hourly_job_timeout_seconds"`
HourlySnapshotTimeoutSeconds int `yaml:"hourly_snapshot_timeout_seconds"`
HourlySnapshotRetrySeconds int `yaml:"hourly_snapshot_retry_seconds"`
HourlySnapshotMaxRetries int `yaml:"hourly_snapshot_max_retries"`
DailyJobTimeoutSeconds int `yaml:"daily_job_timeout_seconds"`
MonthlyJobTimeoutSeconds int `yaml:"monthly_job_timeout_seconds"`
MonthlyAggregationGranularity string `yaml:"monthly_aggregation_granularity"`
MonthlyAggregationCron string `yaml:"monthly_aggregation_cron"`
CleanupJobTimeoutSeconds int `yaml:"cleanup_job_timeout_seconds"`
TenantsToFilter []string `yaml:"tenants_to_filter"`
NodeChargeClusters []string `yaml:"node_charge_clusters"`
SrmActiveActiveVms []string `yaml:"srm_activeactive_vms"`
VcenterAddresses []string `yaml:"vcenter_addresses"`
PostgresWorkMemMB int `yaml:"postgres_work_mem_mb"`
} `yaml:"settings"`
}
@@ -60,8 +92,57 @@ func (s *Settings) ReadYMLSettings() error {
return fmt.Errorf("unable to decode settings file : '%s'", err)
}
s.Logger.Debug("Updating settings", "settings", settings)
// Avoid logging sensitive fields (e.g., credentials).
redacted := settings
redacted.Settings.VcenterPassword = "REDACTED"
s.Logger.Debug("Updating settings", "settings", redacted)
s.Values = &settings
return nil
}
// WriteYMLSettings persists the in-memory settings back to s.SettingsPath.
// The write is atomic: data is marshalled to YAML, written to a temp file in
// the same directory (so the final rename cannot cross filesystems), and then
// renamed over the original. The existing file's permission bits are preserved
// when present; otherwise 0644 is used. Returns an error if settings are not
// loaded, no path is configured, or any filesystem step fails.
func (s *Settings) WriteYMLSettings() error {
	if s.Values == nil {
		return errors.New("settings are not loaded")
	}
	if len(s.SettingsPath) == 0 {
		return errors.New("settings file path not specified")
	}
	data, err := yaml.Marshal(s.Values)
	if err != nil {
		return fmt.Errorf("unable to encode settings file: %w", err)
	}
	// Default mode for a brand-new settings file; replaced below by the
	// existing file's permissions when one is already present.
	mode := os.FileMode(0o644)
	if info, err := os.Stat(s.SettingsPath); err == nil {
		mode = info.Mode().Perm()
	}
	dir := filepath.Dir(s.SettingsPath)
	tmp, err := os.CreateTemp(dir, "vctp-settings-*.yml")
	if err != nil {
		return fmt.Errorf("unable to create temp settings file: %w", err)
	}
	tmpName := tmp.Name()
	// Best-effort cleanup: after a successful rename the temp file no longer
	// exists and this Remove is a harmless no-op.
	defer func() {
		_ = os.Remove(tmpName)
	}()
	if _, err := tmp.Write(data); err != nil {
		_ = tmp.Close()
		return fmt.Errorf("unable to write temp settings file: %w", err)
	}
	if err := tmp.Chmod(mode); err != nil {
		_ = tmp.Close()
		return fmt.Errorf("unable to set temp settings permissions: %w", err)
	}
	if err := tmp.Close(); err != nil {
		return fmt.Errorf("unable to close temp settings file: %w", err)
	}
	if err := os.Rename(tmpName, s.SettingsPath); err != nil {
		return fmt.Errorf("unable to replace settings file: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,32 @@
package tasks
import (
"context"
"time"
"vctp/db"
)
// runAggregateJob wraps aggregation cron jobs with timeout, migration check, and circuit breaker semantics.
// The named return value err is deliberate: the deferred done(err) callback
// reads it, so the tracker records whatever error the job ultimately returned.
func (c *CronTask) runAggregateJob(ctx context.Context, jobName string, timeout time.Duration, fn func(context.Context) error) (err error) {
	jobCtx := ctx
	// A non-positive timeout means "no deadline"; otherwise bound the job.
	if timeout > 0 {
		var cancel context.CancelFunc
		jobCtx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	tracker := NewCronTracker(c.Database)
	done, skip, err := tracker.Start(jobCtx, jobName)
	if err != nil {
		return err
	}
	// Another instance of this job is already running; nothing to do.
	if skip {
		return nil
	}
	// Registered only after the skip check, so done is never invoked for a
	// run that did not actually start.
	defer func() { done(err) }()
	// Refuse to aggregate against a database whose migrations are incomplete.
	if err := db.CheckMigrationState(jobCtx, c.Database.DB()); err != nil {
		return err
	}
	return fn(jobCtx)
}

View File

@@ -0,0 +1,188 @@
package tasks
import (
	"context"
	"database/sql"
	"errors"
	"strings"
	"time"

	"vctp/db"

	"github.com/jmoiron/sqlx"
)
// NewCronTracker builds a CronTracker bound to the given database, caching the
// sqlx bind type for the active driver so later queries can be rebound cheaply.
func NewCronTracker(database db.Database) *CronTracker {
	return &CronTracker{
		db:       database,
		bindType: sqlx.BindType(database.DB().DriverName()),
	}
}
// ClearAllInProgress resets any stuck in-progress flags (e.g., after crashes).
// NOTE(review): this clears the flag for every job unconditionally — if more
// than one process instance shares this database, it would also clear flags
// for jobs legitimately running elsewhere; confirm single-instance deployment.
func (c *CronTracker) ClearAllInProgress(ctx context.Context) error {
	if err := c.ensureTable(ctx); err != nil {
		return err
	}
	_, err := c.db.DB().ExecContext(ctx, `UPDATE cron_status SET in_progress = FALSE`)
	return err
}
// ClearStale resets in_progress for a specific job if it has been running longer than maxAge.
// Per-driver SQL is needed because epoch arithmetic differs between SQLite
// (strftime) and Postgres (EXTRACT). Unrecognized drivers are a silent no-op.
// NOTE(review): the SQLite branch matches the driver name "sqlite"; the mattn
// driver registers as "sqlite3" and would fall through to the no-op default —
// confirm which driver the project registers.
func (c *CronTracker) ClearStale(ctx context.Context, job string, maxAge time.Duration) error {
	if err := c.ensureTable(ctx); err != nil {
		return err
	}
	driver := strings.ToLower(c.db.DB().DriverName())
	var query string
	switch driver {
	case "sqlite":
		query = `
UPDATE cron_status
SET in_progress = FALSE
WHERE job_name = ?
AND in_progress = TRUE
AND started_at > 0
AND (strftime('%s','now') - started_at) > ?
`
	case "pgx", "postgres":
		query = `
UPDATE cron_status
SET in_progress = FALSE
WHERE job_name = $1
AND in_progress = TRUE
AND started_at > 0
AND (EXTRACT(EPOCH FROM now())::BIGINT - started_at) > $2
`
	default:
		// Unknown driver: leave the row untouched rather than guess at SQL.
		return nil
	}
	_, err := c.db.DB().ExecContext(ctx, query, job, int64(maxAge.Seconds()))
	return err
}
// ensureTable creates the cron_status bookkeeping table if it does not exist.
// The previous implementation switched on the driver name but both branches
// carried byte-identical DDL, so the dead branching is collapsed into a
// single statement (the DDL is portable across SQLite and Postgres).
func (c *CronTracker) ensureTable(ctx context.Context) error {
	const ddl = `
CREATE TABLE IF NOT EXISTS cron_status (
job_name TEXT PRIMARY KEY,
started_at BIGINT NOT NULL,
ended_at BIGINT NOT NULL,
duration_ms BIGINT NOT NULL,
last_error TEXT,
in_progress BOOLEAN NOT NULL DEFAULT FALSE
);`
	_, err := c.db.DB().ExecContext(ctx, ddl)
	return err
}
// Start marks a job as in-progress; returns a completion callback and whether to skip because it's already running.
//
// Fixes over the previous version:
//  1. Any Scan error was treated as "row does not exist", so a transient query
//     failure silently overwrote the existing status row; now only
//     sql.ErrNoRows takes the insert path and real errors abort.
//  2. The first-ever run of a job inserted its row with in_progress = FALSE,
//     contradicting this method's documented contract; the insert now marks
//     the row in progress so concurrent starters observe the running state.
func (c *CronTracker) Start(ctx context.Context, job string) (func(error), bool, error) {
	if err := c.ensureTable(ctx); err != nil {
		return nil, false, err
	}
	conn := c.db.DB()
	now := time.Now().Unix()
	tx, err := conn.BeginTxx(ctx, nil)
	if err != nil {
		return nil, false, err
	}
	var inProgress bool
	query := sqlx.Rebind(c.bindType, `SELECT in_progress FROM cron_status WHERE job_name = ?`)
	err = tx.QueryRowContext(ctx, query, job).Scan(&inProgress)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		// First run for this job: create its row, marked in progress.
		if err := upsertCron(tx, c.bindType, job, now, true); err != nil {
			tx.Rollback()
			return nil, false, err
		}
	case err != nil:
		// A genuine query failure must abort, not clobber the status row.
		tx.Rollback()
		return nil, false, err
	case inProgress:
		// Another instance of this job is still running; tell caller to skip.
		tx.Rollback()
		return nil, true, nil
	default:
		if err := markCronStart(tx, c.bindType, job, now); err != nil {
			tx.Rollback()
			return nil, false, err
		}
	}
	if err := tx.Commit(); err != nil {
		return nil, false, err
	}
	// done records completion with a background context so it still runs even
	// if the job's own context has been cancelled.
	done := func(runErr error) {
		_ = c.finish(context.Background(), job, now, runErr)
	}
	return done, false, nil
}
// finish records the end of a job run: ended_at, duration, last error (NULL on
// success via nullableString in upsertCronFinish), and clears in_progress.
func (c *CronTracker) finish(ctx context.Context, job string, startedAt int64, runErr error) error {
	conn := c.db.DB()
	// Duration measured from the recorded start time, not from this call.
	duration := time.Since(time.Unix(startedAt, 0)).Milliseconds()
	tx, err := conn.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	lastErr := ""
	if runErr != nil {
		lastErr = runErr.Error()
	}
	err = upsertCronFinish(tx, c.bindType, job, duration, lastErr)
	if err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}
// upsertCron inserts a fresh cron_status row for job, or — if the row already
// exists — overwrites all of its bookkeeping fields with the new start state.
func upsertCron(tx *sqlx.Tx, bindType int, job string, startedAt int64, inProgress bool) error {
	query := `
INSERT INTO cron_status (job_name, started_at, ended_at, duration_ms, last_error, in_progress)
VALUES (?, ?, 0, 0, NULL, ?)
ON CONFLICT (job_name) DO UPDATE SET started_at = excluded.started_at, in_progress = excluded.in_progress, ended_at = excluded.ended_at, duration_ms = excluded.duration_ms, last_error = excluded.last_error
`
	_, err := tx.Exec(sqlx.Rebind(bindType, query), job, startedAt, inProgress)
	return err
}
// markCronStart flags an existing cron_status row as running and resets the
// end-of-run bookkeeping fields for the new run.
func markCronStart(tx *sqlx.Tx, bindType int, job string, startedAt int64) error {
	query := `
UPDATE cron_status
SET started_at = ?, in_progress = TRUE, ended_at = 0, duration_ms = 0, last_error = NULL
WHERE job_name = ?
`
	_, err := tx.Exec(sqlx.Rebind(bindType, query), startedAt, job)
	return err
}
// upsertCronFinish records run completion: end timestamp (now), duration, the
// last error (stored as SQL NULL when empty), and clears in_progress.
func upsertCronFinish(tx *sqlx.Tx, bindType int, job string, durationMS int64, lastErr string) error {
	query := `
UPDATE cron_status
SET ended_at = ?, duration_ms = ?, last_error = ?, in_progress = FALSE
WHERE job_name = ?
`
	_, err := tx.Exec(sqlx.Rebind(bindType, query), time.Now().Unix(), durationMS, nullableString(lastErr), job)
	return err
}
// nullableString maps the empty string to a SQL NULL (nil) and returns any
// other value unchanged, for use as a query argument.
func nullableString(s string) interface{} {
	if len(s) > 0 {
		return s
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,191 @@
package tasks
import (
"context"
"fmt"
"strings"
"vctp/db"
"github.com/jmoiron/sqlx"
)
// insertHourlyCache upserts snapshot rows into the vm_hourly_stats cache,
// keyed on (Vcenter, VmId, SnapshotTime). SQLite takes the INSERT OR REPLACE
// path; every other driver gets a Postgres-style ON CONFLICT ... DO UPDATE.
// All rows go through one prepared statement inside a single transaction,
// rolled back on the first failure.
// NOTE(review): the SQLite branch matches driver name "sqlite" exactly; the
// mattn driver registers as "sqlite3" — confirm which driver is in use.
func insertHourlyCache(ctx context.Context, dbConn *sqlx.DB, rows []InventorySnapshotRow) error {
	if len(rows) == 0 {
		return nil
	}
	// Make sure the cache table exists before preparing statements against it.
	if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
		return err
	}
	driver := strings.ToLower(dbConn.DriverName())
	conflict := ""
	verb := "INSERT INTO"
	if driver == "sqlite" {
		verb = "INSERT OR REPLACE INTO"
	} else {
		conflict = ` ON CONFLICT ("Vcenter","VmId","SnapshotTime") DO UPDATE SET
"VmUuid"=EXCLUDED."VmUuid",
"Name"=EXCLUDED."Name",
"CreationTime"=EXCLUDED."CreationTime",
"DeletionTime"=EXCLUDED."DeletionTime",
"ResourcePool"=EXCLUDED."ResourcePool",
"Datacenter"=EXCLUDED."Datacenter",
"Cluster"=EXCLUDED."Cluster",
"Folder"=EXCLUDED."Folder",
"ProvisionedDisk"=EXCLUDED."ProvisionedDisk",
"VcpuCount"=EXCLUDED."VcpuCount",
"RamGB"=EXCLUDED."RamGB",
"IsTemplate"=EXCLUDED."IsTemplate",
"PoweredOn"=EXCLUDED."PoweredOn",
"SrmPlaceholder"=EXCLUDED."SrmPlaceholder"`
	}
	cols := []string{
		"SnapshotTime", "Vcenter", "VmId", "VmUuid", "Name", "CreationTime", "DeletionTime", "ResourcePool",
		"Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount", "RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder",
	}
	bind := sqlx.BindType(dbConn.DriverName())
	// One "?" per column, trailing separator trimmed, then rebound per driver.
	placeholders := strings.TrimRight(strings.Repeat("?, ", len(cols)), ", ")
	stmtText := fmt.Sprintf(`%s vm_hourly_stats ("%s") VALUES (%s)%s`, verb, strings.Join(cols, `","`), placeholders, conflict)
	stmtText = sqlx.Rebind(bind, stmtText)
	tx, err := dbConn.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	stmt, err := tx.PreparexContext(ctx, stmtText)
	if err != nil {
		tx.Rollback()
		return err
	}
	defer stmt.Close()
	for _, r := range rows {
		// Argument order must match cols above exactly.
		args := []interface{}{
			r.SnapshotTime, r.Vcenter, r.VmId, r.VmUuid, r.Name, r.CreationTime, r.DeletionTime, r.ResourcePool,
			r.Datacenter, r.Cluster, r.Folder, r.ProvisionedDisk, r.VcpuCount, r.RamGB, r.IsTemplate, r.PoweredOn, r.SrmPlaceholder,
		}
		if _, err := stmt.ExecContext(ctx, args...); err != nil {
			tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}
// insertHourlyBatch writes snapshot rows into the named hourly snapshot table
// inside a single transaction. It first prepares an insert for the current
// column set; if that prepare fails it falls back to the legacy schema that
// still carries an IsPresent column (always written as the string "TRUE").
//
// The previous version duplicated the ~30-line row-insert loop verbatim in
// both the legacy and current paths; the loop is now a single closure that
// optionally appends the legacy argument. Behavior is unchanged.
func insertHourlyBatch(ctx context.Context, dbConn *sqlx.DB, tableName string, rows []InventorySnapshotRow) error {
	if len(rows) == 0 {
		return nil
	}
	if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
		return err
	}
	tx, err := dbConn.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	baseCols := []string{
		"InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
		"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount",
		"RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SnapshotTime",
	}
	bind := sqlx.BindType(dbConn.DriverName())
	// buildStmt prepares an INSERT for the given column list, rebound for the
	// active driver's placeholder style.
	buildStmt := func(cols []string) (*sqlx.Stmt, error) {
		colList := `"` + strings.Join(cols, `", "`) + `"`
		placeholders := strings.TrimRight(strings.Repeat("?, ", len(cols)), ", ")
		return tx.PreparexContext(ctx, sqlx.Rebind(bind, fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, tableName, colList, placeholders)))
	}
	// execAll inserts every row through stmt (argument order matches baseCols),
	// appending the legacy IsPresent value when requested, and commits on
	// success. The first failing row rolls back the whole transaction.
	execAll := func(stmt *sqlx.Stmt, legacy bool) error {
		defer stmt.Close()
		for _, row := range rows {
			args := []interface{}{
				row.InventoryId, row.Name, row.Vcenter, row.VmId, row.EventKey, row.CloudId,
				row.CreationTime, row.DeletionTime, row.ResourcePool, row.Datacenter, row.Cluster,
				row.Folder, row.ProvisionedDisk, row.VcpuCount, row.RamGB, row.IsTemplate,
				row.PoweredOn, row.SrmPlaceholder, row.VmUuid, row.SnapshotTime,
			}
			if legacy {
				args = append(args, "TRUE")
			}
			if _, err := stmt.ExecContext(ctx, args...); err != nil {
				tx.Rollback()
				return err
			}
		}
		return tx.Commit()
	}
	stmt, err := buildStmt(baseCols)
	if err != nil {
		// Fallback for legacy tables that still have IsPresent.
		withLegacy := append(append([]string{}, baseCols...), "IsPresent")
		stmt, err = buildStmt(withLegacy)
		if err != nil {
			tx.Rollback()
			return err
		}
		return execAll(stmt, true)
	}
	return execAll(stmt, false)
}
// dropSnapshotTable removes a snapshot table entirely. The name is validated
// first so only well-formed snapshot identifiers can be interpolated into the
// DDL (identifiers cannot be bound as parameters).
func dropSnapshotTable(ctx context.Context, dbConn *sqlx.DB, table string) error {
	if _, err := db.SafeTableName(table); err != nil {
		return err
	}
	_, err := dbConn.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", table))
	return err
}
// clearTable deletes every row from the named table. The name is validated
// first so only well-formed identifiers can be interpolated into the SQL.
func clearTable(ctx context.Context, dbConn *sqlx.DB, table string) error {
	if _, err := db.SafeTableName(table); err != nil {
		return err
	}
	if _, execErr := dbConn.ExecContext(ctx, fmt.Sprintf("DELETE FROM %s", table)); execErr != nil {
		return fmt.Errorf("failed to clear table %s: %w", table, execErr)
	}
	return nil
}

View File

@@ -0,0 +1,548 @@
package tasks
import (
"context"
"database/sql"
"errors"
"fmt"
"log/slog"
"strconv"
"strings"
"time"
"vctp/db"
"vctp/db/queries"
"github.com/jmoiron/sqlx"
)
// snapshotProbeLimiter serializes snapshot "has rows" probe queries: the
// buffered capacity of 1 means at most one probe runs at a time.
var snapshotProbeLimiter = make(chan struct{}, 1)

// acquireSnapshotProbe blocks until the probe slot is free or ctx is done.
// On success it returns a release function that MUST be called to free the
// slot; on context cancellation it returns ctx.Err().
func acquireSnapshotProbe(ctx context.Context) (func(), error) {
	select {
	case snapshotProbeLimiter <- struct{}{}:
		return func() { <-snapshotProbeLimiter }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// boolStringFromInterface normalizes a scanned database value into a string:
// nil -> "", strings and byte slices pass through, bools and (non-)zero
// int/int64 become "TRUE"/"FALSE", and anything else is fmt.Sprint'ed.
func boolStringFromInterface(value interface{}) string {
	asBool := func(b bool) string {
		if b {
			return "TRUE"
		}
		return "FALSE"
	}
	switch v := value.(type) {
	case nil:
		return ""
	case string:
		return v
	case []byte:
		return string(v)
	case bool:
		return asBool(v)
	case int:
		return asBool(v != 0)
	case int64:
		return asBool(v != 0)
	default:
		return fmt.Sprint(v)
	}
}
// latestHourlySnapshotBefore finds the most recent hourly snapshot table prior to the given time, skipping empty tables.
// Returns ("", nil) when no suitable table exists.
func latestHourlySnapshotBefore(ctx context.Context, dbConn *sqlx.DB, cutoff time.Time, logger *slog.Logger) (string, error) {
	tables, err := listLatestHourlyWithRows(ctx, dbConn, "", cutoff.Unix(), 1, logger)
	if err != nil || len(tables) == 0 {
		return "", err
	}
	return tables[0].Table, nil
}
// parseSnapshotTime extracts the unix suffix from an inventory_hourly table name.
// The boolean result is false when the prefix is absent or the remainder is
// not a base-10 integer.
func parseSnapshotTime(table string) (int64, bool) {
	const prefix = "inventory_hourly_"
	if !strings.HasPrefix(table, prefix) {
		return 0, false
	}
	if ts, err := strconv.ParseInt(table[len(prefix):], 10, 64); err == nil {
		return ts, true
	}
	return 0, false
}
// listLatestHourlyWithRows returns recent hourly snapshot tables (ordered desc by time) that have rows, optionally filtered by vcenter.
// Tables with a registered count of zero are skipped outright. When a vcenter
// filter is given and the registered count is unknown (NULL) or positive, a
// short (2s), serialized probe query confirms the table actually holds rows
// for that vCenter.
//
// Fix: the registry query previously used raw '?' placeholders without
// sqlx.Rebind, which fails on Postgres drivers (pgx expects $N); it is now
// rebound for the active driver like every other query in this file.
func listLatestHourlyWithRows(ctx context.Context, dbConn *sqlx.DB, vcenter string, beforeUnix int64, limit int, logger *slog.Logger) ([]snapshotTable, error) {
	if limit <= 0 {
		limit = 50
	}
	registryQuery := sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), `
SELECT table_name, snapshot_time, snapshot_count
FROM snapshot_registry
WHERE snapshot_type = 'hourly' AND snapshot_time < ?
ORDER BY snapshot_time DESC
LIMIT ?
`)
	rows, err := dbConn.QueryxContext(ctx, registryQuery, beforeUnix, limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var out []snapshotTable
	for rows.Next() {
		var name string
		var ts int64
		var count sql.NullInt64
		if scanErr := rows.Scan(&name, &ts, &count); scanErr != nil {
			continue
		}
		// Defense in depth: never trust table names read from the registry.
		if err := db.ValidateTableName(name); err != nil {
			continue
		}
		if count.Valid && count.Int64 == 0 {
			if logger != nil {
				logger.Debug("skipping snapshot table with zero count", "table", name, "snapshot_time", ts, "vcenter", vcenter)
			}
			continue
		}
		probed := false
		var probeErr error
		probeTimeout := false
		// If count is known and >0, trust it; if NULL, accept optimistically to avoid heavy probes.
		hasRows := !count.Valid || count.Int64 > 0
		start := time.Now()
		if vcenter != "" && hasRows {
			probed = true
			probeCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
			release, err := acquireSnapshotProbe(probeCtx)
			if err != nil {
				probeErr = err
				hasRows = false
				cancel()
			} else {
				vrows, qerr := querySnapshotRows(probeCtx, dbConn, name, []string{"VmId"}, `"Vcenter" = ? LIMIT 1`, vcenter)
				if qerr == nil {
					hasRows = vrows.Next()
					vrows.Close()
				} else {
					probeErr = qerr
					hasRows = false
				}
				release()
				cancel()
			}
			probeTimeout = errors.Is(probeErr, context.DeadlineExceeded) || errors.Is(probeErr, context.Canceled)
		}
		elapsed := time.Since(start)
		if logger != nil {
			logger.Debug("evaluated snapshot table", "table", name, "snapshot_time", ts, "snapshot_count", count, "probed", probed, "has_rows", hasRows, "elapsed", elapsed, "vcenter", vcenter, "probe_error", probeErr, "probe_timeout", probeTimeout)
		}
		if !hasRows {
			continue
		}
		out = append(out, snapshotTable{Table: name, Time: ts, Count: count})
	}
	return out, nil
}
// SnapshotTooSoon reports whether the gap between prev and curr is significantly shorter than expected.
// A zero timestamp on either side, or a non-positive expectation, always
// yields false (no basis for comparison).
func SnapshotTooSoon(prevUnix, currUnix int64, expectedSeconds int64) bool {
	comparable := prevUnix != 0 && currUnix != 0 && expectedSeconds > 0
	return comparable && currUnix-prevUnix < expectedSeconds
}
// querySnapshotRows builds a SELECT with proper rebind for the given table/columns/where.
// An empty columns slice selects *; otherwise each column is double-quoted.
// The where clause is appended verbatim (callers may embed e.g. LIMIT in it),
// with '?' placeholders rebound for the active driver.
func querySnapshotRows(ctx context.Context, dbConn *sqlx.DB, table string, columns []string, where string, args ...interface{}) (*sqlx.Rows, error) {
	// Table names cannot be bound as parameters, so validate before
	// interpolating into the SQL text.
	if err := db.ValidateTableName(table); err != nil {
		return nil, err
	}
	colExpr := "*"
	if len(columns) > 0 {
		colExpr = `"` + strings.Join(columns, `","`) + `"`
	}
	query := fmt.Sprintf(`SELECT %s FROM %s`, colExpr, table)
	if strings.TrimSpace(where) != "" {
		query = fmt.Sprintf(`%s WHERE %s`, query, where)
	}
	query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
	return dbConn.QueryxContext(ctx, query, args...)
}
// updateDeletionTimeInSnapshot stamps DeletionTime on a VM's row in the given
// hourly snapshot table. The match column is chosen by priority — VmId, then
// VmUuid, then Name (first non-empty wins); with no identifier at all it is a
// no-op. Rows whose DeletionTime is already set to an earlier-or-equal value
// are left untouched. Returns the number of rows updated.
func updateDeletionTimeInSnapshot(ctx context.Context, dbConn *sqlx.DB, table, vcenter, vmID, vmUUID, name string, deletionUnix int64) (int64, error) {
	if err := db.ValidateTableName(table); err != nil {
		return 0, err
	}
	var matchColumn, matchValue string
	switch {
	case vmID != "":
		matchColumn, matchValue = "VmId", vmID
	case vmUUID != "":
		matchColumn, matchValue = "VmUuid", vmUUID
	case name != "":
		matchColumn, matchValue = "Name", name
	default:
		// No identifier to match on: report zero updates without touching DB.
		return 0, nil
	}
	stmt := fmt.Sprintf(`UPDATE %s SET "DeletionTime" = ? WHERE "Vcenter" = ? AND "%s" = ? AND ("DeletionTime" IS NULL OR "DeletionTime" = 0 OR "DeletionTime" > ?)`, table, matchColumn)
	stmt = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), stmt)
	result, err := dbConn.ExecContext(ctx, stmt, deletionUnix, vcenter, matchValue, deletionUnix)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}
// updateDeletionTimeInHourlyCache mirrors updateDeletionTimeInSnapshot but
// targets the vm_hourly_stats cache for one specific SnapshotTime. Match
// priority is VmId, then VmUuid, then Name; a non-positive snapshotUnix or no
// identifier is a no-op. Returns the number of rows updated.
func updateDeletionTimeInHourlyCache(ctx context.Context, dbConn *sqlx.DB, vcenter, vmID, vmUUID, name string, snapshotUnix, deletionUnix int64) (int64, error) {
	if snapshotUnix <= 0 {
		return 0, nil
	}
	matchColumn := ""
	matchValue := ""
	switch {
	case vmID != "":
		matchColumn = "VmId"
		matchValue = vmID
	case vmUUID != "":
		matchColumn = "VmUuid"
		matchValue = vmUUID
	case name != "":
		matchColumn = "Name"
		matchValue = name
	default:
		return 0, nil
	}
	// Only clobber DeletionTime when it is unset (NULL/0) or later than the
	// new value, so earlier recorded deletions win.
	query := fmt.Sprintf(`UPDATE vm_hourly_stats SET "DeletionTime" = ? WHERE "Vcenter" = ? AND "SnapshotTime" = ? AND "%s" = ? AND ("DeletionTime" IS NULL OR "DeletionTime" = 0 OR "DeletionTime" > ?)`, matchColumn)
	query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
	result, err := dbConn.ExecContext(ctx, query, deletionUnix, vcenter, snapshotUnix, matchValue, deletionUnix)
	if err != nil {
		return 0, err
	}
	rowsAffected, err := result.RowsAffected()
	if err != nil {
		return 0, err
	}
	return rowsAffected, nil
}
// markMissingFromPrevious marks VMs that were present in the previous snapshot but missing now.
// For every row of prevTable (for this vCenter) it checks presence in the
// current snapshot by VmId, then VmUuid, then Name; a UUID+Cluster match in
// the inventory is treated as a rename, not a deletion. Each genuinely
// missing VM gets a deletion time (existing inventory value, or this
// snapshot's time) recorded in the inventory, the lifecycle cache, the
// previous snapshot table, and the vm_hourly_stats cache. Returns the number
// of missing VMs and whether prevTable itself was modified. All failures are
// logged and skipped rather than aborting the scan.
func (c *CronTask) markMissingFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, snapshotTime time.Time,
	currentByID map[string]InventorySnapshotRow, currentByUuid map[string]struct{}, currentByName map[string]struct{},
	invByID map[string]queries.Inventory, invByUuid map[string]queries.Inventory, invByName map[string]queries.Inventory) (int, bool) {
	if err := db.ValidateTableName(prevTable); err != nil {
		return 0, false
	}
	// prevRow is the minimal projection scanned from the previous snapshot.
	type prevRow struct {
		VmId         sql.NullString `db:"VmId"`
		VmUuid       sql.NullString `db:"VmUuid"`
		Name         string         `db:"Name"`
		Cluster      sql.NullString `db:"Cluster"`
		Datacenter   sql.NullString `db:"Datacenter"`
		DeletionTime sql.NullInt64  `db:"DeletionTime"`
	}
	rows, err := querySnapshotRows(ctx, dbConn, prevTable, []string{"VmId", "VmUuid", "Name", "Cluster", "Datacenter", "DeletionTime"}, `"Vcenter" = ?`, vcenter)
	if err != nil {
		c.Logger.Warn("failed to read previous snapshot for deletion detection", "error", err, "table", prevTable, "vcenter", vcenter)
		return 0, false
	}
	defer rows.Close()
	missing := 0
	tableUpdated := false
	for rows.Next() {
		var r prevRow
		if err := rows.StructScan(&r); err != nil {
			continue
		}
		vmID := r.VmId.String
		uuid := r.VmUuid.String
		name := r.Name
		cluster := r.Cluster.String
		// Presence check against current snapshot: ID, then UUID, then name.
		found := false
		if vmID != "" {
			if _, ok := currentByID[vmID]; ok {
				found = true
			}
		}
		if !found && uuid != "" {
			if _, ok := currentByUuid[uuid]; ok {
				found = true
			}
		}
		if !found && name != "" {
			if _, ok := currentByName[name]; ok {
				found = true
			}
		}
		// If the name is missing but UUID+Cluster still exists in inventory/current, treat it as present (rename, not delete).
		if !found && uuid != "" && cluster != "" {
			if inv, ok := invByUuid[uuid]; ok && strings.EqualFold(inv.Cluster.String, cluster) {
				found = true
			}
		}
		if found {
			continue
		}
		// Resolve the inventory record for the missing VM (ID, then UUID,
		// then name); without one there is nothing to mark deleted.
		var inv queries.Inventory
		var ok bool
		if vmID != "" {
			inv, ok = invByID[vmID]
		}
		if !ok && uuid != "" {
			inv, ok = invByUuid[uuid]
		}
		if !ok && name != "" {
			inv, ok = invByName[name]
		}
		if !ok {
			continue
		}
		delTime := inv.DeletionTime
		if !delTime.Valid {
			// No deletion time recorded yet: stamp this snapshot's time onto
			// the inventory record.
			delTime = sql.NullInt64{Int64: snapshotTime.Unix(), Valid: true}
			if err := c.Database.Queries().InventoryMarkDeleted(ctx, queries.InventoryMarkDeletedParams{
				DeletionTime:   delTime,
				VmId:           inv.VmId,
				DatacenterName: inv.Datacenter,
			}); err != nil {
				c.Logger.Warn("failed to mark inventory record deleted from previous snapshot", "error", err, "vm_id", inv.VmId.String)
			}
		}
		// Also update lifecycle cache so deletion time is available for rollups.
		vmUUID := ""
		if inv.VmUuid.Valid {
			vmUUID = inv.VmUuid.String
		}
		if err := db.MarkVmDeletedWithDetails(ctx, dbConn, vcenter, inv.VmId.String, vmUUID, inv.Name, inv.Cluster.String, delTime.Int64); err != nil {
			c.Logger.Warn("failed to mark lifecycle cache deleted from previous snapshot", "error", err, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
		}
		if rowsAffected, err := updateDeletionTimeInSnapshot(ctx, dbConn, prevTable, vcenter, inv.VmId.String, vmUUID, inv.Name, delTime.Int64); err != nil {
			c.Logger.Warn("failed to update hourly snapshot deletion time", "error", err, "table", prevTable, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
		} else if rowsAffected > 0 {
			tableUpdated = true
			c.Logger.Debug("updated hourly snapshot deletion time", "table", prevTable, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter, "deletion_time", delTime.Int64)
			// Propagate the deletion into the vm_hourly_stats cache row for
			// the same snapshot instant, when the table name encodes one.
			if snapUnix, ok := parseSnapshotTime(prevTable); ok {
				if cacheRows, err := updateDeletionTimeInHourlyCache(ctx, dbConn, vcenter, inv.VmId.String, vmUUID, inv.Name, snapUnix, delTime.Int64); err != nil {
					c.Logger.Warn("failed to update hourly cache deletion time", "error", err, "snapshot_time", snapUnix, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
				} else if cacheRows > 0 {
					c.Logger.Debug("updated hourly cache deletion time", "snapshot_time", snapUnix, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter, "deletion_time", delTime.Int64)
				}
			}
		}
		c.Logger.Debug("Detected VM missing compared to previous snapshot", "name", inv.Name, "vm_id", inv.VmId.String, "vm_uuid", inv.VmUuid.String, "vcenter", vcenter, "snapshot_time", snapshotTime, "prev_table", prevTable)
		missing++
	}
	return missing, tableUpdated
}
// countNewFromPrevious returns how many VMs are present in the current snapshot but not in the previous snapshot.
// The matching rules (VmId, then VmUuid, then Name — any match means "not
// new") and the fallbacks (every current VM counts as new when prevTable is
// invalid or unreadable) are exactly those of listNewFromPrevious, whose
// body this function previously duplicated line-for-line; it now delegates
// and takes the length of the result.
func countNewFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, current map[string]InventorySnapshotRow) int {
	return len(listNewFromPrevious(ctx, dbConn, prevTable, vcenter, current))
}
// listNewFromPrevious returns the rows present now but not in the previous snapshot.
// A current VM is considered "not new" if its VmId, VmUuid, or Name appears in
// prevTable for this vCenter. If prevTable fails validation or cannot be
// queried, every current row is returned (best effort: treat all as new).
func listNewFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, current map[string]InventorySnapshotRow) []InventorySnapshotRow {
	if err := db.ValidateTableName(prevTable); err != nil {
		all := make([]InventorySnapshotRow, 0, len(current))
		for _, cur := range current {
			all = append(all, cur)
		}
		return all
	}
	query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name" FROM %s WHERE "Vcenter" = ?`, prevTable)
	query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
	rows, err := dbConn.QueryxContext(ctx, query, vcenter)
	if err != nil {
		all := make([]InventorySnapshotRow, 0, len(current))
		for _, cur := range current {
			all = append(all, cur)
		}
		return all
	}
	defer rows.Close()
	// Index the previous snapshot's identifiers for O(1) membership checks.
	prevIDs := make(map[string]struct{})
	prevUUIDs := make(map[string]struct{})
	prevNames := make(map[string]struct{})
	for rows.Next() {
		var vmID, vmUUID, name string
		if scanErr := rows.Scan(&vmID, &vmUUID, &name); scanErr != nil {
			continue
		}
		if vmID != "" {
			prevIDs[vmID] = struct{}{}
		}
		if vmUUID != "" {
			prevUUIDs[vmUUID] = struct{}{}
		}
		if name != "" {
			prevNames[name] = struct{}{}
		}
	}
	newRows := make([]InventorySnapshotRow, 0)
	for _, cur := range current {
		id := cur.VmId.String
		uuid := cur.VmUuid.String
		name := cur.Name
		// Any identifier match with the previous snapshot means "not new".
		if id != "" {
			if _, ok := prevIDs[id]; ok {
				continue
			}
		}
		if uuid != "" {
			if _, ok := prevUUIDs[uuid]; ok {
				continue
			}
		}
		if name != "" {
			if _, ok := prevNames[name]; ok {
				continue
			}
		}
		newRows = append(newRows, cur)
	}
	return newRows
}
// findVMInHourlySnapshots searches recent hourly snapshot tables for a VM by ID for the given vCenter.
// extraTables are searched first (e.g., known previous snapshot tables).
// Returns the matched row, the table it was found in, and whether a match was
// made. Invalid table names are skipped, and a 5-second overall timeout bounds
// the whole search.
// NOTE(review): the registry query fetches up to 20 tables but the loop stops
// after probing 10 — confirm whether the limits were meant to agree.
func findVMInHourlySnapshots(ctx context.Context, dbConn *sqlx.DB, vcenter string, vmID string, extraTables ...string) (InventorySnapshotRow, string, bool) {
	if vmID == "" {
		return InventorySnapshotRow{}, "", false
	}
	// Use a short timeout to avoid hanging if the DB is busy.
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	// First search any explicit tables provided.
	for _, table := range extraTables {
		if table == "" {
			continue
		}
		if err := db.ValidateTableName(table); err != nil {
			continue
		}
		query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name","Datacenter","Cluster" FROM %s WHERE "Vcenter" = ? AND "VmId" = ? LIMIT 1`, table)
		query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
		var row InventorySnapshotRow
		// A scan error (including "no rows") just means "not in this table".
		if err := dbConn.QueryRowxContext(ctx, query, vcenter, vmID).Scan(&row.VmId, &row.VmUuid, &row.Name, &row.Datacenter, &row.Cluster); err == nil {
			return row, table, true
		}
	}
	// Try a handful of most recent hourly tables from the registry.
	rows, err := dbConn.QueryxContext(ctx, `
SELECT table_name
FROM snapshot_registry
WHERE snapshot_type = 'hourly'
ORDER BY snapshot_time DESC
LIMIT 20
`)
	if err != nil {
		return InventorySnapshotRow{}, "", false
	}
	defer rows.Close()
	checked := 0
	for rows.Next() {
		var table string
		if scanErr := rows.Scan(&table); scanErr != nil {
			continue
		}
		if err := db.ValidateTableName(table); err != nil {
			continue
		}
		query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name","Datacenter","Cluster" FROM %s WHERE "Vcenter" = ? AND "VmId" = ? LIMIT 1`, table)
		query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
		var row InventorySnapshotRow
		if err := dbConn.QueryRowxContext(ctx, query, vcenter, vmID).Scan(&row.VmId, &row.VmUuid, &row.Name, &row.Datacenter, &row.Cluster); err == nil {
			return row, table, true
		}
		checked++
		if checked >= 10 { // limit work
			break
		}
	}
	return InventorySnapshotRow{}, "", false
}

View File

@@ -0,0 +1,290 @@
package tasks
import (
"context"
"database/sql"
"log/slog"
"time"
"vctp/db"
"github.com/jmoiron/sqlx"
)
// presenceKeys builds lookup keys for vm presence comparison.
// Each non-empty identifier is namespaced ("id:", "uuid:", "name:") so the
// three identifier spaces cannot collide inside a shared presence set.
// Key order is id, uuid, name; empty identifiers produce no key.
func presenceKeys(vmID, vmUUID, name string) []string {
	candidates := []struct {
		prefix string
		value  string
	}{
		{"id:", vmID},
		{"uuid:", vmUUID},
		{"name:", name},
	}
	keys := make([]string, 0, len(candidates))
	for _, c := range candidates {
		if c.value == "" {
			continue
		}
		keys = append(keys, c.prefix+c.value)
	}
	return keys
}
// backfillLifecycleDeletionsToday looks for VMs in the lifecycle cache that are not in the current inventory,
// have no DeletedAt, and determines their deletion time from today's hourly snapshots, optionally checking the next snapshot (next day) to confirm.
// It returns any hourly snapshot tables that were updated with deletion times.
//
// Parameters:
//   - vcenter: the vCenter whose lifecycle cache is examined.
//   - snapshotTime: any instant inside the day to backfill; truncated to day start.
//   - present: current inventory keyed by VmId; VMs present here are never candidates.
//
// A nil slice with nil error means nothing needed updating.
func backfillLifecycleDeletionsToday(ctx context.Context, logger *slog.Logger, dbConn *sqlx.DB, vcenter string, snapshotTime time.Time, present map[string]InventorySnapshotRow) ([]string, error) {
	// Scope all snapshot scanning to the single calendar day [dayStart, dayEnd).
	dayStart := truncateDate(snapshotTime)
	dayEnd := dayStart.Add(24 * time.Hour)
	candidates, err := loadLifecycleCandidates(ctx, dbConn, vcenter, present)
	if err != nil || len(candidates) == 0 {
		return nil, err
	}
	tables, err := listHourlyTablesForDay(ctx, dbConn, dayStart, dayEnd)
	if err != nil {
		return nil, err
	}
	if len(tables) == 0 {
		return nil, nil
	}
	// Presence keys from the first snapshot after the day (if one exists) let us
	// confirm a deletion that otherwise rests on a single end-of-day miss.
	nextPresence := make(map[string]struct{})
	if nextTable, nextErr := nextSnapshotAfter(ctx, dbConn, dayEnd, vcenter); nextErr == nil && nextTable != "" {
		nextPresence = loadPresenceKeys(ctx, dbConn, nextTable, vcenter)
	}
	updatedTables := make(map[string]struct{})
	for i := range candidates {
		// Pointer into the slice: findDeletionInTables may enrich the candidate's
		// uuid/name/cluster from snapshot rows as it scans.
		cand := &candidates[i]
		deletion, firstMiss, lastSeenTable := findDeletionInTables(ctx, dbConn, tables, vcenter, cand)
		if deletion == 0 && len(nextPresence) > 0 && firstMiss > 0 {
			if !isPresent(nextPresence, *cand) {
				// Single miss at end of day, confirmed by next-day absence.
				deletion = firstMiss
				logger.Debug("cross-day deletion inferred from next snapshot", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "deletion", deletion)
			}
		}
		if deletion > 0 {
			// Persist the deletion to the lifecycle cache first; skip the snapshot
			// backfill for this candidate if that write fails.
			if err := db.MarkVmDeletedWithDetails(ctx, dbConn, vcenter, cand.vmID, cand.vmUUID, cand.name, cand.cluster, deletion); err != nil {
				logger.Warn("lifecycle backfill mark deleted failed", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "deletion", deletion, "error", err)
				continue
			}
			if lastSeenTable != "" {
				// Write the deletion time into the last hourly snapshot where the VM
				// was seen, and mirror it into the hourly cache for that snapshot time.
				if rowsAffected, err := updateDeletionTimeInSnapshot(ctx, dbConn, lastSeenTable, vcenter, cand.vmID, cand.vmUUID, cand.name, deletion); err != nil {
					logger.Warn("lifecycle backfill failed to update hourly snapshot deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "table", lastSeenTable, "deletion", deletion, "error", err)
				} else if rowsAffected > 0 {
					updatedTables[lastSeenTable] = struct{}{}
					logger.Debug("lifecycle backfill updated hourly snapshot deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "table", lastSeenTable, "deletion", deletion)
					if snapUnix, ok := parseSnapshotTime(lastSeenTable); ok {
						if cacheRows, err := updateDeletionTimeInHourlyCache(ctx, dbConn, vcenter, cand.vmID, cand.vmUUID, cand.name, snapUnix, deletion); err != nil {
							logger.Warn("lifecycle backfill failed to update hourly cache deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "snapshot_time", snapUnix, "deletion", deletion, "error", err)
						} else if cacheRows > 0 {
							logger.Debug("lifecycle backfill updated hourly cache deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "snapshot_time", snapUnix, "deletion", deletion)
						}
					}
				}
			}
			logger.Debug("lifecycle backfill applied", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "deletion", deletion)
		}
	}
	if len(updatedTables) == 0 {
		return nil, nil
	}
	tablesUpdated := make([]string, 0, len(updatedTables))
	for table := range updatedTables {
		tablesUpdated = append(tablesUpdated, table)
	}
	return tablesUpdated, nil
}
// lifecycleCandidate is a VM from vm_lifecycle_cache with no recorded deletion
// time that is absent from the current inventory — i.e. a candidate for
// deletion-time backfill.
type lifecycleCandidate struct {
	vmID    string // "VmId"; loadLifecycleCandidates skips rows where this is empty
	vmUUID  string // "VmUuid"; may be filled in later from snapshot rows (see findDeletionInTables)
	name    string // "Name"; may be filled in later from snapshot rows
	cluster string // "Cluster"; may be filled in later from snapshot rows
}
// loadLifecycleCandidates returns lifecycle-cache VMs for a vCenter that have
// no recorded deletion time ("DeletedAt" NULL or 0) and are absent from the
// current inventory.
//
// present is keyed by VmId; rows whose VmId is empty or still present are
// skipped. Rows that fail to scan are skipped individually, but an error that
// terminates row iteration is returned instead of being silently dropped.
func loadLifecycleCandidates(ctx context.Context, dbConn *sqlx.DB, vcenter string, present map[string]InventorySnapshotRow) ([]lifecycleCandidate, error) {
	rows, err := dbConn.QueryxContext(ctx, `
		SELECT "VmId","VmUuid","Name","Cluster"
		FROM vm_lifecycle_cache
		WHERE "Vcenter" = ? AND ("DeletedAt" IS NULL OR "DeletedAt" = 0)
	`, vcenter)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var cands []lifecycleCandidate
	for rows.Next() {
		var vmID, vmUUID, name, cluster sql.NullString
		if scanErr := rows.Scan(&vmID, &vmUUID, &name, &cluster); scanErr != nil {
			continue
		}
		if vmID.String == "" {
			continue
		}
		if _, ok := present[vmID.String]; ok {
			continue // still present, skip
		}
		cands = append(cands, lifecycleCandidate{
			vmID:    vmID.String,
			vmUUID:  vmUUID.String,
			name:    name.String,
			cluster: cluster.String,
		})
	}
	// Surface iteration errors: without this check a mid-scan failure would
	// masquerade as "no candidates" and silently skip backfill work.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return cands, nil
}
// snapshotTable is one row of snapshot_registry: a snapshot table name, the
// time it was taken, and an optionally-recorded row count.
type snapshotTable struct {
	Table string        `db:"table_name"`     // physical table name; validated before use
	Time  int64         `db:"snapshot_time"`  // unix seconds
	Count sql.NullInt64 `db:"snapshot_count"` // NULL when no count was recorded
}
// listHourlyTablesForDay returns the registered hourly snapshot tables whose
// snapshot_time falls within [dayStart, dayEnd), ordered ascending by time.
// Tables with invalid names are skipped; tables whose recorded snapshot_count
// is zero or negative are skipped (a NULL count is optimistically included).
func listHourlyTablesForDay(ctx context.Context, dbConn *sqlx.DB, dayStart, dayEnd time.Time) ([]snapshotTable, error) {
	log := loggerFromCtx(ctx, nil)
	rows, err := dbConn.QueryxContext(ctx, `
		SELECT table_name, snapshot_time, snapshot_count
		FROM snapshot_registry
		WHERE snapshot_type = 'hourly' AND snapshot_time >= ? AND snapshot_time < ?
		ORDER BY snapshot_time ASC
	`, dayStart.Unix(), dayEnd.Unix())
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var tables []snapshotTable
	for rows.Next() {
		var t snapshotTable
		if err := rows.StructScan(&t); err != nil {
			continue
		}
		if err := db.ValidateTableName(t.Table); err != nil {
			continue
		}
		// Trust snapshot_count if present; otherwise optimistically include to avoid long probes.
		if t.Count.Valid && t.Count.Int64 <= 0 {
			if log != nil {
				log.Debug("skipping snapshot table with zero count", "table", t.Table, "snapshot_time", t.Time)
			}
			continue
		}
		tables = append(tables, t)
	}
	// Surface iteration errors: a truncated list would otherwise silently make
	// the day look shorter than it was.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return tables, nil
}
// nextSnapshotAfter returns the name of the first registered hourly snapshot
// table at or after the given time that contains at least one row for the
// vCenter. An empty name with nil error means no suitable table exists.
func nextSnapshotAfter(ctx context.Context, dbConn *sqlx.DB, after time.Time, vcenter string) (string, error) {
	regRows, err := dbConn.QueryxContext(ctx, `
		SELECT table_name
		FROM snapshot_registry
		WHERE snapshot_type = 'hourly' AND snapshot_time >= ?
		ORDER BY snapshot_time ASC
		LIMIT 1
	`, after.Unix())
	if err != nil {
		return "", err
	}
	defer regRows.Close()
	for regRows.Next() {
		var tableName string
		if scanErr := regRows.Scan(&tableName); scanErr != nil {
			continue
		}
		if db.ValidateTableName(tableName) != nil {
			continue
		}
		// ensure the snapshot table actually has entries for this vcenter
		probe, probeErr := querySnapshotRows(ctx, dbConn, tableName, []string{"VmId"}, `"Vcenter" = ? LIMIT 1`, vcenter)
		if probeErr != nil {
			continue
		}
		found := probe.Next()
		probe.Close()
		if found {
			return tableName, nil
		}
	}
	return "", nil
}
// loadPresenceKeys loads every presence key (id/uuid/name, see presenceKeys)
// for the given vCenter from one snapshot table. Lookup is best-effort: on
// query or scan failure the returned set is simply empty or partial.
func loadPresenceKeys(ctx context.Context, dbConn *sqlx.DB, table, vcenter string) map[string]struct{} {
	presence := map[string]struct{}{}
	rows, err := querySnapshotRows(ctx, dbConn, table, []string{"VmId", "VmUuid", "Name"}, `"Vcenter" = ?`, vcenter)
	if err != nil {
		return presence
	}
	defer rows.Close()
	for rows.Next() {
		var id, uuid, vmName sql.NullString
		if scanErr := rows.Scan(&id, &uuid, &vmName); scanErr != nil {
			continue
		}
		for _, key := range presenceKeys(id.String, uuid.String, vmName.String) {
			presence[key] = struct{}{}
		}
	}
	return presence
}
// isPresent reports whether any of the candidate's identifiers (id, uuid, or
// name) appears in a presence set built by loadPresenceKeys.
func isPresent(presence map[string]struct{}, cand lifecycleCandidate) bool {
	keys := presenceKeys(cand.vmID, cand.vmUUID, cand.name)
	for _, key := range keys {
		_, found := presence[key]
		if found {
			return true
		}
	}
	return false
}
// findDeletionInTables walks ordered hourly tables for a vCenter and returns the first confirmed deletion time
// (requiring two consecutive misses), the time of the first miss for cross-day handling, and the last table where
// the VM was seen so we can backfill deletion time into that snapshot.
//
// cand is mutated in place: empty uuid/name/cluster fields are filled from the
// first snapshot row where they are available. Tables must be ordered by time
// ascending (as produced by listHourlyTablesForDay) for the miss logic to hold.
// A confirmed deletion returns (firstMiss, firstMiss, lastSeenTable); otherwise
// (0, firstMiss, lastSeenTable), where firstMiss may be 0 if never missed
// after being seen.
func findDeletionInTables(ctx context.Context, dbConn *sqlx.DB, tables []snapshotTable, vcenter string, cand *lifecycleCandidate) (int64, int64, string) {
	var lastSeen int64
	var lastSeenTable string
	var firstMiss int64
	for i, tbl := range tables {
		rows, err := querySnapshotRows(ctx, dbConn, tbl.Table, []string{"VmId", "VmUuid", "Name", "Cluster"}, `"Vcenter" = ? AND "VmId" = ?`, vcenter, cand.vmID)
		if err != nil {
			// Unreadable table: treated as neither seen nor missed.
			continue
		}
		seen := false
		if rows.Next() {
			var vmId, vmUuid, name, cluster sql.NullString
			if scanErr := rows.Scan(&vmId, &vmUuid, &name, &cluster); scanErr == nil {
				seen = true
				lastSeen = tbl.Time
				lastSeenTable = tbl.Table
				// Opportunistically enrich the candidate with details missing
				// from the lifecycle cache.
				if cand.vmUUID == "" && vmUuid.Valid {
					cand.vmUUID = vmUuid.String
				}
				if cand.name == "" && name.Valid {
					cand.name = name.String
				}
				if cand.cluster == "" && cluster.Valid {
					cand.cluster = cluster.String
				}
			}
		}
		rows.Close()
		// A miss only counts after the VM has been seen at least once today.
		if lastSeen > 0 && !seen && firstMiss == 0 {
			firstMiss = tbl.Time
			if i+1 < len(tables) {
				// Require a second consecutive miss before confirming deletion;
				// a single miss can be a transient collection gap.
				if seen2, _ := candSeenInTable(ctx, dbConn, tables[i+1].Table, vcenter, cand.vmID); !seen2 {
					return firstMiss, firstMiss, lastSeenTable
				}
			}
		}
	}
	return 0, firstMiss, lastSeenTable
}
// candSeenInTable reports whether the VM (by VmId) appears in the given
// snapshot table for the vCenter. Query errors are returned to the caller.
func candSeenInTable(ctx context.Context, dbConn *sqlx.DB, table, vcenter, vmID string) (bool, error) {
	probe, err := querySnapshotRows(ctx, dbConn, table, []string{"VmId"}, `"Vcenter" = ? AND "VmId" = ? LIMIT 1`, vcenter, vmID)
	if err != nil {
		return false, err
	}
	defer probe.Close()
	seen := probe.Next()
	return seen, nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -3,11 +3,9 @@ package tasks
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"log/slog"
"runtime"
"strings"
"time"
"vctp/db/queries"
@@ -20,6 +18,10 @@ import (
// use gocron to check vcenters for VMs or updates we don't know about
func (c *CronTask) RunVcenterPoll(ctx context.Context, logger *slog.Logger) error {
startedAt := time.Now()
defer func() {
logger.Info("Vcenter poll job finished", "duration", time.Since(startedAt))
}()
var matchFound bool
// reload settings in case vcenter list has changed
@@ -106,7 +108,7 @@ func (c *CronTask) RunVcenterPoll(ctx context.Context, logger *slog.Logger) erro
}
}
c.Logger.Debug("Finished checking vcenter", "url", url)
vc.Logout()
_ = vc.Logout(ctx)
}
c.Logger.Debug("Finished polling vcenters")
@@ -126,8 +128,6 @@ func (c *CronTask) UpdateVmInventory(vmObj *mo.VirtualMachine, vc *vcenter.Vcent
existingUpdateFound bool
)
// TODO - how to prevent creating a new record every polling cycle?
params := queries.CreateUpdateParams{
InventoryId: sql.NullInt64{Int64: dbVm.Iid, Valid: dbVm.Iid > 0},
}
@@ -177,12 +177,8 @@ func (c *CronTask) UpdateVmInventory(vmObj *mo.VirtualMachine, vc *vcenter.Vcent
}
}
// TODO - should we bother to check if disk space has changed?
if updateType != "unknown" {
// TODO query updates table to see if there is already an update of this type and the new value
// Check if we already have an existing update record for this same change
checkParams := queries.GetVmUpdatesParams{
InventoryId: sql.NullInt64{Int64: dbVm.Iid, Valid: dbVm.Iid > 0},
UpdateType: updateType,
@@ -237,7 +233,6 @@ func (c *CronTask) UpdateVmInventory(vmObj *mo.VirtualMachine, vc *vcenter.Vcent
// add sleep to slow down mass VM additions
utils.SleepWithContext(ctx, (10 * time.Millisecond))
}
}
return nil
@@ -262,6 +257,11 @@ func (c *CronTask) AddVmToInventory(vmObject *mo.VirtualMachine, vc *vcenter.Vce
return errors.New("can't process empty vm object")
}
if strings.HasPrefix(vmObject.Name, "vCLS-") {
c.Logger.Debug("Skipping internal vCLS VM", "vm_name", vmObject.Name)
return nil
}
c.Logger.Debug("found VM")
/*
@@ -400,6 +400,7 @@ func (c *CronTask) AddVmToInventory(vmObject *mo.VirtualMachine, vc *vcenter.Vce
return nil
}
/*
// prettyPrint comes from https://gist.github.com/sfate/9d45f6c5405dc4c9bf63bf95fe6d1a7c
func prettyPrint(args ...interface{}) {
var caller string
@@ -427,3 +428,4 @@ func prettyPrint(args ...interface{}) {
fmt.Printf("%s%s\n", prefix, string(s))
}
}
*/

View File

@@ -0,0 +1,739 @@
package tasks
import (
"context"
"database/sql"
"fmt"
"log/slog"
"os"
"runtime"
"sort"
"strings"
"sync"
"time"
"vctp/db"
"vctp/internal/metrics"
"vctp/internal/report"
)
// RunVcenterMonthlyAggregate summarizes the previous month's daily snapshots.
// It runs under the configured monthly job timeout (default 20 minutes) and
// logs its total duration when finished.
func (c *CronTask) RunVcenterMonthlyAggregate(ctx context.Context, logger *slog.Logger) (err error) {
	timeout := durationFromSeconds(c.Settings.Values.Settings.MonthlyJobTimeoutSeconds, 20*time.Minute)
	return c.runAggregateJob(ctx, "monthly_aggregate", timeout, func(jobCtx context.Context) error {
		begin := time.Now()
		defer func() {
			logger.Info("Monthly summary job finished", "duration", time.Since(begin))
		}()
		// First day of the previous month, local time. time.Date normalizes a
		// zero month value to December of the prior year, so no special case
		// is needed for January.
		now := time.Now()
		prevMonthStart := time.Date(now.Year(), now.Month()-1, 1, 0, 0, 0, 0, now.Location())
		return c.aggregateMonthlySummary(jobCtx, prevMonthStart, false)
	})
}
// AggregateMonthlySummary is the exported entry point for aggregating a single
// month on demand. month selects the target month; force re-runs aggregation
// even when a populated summary table for that month already exists.
func (c *CronTask) AggregateMonthlySummary(ctx context.Context, month time.Time, force bool) error {
	return c.aggregateMonthlySummary(ctx, month, force)
}
// aggregateMonthlySummary builds the inventory_monthly_summary_<yyyymm> table
// for targetMonth from either hourly or daily snapshots, depending on the
// configured MonthlyAggregationGranularity (default "hourly").
//
// Two execution paths exist: a Go-based in-memory aggregation (opt-in via
// MONTHLY_AGG_GO=1, or forced for hourly granularity on sqlite where the SQL
// path is slow) and a SQL UNION-based path. If the Go path fails it falls
// through to the SQL path. Unless force is true, an already-populated monthly
// table short-circuits the whole job.
func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time.Time, force bool) error {
	jobStart := time.Now()
	if err := report.EnsureSnapshotRegistry(ctx, c.Database); err != nil {
		return err
	}
	// Normalize granularity; anything other than hourly/daily falls back to hourly.
	granularity := strings.ToLower(strings.TrimSpace(c.Settings.Values.Settings.MonthlyAggregationGranularity))
	if granularity == "" {
		granularity = "hourly"
	}
	if granularity != "hourly" && granularity != "daily" {
		c.Logger.Warn("unknown monthly aggregation granularity; defaulting to hourly", "granularity", granularity)
		granularity = "hourly"
	}
	// Month window [monthStart, monthEnd) in targetMonth's location.
	monthStart := time.Date(targetMonth.Year(), targetMonth.Month(), 1, 0, 0, 0, 0, targetMonth.Location())
	monthEnd := monthStart.AddDate(0, 1, 0)
	dbConn := c.Database.DB()
	db.SetPostgresWorkMem(ctx, dbConn, c.Settings.Values.Settings.PostgresWorkMemMB)
	driver := strings.ToLower(dbConn.DriverName())
	useGoAgg := os.Getenv("MONTHLY_AGG_GO") == "1"
	// NOTE(review): this compares against "sqlite"; confirm the registered
	// driver name matches (some sqlite drivers register as "sqlite3").
	if !useGoAgg && granularity == "hourly" && driver == "sqlite" {
		c.Logger.Warn("SQL monthly aggregation is slow on sqlite; overriding to Go path", "granularity", granularity)
		useGoAgg = true
	}
	// Collect the snapshot tables feeding the aggregation, filtered to the
	// month window and to tables that actually contain rows.
	var snapshots []report.SnapshotRecord
	var unionColumns []string
	if granularity == "daily" {
		dailySnapshots, err := report.SnapshotRecordsWithFallback(ctx, c.Database, "daily", "inventory_daily_summary_", "20060102", monthStart, monthEnd)
		if err != nil {
			return err
		}
		dailySnapshots = filterRecordsInRange(dailySnapshots, monthStart, monthEnd)
		dailySnapshots = filterSnapshotsWithRows(ctx, dbConn, dailySnapshots)
		snapshots = dailySnapshots
		unionColumns = monthlyUnionColumns
	} else {
		hourlySnapshots, err := report.SnapshotRecordsWithFallback(ctx, c.Database, "hourly", "inventory_hourly_", "epoch", monthStart, monthEnd)
		if err != nil {
			return err
		}
		hourlySnapshots = filterRecordsInRange(hourlySnapshots, monthStart, monthEnd)
		hourlySnapshots = filterSnapshotsWithRows(ctx, dbConn, hourlySnapshots)
		snapshots = hourlySnapshots
		unionColumns = summaryUnionColumns
	}
	if len(snapshots) == 0 {
		return fmt.Errorf("no %s snapshot tables found for %s", granularity, targetMonth.Format("2006-01"))
	}
	monthlyTable, err := monthlySummaryTableName(targetMonth)
	if err != nil {
		return err
	}
	if err := db.EnsureSummaryTable(ctx, dbConn, monthlyTable); err != nil {
		return err
	}
	// Idempotency: skip if already populated, unless force — then clear first.
	if rowsExist, err := db.TableHasRows(ctx, dbConn, monthlyTable); err != nil {
		return err
	} else if rowsExist && !force {
		c.Logger.Debug("Monthly summary already exists, skipping aggregation", "summary_table", monthlyTable)
		return nil
	} else if rowsExist && force {
		if err := clearTable(ctx, dbConn, monthlyTable); err != nil {
			return err
		}
	}
	// Optional Go-based aggregation path.
	if useGoAgg {
		if granularity == "daily" {
			c.Logger.Debug("Using go implementation of monthly aggregation (daily)")
			if err := c.aggregateMonthlySummaryGo(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
				c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
			} else {
				metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
				c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
				return nil
			}
		} else if granularity == "hourly" {
			c.Logger.Debug("Using go implementation of monthly aggregation (hourly)")
			if err := c.aggregateMonthlySummaryGoHourly(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
				c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
			} else {
				metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
				c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
				return nil
			}
		} else {
			c.Logger.Warn("MONTHLY_AGG_GO is set but granularity is unsupported; using SQL path", "granularity", granularity)
		}
	}
	// SQL path: UNION all snapshot tables (templates excluded) and insert the
	// aggregated result into the monthly table.
	tables := make([]string, 0, len(snapshots))
	for _, snapshot := range snapshots {
		tables = append(tables, snapshot.TableName)
	}
	unionQuery, err := buildUnionQuery(tables, unionColumns, templateExclusionFilter())
	if err != nil {
		return err
	}
	// Totals are informational only; failure here does not abort the job.
	monthlyTotals, err := db.SnapshotTotalsForUnion(ctx, dbConn, unionQuery)
	if err != nil {
		c.Logger.Warn("unable to calculate monthly totals", "error", err, "month", targetMonth.Format("2006-01"))
	} else {
		c.Logger.Info("Monthly snapshot totals",
			"month", targetMonth.Format("2006-01"),
			"vm_count", monthlyTotals.VmCount,
			"vcpu_total", monthlyTotals.VcpuTotal,
			"ram_total_gb", monthlyTotals.RamTotal,
			"disk_total_gb", monthlyTotals.DiskTotal,
		)
	}
	// Daily inputs use the monthly insert builder; hourly inputs reuse the
	// daily summary insert shape.
	var insertQuery string
	if granularity == "daily" {
		insertQuery, err = db.BuildMonthlySummaryInsert(monthlyTable, unionQuery)
	} else {
		insertQuery, err = db.BuildDailySummaryInsert(monthlyTable, unionQuery)
	}
	if err != nil {
		return err
	}
	if _, err := dbConn.ExecContext(ctx, insertQuery); err != nil {
		c.Logger.Error("failed to aggregate monthly inventory", "error", err, "month", targetMonth.Format("2006-01"))
		return err
	}
	// Post-processing: lifecycle deletion times and presence fractions are
	// best-effort; failures are logged but do not fail the aggregation.
	if applied, err := db.ApplyLifecycleDeletionToSummary(ctx, dbConn, monthlyTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to apply lifecycle deletions to monthly summary", "error", err, "table", monthlyTable)
	} else {
		c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", applied)
	}
	if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, monthlyTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window", "error", err, "table", monthlyTable)
	}
	rowCount, err := db.TableRowCount(ctx, dbConn, monthlyTable)
	if err != nil {
		c.Logger.Warn("unable to count monthly summary rows", "error", err, "table", monthlyTable)
	}
	if err := report.RegisterSnapshot(ctx, c.Database, "monthly", monthlyTable, targetMonth, rowCount); err != nil {
		c.Logger.Warn("failed to register monthly snapshot", "error", err, "table", monthlyTable)
	}
	db.AnalyzeTableIfPostgres(ctx, dbConn, monthlyTable)
	// Report generation failure fails the job (and is recorded in metrics).
	if err := c.generateReport(ctx, monthlyTable); err != nil {
		c.Logger.Warn("failed to generate monthly report", "error", err, "table", monthlyTable)
		metrics.RecordMonthlyAggregation(time.Since(jobStart), err)
		return err
	}
	c.Logger.Debug("Finished monthly inventory aggregation", "summary_table", monthlyTable)
	metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
	return nil
}
// monthlySummaryTableName builds the validated monthly summary table name for
// a month, e.g. "inventory_monthly_summary_202601".
func monthlySummaryTableName(t time.Time) (string, error) {
	name := "inventory_monthly_summary_" + t.Format("200601")
	return db.SafeTableName(name)
}
// aggregateMonthlySummaryGoHourly aggregates hourly snapshots directly into the monthly summary table.
//
// Data is taken from the vm_hourly_stats cache when it exists and is non-empty;
// otherwise the hourly snapshot tables are scanned in parallel. Deletion times
// come from three sources, applied in order: the lifecycle cache, the
// inventory, and finally inference from two consecutive missing snapshots
// after a VM's last sighting. Post-processing (presence window, ANALYZE,
// registry, report) mirrors the SQL path.
func (c *CronTask) aggregateMonthlySummaryGoHourly(ctx context.Context, monthStart, monthEnd time.Time, summaryTable string, hourlySnapshots []report.SnapshotRecord) error {
	jobStart := time.Now()
	dbConn := c.Database.DB()
	// The caller may have left rows behind (force path); start from empty.
	if err := clearTable(ctx, dbConn, summaryTable); err != nil {
		return err
	}
	if len(hourlySnapshots) == 0 {
		return fmt.Errorf("no hourly snapshot tables found for %s", monthStart.Format("2006-01"))
	}
	totalSamples := len(hourlySnapshots)
	var (
		aggMap    map[dailyAggKey]*dailyAggVal
		snapTimes []int64
	)
	// Preferred source: the hourly cache table, which avoids scanning every
	// hourly snapshot table. Cache failures fall back to table scans.
	if db.TableExists(ctx, dbConn, "vm_hourly_stats") {
		cacheAgg, cacheTimes, cacheErr := c.scanHourlyCache(ctx, monthStart, monthEnd)
		if cacheErr != nil {
			c.Logger.Warn("failed to use hourly cache, falling back to table scans", "error", cacheErr)
		} else if len(cacheAgg) > 0 {
			c.Logger.Debug("using hourly cache for monthly aggregation", "month", monthStart.Format("2006-01"), "snapshots", len(cacheTimes), "vm_count", len(cacheAgg))
			aggMap = cacheAgg
			snapTimes = cacheTimes
			totalSamples = len(cacheTimes)
		}
	}
	// Fallback source: parallel scan of the hourly snapshot tables themselves.
	if aggMap == nil {
		var errScan error
		aggMap, errScan = c.scanHourlyTablesParallel(ctx, hourlySnapshots)
		if errScan != nil {
			return errScan
		}
		c.Logger.Debug("scanned hourly tables for monthly aggregation", "month", monthStart.Format("2006-01"), "tables", len(hourlySnapshots), "vm_count", len(aggMap))
		if len(aggMap) == 0 {
			return fmt.Errorf("no VM records aggregated for %s", monthStart.Format("2006-01"))
		}
		snapTimes = make([]int64, 0, len(hourlySnapshots))
		for _, snap := range hourlySnapshots {
			snapTimes = append(snapTimes, snap.SnapshotTime.Unix())
		}
		sort.Slice(snapTimes, func(i, j int) bool { return snapTimes[i] < snapTimes[j] })
	}
	// Deletion times, most authoritative source first.
	lifecycleDeletions := c.applyLifecycleDeletions(ctx, aggMap, monthStart, monthEnd)
	c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", lifecycleDeletions)
	inventoryDeletions := c.applyInventoryDeletions(ctx, aggMap, monthStart, monthEnd)
	c.Logger.Info("Monthly aggregation deletion times", "source_inventory", inventoryDeletions)
	// Last resort: infer a deletion when a VM misses two consecutive snapshots
	// after its last sighting; the deletion time is the first missed snapshot.
	if len(snapTimes) > 0 {
		maxSnap := snapTimes[len(snapTimes)-1]
		inferredDeletions := 0
		for _, v := range aggMap {
			if v.deletion != 0 {
				continue
			}
			consecutiveMisses := 0
			firstMiss := int64(0)
			for _, t := range snapTimes {
				if t <= v.lastSeen {
					continue
				}
				// A reappearance resets the miss streak.
				if _, ok := v.seen[t]; ok {
					consecutiveMisses = 0
					firstMiss = 0
					continue
				}
				consecutiveMisses++
				if firstMiss == 0 {
					firstMiss = t
				}
				if consecutiveMisses >= 2 {
					v.deletion = firstMiss
					inferredDeletions++
					break
				}
			}
			// A single trailing miss is left unresolved (cross-day handling
			// happens elsewhere); log it for diagnostics.
			if v.deletion == 0 && v.lastSeen < maxSnap && firstMiss > 0 {
				c.Logger.Debug("pending deletion inference (insufficient consecutive misses)", "vm_id", v.key.VmId, "vm_uuid", v.key.VmUuid, "name", v.key.Name, "last_seen", v.lastSeen, "first_missing_snapshot", firstMiss)
			}
		}
		c.Logger.Info("Monthly aggregation deletion times", "source_inferred", inferredDeletions)
	}
	totalSamplesByVcenter := sampleCountsByVcenter(aggMap)
	if err := c.insertDailyAggregates(ctx, summaryTable, aggMap, totalSamples, totalSamplesByVcenter); err != nil {
		return err
	}
	// Best-effort post-processing; failures below are logged, not fatal,
	// except for report generation.
	if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window (Go hourly)", "error", err, "table", summaryTable)
	}
	db.AnalyzeTableIfPostgres(ctx, dbConn, summaryTable)
	rowCount, err := db.TableRowCount(ctx, dbConn, summaryTable)
	if err != nil {
		c.Logger.Warn("unable to count monthly summary rows (Go hourly)", "error", err, "table", summaryTable)
	}
	if err := report.RegisterSnapshot(ctx, c.Database, "monthly", summaryTable, monthStart, rowCount); err != nil {
		c.Logger.Warn("failed to register monthly snapshot (Go hourly)", "error", err, "table", summaryTable)
	}
	if err := c.generateReport(ctx, summaryTable); err != nil {
		c.Logger.Warn("failed to generate monthly report (Go hourly)", "error", err, "table", summaryTable)
		return err
	}
	c.Logger.Debug("Finished monthly inventory aggregation (Go hourly)",
		"summary_table", summaryTable,
		"duration", time.Since(jobStart),
		"tables_scanned", len(hourlySnapshots),
		"rows_written", rowCount,
		"total_samples", totalSamples,
	)
	return nil
}
// aggregateMonthlySummaryGo mirrors the SQL-based monthly aggregation but performs the work in Go,
// reading daily summaries in parallel and reducing them to a single monthly summary table.
//
// If the parallel table scan yields nothing, the vm_daily_rollup cache is
// tried as a secondary source. After inserting aggregates, lifecycle deletion
// times, creation/deletion refinement from the daily union, and presence
// fractions are applied best-effort (logged, non-fatal); report generation
// failure is fatal.
func (c *CronTask) aggregateMonthlySummaryGo(ctx context.Context, monthStart, monthEnd time.Time, summaryTable string, dailySnapshots []report.SnapshotRecord) error {
	jobStart := time.Now()
	dbConn := c.Database.DB()
	// Start from an empty summary table (force path may have left rows).
	if err := clearTable(ctx, dbConn, summaryTable); err != nil {
		return err
	}
	// Build union query for lifecycle refinement after inserts.
	dailyTables := make([]string, 0, len(dailySnapshots))
	for _, snapshot := range dailySnapshots {
		dailyTables = append(dailyTables, snapshot.TableName)
	}
	unionQuery, err := buildUnionQuery(dailyTables, monthlyUnionColumns, templateExclusionFilter())
	if err != nil {
		return err
	}
	aggMap, err := c.scanDailyTablesParallel(ctx, dailySnapshots)
	if err != nil {
		return err
	}
	// Secondary source: the daily rollup cache, used only when the table scan
	// produced nothing.
	if len(aggMap) == 0 {
		cacheAgg, cacheErr := c.scanDailyRollup(ctx, monthStart, monthEnd)
		if cacheErr == nil && len(cacheAgg) > 0 {
			aggMap = cacheAgg
		} else if cacheErr != nil {
			c.Logger.Warn("failed to read daily rollup cache; using table scan", "error", cacheErr)
		}
	}
	if len(aggMap) == 0 {
		return fmt.Errorf("no VM records aggregated for %s", monthStart.Format("2006-01"))
	}
	if err := c.insertMonthlyAggregates(ctx, summaryTable, aggMap); err != nil {
		return err
	}
	if applied, err := db.ApplyLifecycleDeletionToSummary(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to apply lifecycle deletions to monthly summary (Go)", "error", err, "table", summaryTable)
	} else {
		c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", applied)
	}
	if err := db.RefineCreationDeletionFromUnion(ctx, dbConn, summaryTable, unionQuery); err != nil {
		c.Logger.Warn("failed to refine creation/deletion times (monthly Go)", "error", err, "table", summaryTable)
	}
	if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
		c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window (Go)", "error", err, "table", summaryTable)
	}
	db.AnalyzeTableIfPostgres(ctx, dbConn, summaryTable)
	rowCount, err := db.TableRowCount(ctx, dbConn, summaryTable)
	if err != nil {
		c.Logger.Warn("unable to count monthly summary rows", "error", err, "table", summaryTable)
	}
	if err := report.RegisterSnapshot(ctx, c.Database, "monthly", summaryTable, monthStart, rowCount); err != nil {
		c.Logger.Warn("failed to register monthly snapshot", "error", err, "table", summaryTable)
	}
	if err := c.generateReport(ctx, summaryTable); err != nil {
		c.Logger.Warn("failed to generate monthly report (Go)", "error", err, "table", summaryTable)
		return err
	}
	c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", summaryTable, "duration", time.Since(jobStart))
	return nil
}
// scanDailyTablesParallel scans every daily summary snapshot concurrently and
// merges the per-table aggregates into a single map keyed by VM identity.
// Worker count is the CPU count (minimum 2), capped by the number of
// snapshots. A failed table scan is logged and skipped; it never fails the job.
func (c *CronTask) scanDailyTablesParallel(ctx context.Context, snapshots []report.SnapshotRecord) (map[monthlyAggKey]*monthlyAggVal, error) {
	merged := make(map[monthlyAggKey]*monthlyAggVal, 1024)
	var mergeMu sync.Mutex
	workerCount := runtime.NumCPU()
	if workerCount < 2 {
		workerCount = 2
	}
	if workerCount > len(snapshots) {
		workerCount = len(snapshots)
	}
	queue := make(chan report.SnapshotRecord, len(snapshots))
	var wg sync.WaitGroup
	for w := 0; w < workerCount; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for snap := range queue {
				tableAgg, scanErr := c.scanDailyTable(ctx, snap)
				if scanErr != nil {
					c.Logger.Warn("failed to scan daily summary", "table", snap.TableName, "error", scanErr)
					continue
				}
				// Merge this table's results under the shared lock.
				mergeMu.Lock()
				for key, val := range tableAgg {
					if existing, ok := merged[key]; ok {
						mergeMonthlyAgg(existing, val)
					} else {
						merged[key] = val
					}
				}
				mergeMu.Unlock()
			}
		}()
	}
	for _, snap := range snapshots {
		queue <- snap
	}
	close(queue)
	wg.Wait()
	return merged, nil
}
// mergeMonthlyAgg folds one aggregate (from) into a running monthly aggregate
// (into). Creation and deletion keep the earliest known positive timestamp;
// descriptive "last known" fields are taken from whichever side has the later
// snapshot time; sample counters and weighted sums simply accumulate.
func mergeMonthlyAgg(into, from *monthlyAggVal) {
	// Earliest positive creation time wins.
	if from.creation > 0 && (into.creation == 0 || from.creation < into.creation) {
		into.creation = from.creation
	}
	// If creation is unknown in all daily summaries, leave it zero for reports (VM trace handles approximation separately).
	// Earliest positive deletion time wins.
	if from.deletion > 0 && (into.deletion == 0 || from.deletion < into.deletion) {
		into.deletion = from.deletion
	}
	// The newer snapshot supplies the descriptive fields; inventoryId only
	// replaces the existing value when it is set.
	if from.lastSnapshot.After(into.lastSnapshot) {
		into.lastSnapshot = from.lastSnapshot
		if from.inventoryId != 0 {
			into.inventoryId = from.inventoryId
		}
		into.resourcePool = from.resourcePool
		into.datacenter = from.datacenter
		into.cluster = from.cluster
		into.folder = from.folder
		into.isTemplate = from.isTemplate
		into.poweredOn = from.poweredOn
		into.srmPlaceholder = from.srmPlaceholder
		into.provisioned = from.provisioned
		into.vcpuCount = from.vcpuCount
		into.ramGB = from.ramGB
		into.eventKey = from.eventKey
		into.cloudId = from.cloudId
	}
	// Accumulate sample counts and weighted sums.
	into.samplesPresent += from.samplesPresent
	into.totalSamples += from.totalSamples
	into.sumVcpu += from.sumVcpu
	into.sumRam += from.sumRam
	into.sumDisk += from.sumDisk
	into.tinWeighted += from.tinWeighted
	into.bronzeWeighted += from.bronzeWeighted
	into.silverWeighted += from.silverWeighted
	into.goldWeighted += from.goldWeighted
}
// scanDailyTable reads one daily summary snapshot table and converts each VM
// row into a monthlyAggVal keyed by (Vcenter, VmId, VmUuid, Name).
//
// Because daily summaries store averages rather than raw sums, the per-day
// sums are reconstructed: totalSamplesDay = SamplesPresent / AvgIsPresent
// (AvgIsPresent assumed to be a presence fraction in (0, 1] — verify against
// the daily aggregation writer), and each Avg* / Pool*Pct column is multiplied
// back by totalSamplesDay to obtain the weighted sums that mergeMonthlyAgg
// accumulates across days. Template rows (IsTemplate "true"/"1") are skipped;
// rows that fail to scan are logged and skipped.
func (c *CronTask) scanDailyTable(ctx context.Context, snap report.SnapshotRecord) (map[monthlyAggKey]*monthlyAggVal, error) {
	dbConn := c.Database.DB()
	// snap.TableName comes from the snapshot registry; it is interpolated
	// because table names cannot be bound parameters.
	query := fmt.Sprintf(`
		SELECT
			"InventoryId",
			"Name","Vcenter","VmId","VmUuid","EventKey","CloudId","ResourcePool","Datacenter","Cluster","Folder",
			COALESCE("ProvisionedDisk",0) AS disk,
			COALESCE("VcpuCount",0) AS vcpu,
			COALESCE("RamGB",0) AS ram,
			COALESCE("CreationTime",0) AS creation,
			COALESCE("DeletionTime",0) AS deletion,
			COALESCE("SamplesPresent",0) AS samples_present,
			"AvgVcpuCount","AvgRamGB","AvgProvisionedDisk","AvgIsPresent",
			"PoolTinPct","PoolBronzePct","PoolSilverPct","PoolGoldPct",
			"Tin","Bronze","Silver","Gold","IsTemplate","PoweredOn","SrmPlaceholder"
		FROM %s
	`, snap.TableName)
	rows, err := dbConn.QueryxContext(ctx, query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	result := make(map[monthlyAggKey]*monthlyAggVal, 256)
	for rows.Next() {
		var (
			inventoryId                               sql.NullInt64
			name, vcenter, vmId, vmUuid               string
			eventKey, cloudId                         sql.NullString
			resourcePool, datacenter, cluster, folder sql.NullString
			isTemplate, poweredOn, srmPlaceholder     sql.NullString
			disk, avgVcpu, avgRam, avgDisk            sql.NullFloat64
			avgIsPresent                              sql.NullFloat64
			poolTin, poolBronze, poolSilver, poolGold sql.NullFloat64
			tinPct, bronzePct, silverPct, goldPct     sql.NullFloat64
			vcpu, ram                                 sql.NullInt64
			creation, deletion                        sql.NullInt64
			samplesPresent                            sql.NullInt64
		)
		if err := rows.Scan(
			&inventoryId,
			&name, &vcenter, &vmId, &vmUuid, &eventKey, &cloudId, &resourcePool, &datacenter, &cluster, &folder,
			&disk, &vcpu, &ram, &creation, &deletion, &samplesPresent,
			&avgVcpu, &avgRam, &avgDisk, &avgIsPresent,
			&poolTin, &poolBronze, &poolSilver, &poolGold,
			&tinPct, &bronzePct, &silverPct, &goldPct,
			&isTemplate, &poweredOn, &srmPlaceholder,
		); err != nil {
			c.Logger.Warn("failed to scan daily summary row", "table", snap.TableName, "error", err)
			continue
		}
		// Exclude templates from the monthly aggregate.
		templateVal := strings.TrimSpace(isTemplate.String)
		if strings.EqualFold(templateVal, "true") || templateVal == "1" {
			continue
		}
		key := monthlyAggKey{Vcenter: vcenter, VmId: vmId, VmUuid: vmUuid, Name: name}
		agg := &monthlyAggVal{
			key:            key,
			inventoryId:    inventoryId.Int64,
			eventKey:       eventKey.String,
			cloudId:        cloudId.String,
			resourcePool:   resourcePool.String,
			datacenter:     datacenter.String,
			cluster:        cluster.String,
			folder:         folder.String,
			isTemplate:     isTemplate.String,
			poweredOn:      poweredOn.String,
			srmPlaceholder: srmPlaceholder.String,
			provisioned:    disk.Float64,
			vcpuCount:      vcpu.Int64,
			ramGB:          ram.Int64,
			creation:       creation.Int64,
			deletion:       deletion.Int64,
			lastSnapshot:   snap.SnapshotTime,
			samplesPresent: samplesPresent.Int64,
		}
		// Reconstruct the day's total sample count from the stored presence
		// fraction; with no usable AvgIsPresent, assume the VM was present in
		// every sample (totalSamples == samplesPresent).
		totalSamplesDay := float64(samplesPresent.Int64)
		if avgIsPresent.Valid && avgIsPresent.Float64 > 0 {
			totalSamplesDay = float64(samplesPresent.Int64) / avgIsPresent.Float64
		}
		agg.totalSamples = totalSamplesDay
		// Undo the averaging: multiply each average back by the day's sample
		// count so the values sum correctly across days.
		if avgVcpu.Valid {
			agg.sumVcpu = avgVcpu.Float64 * totalSamplesDay
		}
		if avgRam.Valid {
			agg.sumRam = avgRam.Float64 * totalSamplesDay
		}
		if avgDisk.Valid {
			agg.sumDisk = avgDisk.Float64 * totalSamplesDay
		}
		// Pool percentages (0-100) become sample-weighted fractions.
		if poolTin.Valid {
			agg.tinWeighted = (poolTin.Float64 / 100.0) * totalSamplesDay
		}
		if poolBronze.Valid {
			agg.bronzeWeighted = (poolBronze.Float64 / 100.0) * totalSamplesDay
		}
		if poolSilver.Valid {
			agg.silverWeighted = (poolSilver.Float64 / 100.0) * totalSamplesDay
		}
		if poolGold.Valid {
			agg.goldWeighted = (poolGold.Float64 / 100.0) * totalSamplesDay
		}
		result[key] = agg
	}
	return result, rows.Err()
}
// scanDailyRollup aggregates monthly data from vm_daily_rollup cache.
//
// Rows with "Date" in [start, end) are read and merged per VM identity
// (Vcenter, VmId, VmUuid, Name) via mergeMonthlyAgg. Template VMs
// ("IsTemplate" of "true"/"1", case-insensitive) are excluded. When the
// vm_daily_rollup table does not exist, an empty map is returned with no
// error. Rows that fail to scan are logged and skipped; the returned error
// is the row-iteration error from rows.Err().
func (c *CronTask) scanDailyRollup(ctx context.Context, start, end time.Time) (map[monthlyAggKey]*monthlyAggVal, error) {
	dbConn := c.Database.DB()
	if !db.TableExists(ctx, dbConn, "vm_daily_rollup") {
		return map[monthlyAggKey]*monthlyAggVal{}, nil
	}
	query := `
	SELECT
		"Date","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime",
		"SamplesPresent","TotalSamples","SumVcpu","SumRam","SumDisk",
		"TinHits","BronzeHits","SilverHits","GoldHits",
		"LastResourcePool","LastDatacenter","LastCluster","LastFolder",
		"LastProvisionedDisk","LastVcpuCount","LastRamGB","IsTemplate","PoweredOn","SrmPlaceholder"
	FROM vm_daily_rollup
	WHERE "Date" >= ? AND "Date" < ?
	`
	bind := dbConn.Rebind(query)
	rows, err := dbConn.QueryxContext(ctx, bind, start.Unix(), end.Unix())
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	agg := make(map[monthlyAggKey]*monthlyAggVal, 512)
	for rows.Next() {
		var (
			date                                      sql.NullInt64
			vcenter, vmId, vmUuid, name               string
			creation, deletion                        sql.NullInt64
			samplesPresent, totalSamples              sql.NullInt64
			sumVcpu, sumRam, sumDisk                  sql.NullFloat64
			tinHits, bronzeHits, silverHits, goldHits sql.NullInt64
			lastPool, lastDc, lastCluster, lastFolder sql.NullString
			lastDisk, lastVcpu, lastRam               sql.NullFloat64
			isTemplate, poweredOn, srmPlaceholder     sql.NullString
		)
		if err := rows.Scan(
			&date, &vcenter, &vmId, &vmUuid, &name, &creation, &deletion,
			&samplesPresent, &totalSamples, &sumVcpu, &sumRam, &sumDisk,
			&tinHits, &bronzeHits, &silverHits, &goldHits,
			&lastPool, &lastDc, &lastCluster, &lastFolder,
			&lastDisk, &lastVcpu, &lastRam, &isTemplate, &poweredOn, &srmPlaceholder,
		); err != nil {
			// Previously this skipped malformed rows silently; log so the
			// failure is visible, matching the daily-summary scanner.
			c.Logger.Warn("failed to scan daily rollup row", "table", "vm_daily_rollup", "error", err)
			continue
		}
		templateVal := strings.TrimSpace(isTemplate.String)
		if strings.EqualFold(templateVal, "true") || templateVal == "1" {
			continue
		}
		key := monthlyAggKey{Vcenter: vcenter, VmId: vmId, VmUuid: vmUuid, Name: name}
		val := &monthlyAggVal{
			key:            key,
			resourcePool:   lastPool.String,
			datacenter:     lastDc.String,
			cluster:        lastCluster.String,
			folder:         lastFolder.String,
			isTemplate:     isTemplate.String,
			poweredOn:      poweredOn.String,
			srmPlaceholder: srmPlaceholder.String,
			provisioned:    lastDisk.Float64,
			vcpuCount:      int64(lastVcpu.Float64),
			ramGB:          int64(lastRam.Float64),
			creation:       creation.Int64,
			deletion:       deletion.Int64,
			// NOTE(review): a NULL "Date" yields the unix epoch here;
			// confirm rollup rows always carry a date.
			lastSnapshot:   time.Unix(date.Int64, 0),
			samplesPresent: samplesPresent.Int64,
			totalSamples:   float64(totalSamples.Int64),
			sumVcpu:        sumVcpu.Float64,
			sumRam:         sumRam.Float64,
			sumDisk:        sumDisk.Float64,
			tinWeighted:    float64(tinHits.Int64),
			bronzeWeighted: float64(bronzeHits.Int64),
			silverWeighted: float64(silverHits.Int64),
			goldWeighted:   float64(goldHits.Int64),
		}
		if existing, ok := agg[key]; ok {
			mergeMonthlyAgg(existing, val)
		} else {
			agg[key] = val
		}
	}
	return agg, rows.Err()
}
// insertMonthlyAggregates writes one row per aggregated VM into summaryTable
// inside a single transaction, using one prepared INSERT for all rows.
// The first failed insert rolls back the transaction and returns the error.
//
// The average columns (AvgVcpuCount, AvgRamGB, AvgProvisionedDisk,
// AvgIsPresent) and the pool percentage columns are only populated when
// totalSamples > 0; otherwise they are bound as SQL NULL. InventoryId is
// NULL when the aggregate carries the 0 sentinel.
//
// NOTE(review): the Tin/Bronze/Silver/Gold columns receive the same values
// as PoolTinPct..PoolGoldPct (the pct args are bound twice below) —
// presumably legacy duplicate columns; confirm the duplication is intentional.
func (c *CronTask) insertMonthlyAggregates(ctx context.Context, summaryTable string, aggMap map[monthlyAggKey]*monthlyAggVal) error {
	dbConn := c.Database.DB()
	// Column order must match the positional args passed to ExecContext below.
	columns := []string{
		"InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
		"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount",
		"RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SamplesPresent",
		"AvgVcpuCount", "AvgRamGB", "AvgProvisionedDisk", "AvgIsPresent",
		"PoolTinPct", "PoolBronzePct", "PoolSilverPct", "PoolGoldPct",
		"Tin", "Bronze", "Silver", "Gold",
	}
	placeholders := make([]string, len(columns))
	for i := range columns {
		placeholders[i] = "?"
	}
	// Rebind converts the '?' placeholders to the driver's native style.
	stmtText := fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, summaryTable, strings.Join(columns, ","), strings.Join(placeholders, ","))
	stmtText = dbConn.Rebind(stmtText)
	tx, err := dbConn.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	stmt, err := tx.PreparexContext(ctx, stmtText)
	if err != nil {
		tx.Rollback()
		return err
	}
	defer stmt.Close()
	for _, v := range aggMap {
		// inventoryId 0 means "no inventory row"; map it to NULL.
		inventoryVal := sql.NullInt64{}
		if v.inventoryId != 0 {
			inventoryVal = sql.NullInt64{Int64: v.inventoryId, Valid: true}
		}
		avgVcpu := sql.NullFloat64{}
		avgRam := sql.NullFloat64{}
		avgDisk := sql.NullFloat64{}
		avgIsPresent := sql.NullFloat64{}
		tinPct := sql.NullFloat64{}
		bronzePct := sql.NullFloat64{}
		silverPct := sql.NullFloat64{}
		goldPct := sql.NullFloat64{}
		if v.totalSamples > 0 {
			// Sample-weighted sums divided by the total sample count give the
			// monthly averages; weighted pool fractions become percentages.
			avgVcpu = sql.NullFloat64{Float64: v.sumVcpu / v.totalSamples, Valid: true}
			avgRam = sql.NullFloat64{Float64: v.sumRam / v.totalSamples, Valid: true}
			avgDisk = sql.NullFloat64{Float64: v.sumDisk / v.totalSamples, Valid: true}
			avgIsPresent = sql.NullFloat64{Float64: float64(v.samplesPresent) / v.totalSamples, Valid: true}
			tinPct = sql.NullFloat64{Float64: 100.0 * v.tinWeighted / v.totalSamples, Valid: true}
			bronzePct = sql.NullFloat64{Float64: 100.0 * v.bronzeWeighted / v.totalSamples, Valid: true}
			silverPct = sql.NullFloat64{Float64: 100.0 * v.silverWeighted / v.totalSamples, Valid: true}
			goldPct = sql.NullFloat64{Float64: 100.0 * v.goldWeighted / v.totalSamples, Valid: true}
		}
		if _, err := stmt.ExecContext(ctx,
			inventoryVal,
			v.key.Name, v.key.Vcenter, v.key.VmId, v.eventKey, v.cloudId, v.creation, v.deletion,
			v.resourcePool, v.datacenter, v.cluster, v.folder, v.provisioned, v.vcpuCount, v.ramGB,
			v.isTemplate, v.poweredOn, v.srmPlaceholder, v.key.VmUuid, v.samplesPresent,
			avgVcpu, avgRam, avgDisk, avgIsPresent,
			tinPct, bronzePct, silverPct, goldPct,
			tinPct, bronzePct, silverPct, goldPct,
		); err != nil {
			tx.Rollback()
			return err
		}
	}
	return tx.Commit()
}

View File

@@ -14,6 +14,10 @@ import (
// use gocron to check events in the Events table
func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
startedAt := time.Now()
defer func() {
logger.Info("Event processing job finished", "duration", time.Since(startedAt))
}()
var (
numVcpus int32
numRam int32
@@ -83,6 +87,14 @@ func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
continue
}
if strings.HasPrefix(vmObject.Name, "vCLS-") {
c.Logger.Info("Skipping internal vCLS VM event", "vm_name", vmObject.Name)
if err := c.Database.Queries().UpdateEventsProcessed(ctx, evt.Eid); err != nil {
c.Logger.Error("Unable to mark vCLS event as processed", "event_id", evt.Eid, "error", err)
}
continue
}
//c.Logger.Debug("found VM")
srmPlaceholder = "FALSE" // Default assumption
//prettyPrint(vmObject)
@@ -153,10 +165,7 @@ func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
poweredOn = "TRUE"
}
err = vc.Logout()
if err != nil {
c.Logger.Error("unable to logout of vcenter", "error", err)
}
_ = vc.Logout(ctx)
if foundVm {
c.Logger.Debug("Adding to Inventory table", "vm_name", evt.VmName.String, "vcpus", numVcpus, "ram", numRam, "dc", evt.DatacenterId.String)

View File

@@ -1,16 +0,0 @@
package tasks
import (
"log/slog"
"vctp/db"
"vctp/internal/settings"
"vctp/internal/vcenter"
)
// CronTask stores runtime information to be used by tasks
type CronTask struct {
Logger *slog.Logger
Database db.Database
Settings *settings.Settings
VcCreds *vcenter.VcenterLogin
}

123
internal/tasks/types.go Normal file
View File

@@ -0,0 +1,123 @@
package tasks
import (
"database/sql"
"log/slog"
"time"
"vctp/db"
"vctp/internal/settings"
"vctp/internal/vcenter"
)
// CronTask stores runtime information to be used by tasks.
type CronTask struct {
Logger *slog.Logger
Database db.Database
Settings *settings.Settings
VcCreds *vcenter.VcenterLogin
FirstHourlySnapshotCheck bool
}
// InventorySnapshotRow represents a single VM snapshot row.
type InventorySnapshotRow struct {
InventoryId sql.NullInt64
Name string
Vcenter string
VmId sql.NullString
EventKey sql.NullString
CloudId sql.NullString
CreationTime sql.NullInt64
DeletionTime sql.NullInt64
ResourcePool sql.NullString
Datacenter sql.NullString
Cluster sql.NullString
Folder sql.NullString
ProvisionedDisk sql.NullFloat64
VcpuCount sql.NullInt64
RamGB sql.NullInt64
IsTemplate string
PoweredOn string
SrmPlaceholder string
VmUuid sql.NullString
SnapshotTime int64
}
// snapshotTotals aliases DB snapshot totals for convenience.
type snapshotTotals = db.SnapshotTotals
// dailyAggKey uniquely identifies a VM within one day's aggregation.
type dailyAggKey struct {
	Vcenter string
	VmId    string // VM managed object reference (MoRef) value
	VmUuid  string
	Name    string
}

// dailyAggVal accumulates one VM's hourly samples across a single day.
// "last*" fields hold the most recently observed values, "sum*" fields are
// running totals used to derive averages, and "*Hits" count samples that
// fell into each storage-pool tier.
type dailyAggVal struct {
	key            dailyAggKey
	resourcePool   string
	datacenter     string
	cluster        string
	folder         string
	isTemplate     string // stringly-typed flag; "true"/"1" means template
	poweredOn      string
	srmPlaceholder string
	creation       int64 // presumably unix seconds — TODO confirm against writer
	firstSeen      int64
	lastSeen       int64
	lastDisk       float64
	lastVcpu       int64
	lastRam        int64
	sumVcpu        int64
	sumRam         int64
	sumDisk        float64
	samples        int64
	tinHits        int64
	bronzeHits     int64
	silverHits     int64
	goldHits       int64
	seen           map[int64]struct{} // de-dup set — NOTE(review): keys look like sample timestamps; verify
	deletion       int64
}

// monthlyAggKey uniquely identifies a VM for monthly aggregation.
type monthlyAggKey struct {
	Vcenter string
	VmId    string
	VmUuid  string
	Name    string
}

// monthlyAggVal accumulates one VM's daily data across a month.
// The sum* fields are sample-weighted sums (daily average * sample count)
// and the *Weighted fields are pool-fraction weights; both are divided by
// totalSamples when the monthly summary row is inserted.
type monthlyAggVal struct {
	key            monthlyAggKey
	inventoryId    int64 // 0 means "no inventory row"; mapped to SQL NULL on insert
	eventKey       string
	cloudId        string
	resourcePool   string
	datacenter     string
	cluster        string
	folder         string
	isTemplate     string
	poweredOn      string
	srmPlaceholder string
	creation       int64
	deletion       int64
	lastSnapshot   time.Time
	provisioned    float64
	vcpuCount      int64
	ramGB          int64
	samplesPresent int64 // samples where the VM was actually present
	totalSamples   float64
	sumVcpu        float64
	sumRam         float64
	sumDisk        float64
	tinWeighted    float64
	bronzeWeighted float64
	silverWeighted float64
	goldWeighted   float64
}
// CronTracker manages re-entry protection and status recording for cron jobs.
type CronTracker struct {
db db.Database
bindType int
}

View File

@@ -7,6 +7,7 @@ import (
"net"
"os"
"path/filepath"
"strconv"
"time"
)
@@ -20,6 +21,10 @@ func GetFilePath(path string) string {
// check if filename exists
if _, err := os.Stat(path); os.IsNotExist((err)) {
if filepath.IsAbs(path) {
slog.Info("File not found, using absolute path", "filename", path)
return path
}
slog.Info("File not found, searching in same directory as binary", "filename", path)
// if not, check that it exists in the same directory as the currently executing binary
ex, err2 := os.Executable()
@@ -66,3 +71,29 @@ func SleepWithContext(ctx context.Context, d time.Duration) {
case <-timer.C:
}
}
// EnvInt parses an environment variable into an int; returns (value, true) when set and valid.
func EnvInt(key string) (int, bool) {
val := os.Getenv(key)
if val == "" {
return 0, false
}
parsed, err := strconv.Atoi(val)
if err != nil {
return 0, false
}
return parsed, true
}
// DurationFromEnv parses an environment variable representing seconds into a duration, defaulting when unset/invalid.
func DurationFromEnv(key string, fallback time.Duration) time.Duration {
val := os.Getenv(key)
if val == "" {
return fallback
}
seconds, err := strconv.ParseInt(val, 10, 64)
if err != nil || seconds <= 0 {
return fallback
}
return time.Duration(seconds) * time.Second
}

View File

@@ -3,14 +3,14 @@ package vcenter
import (
"context"
"fmt"
"log"
"log/slog"
"net/url"
"os"
"path"
"strings"
"time"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/event"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/view"
@@ -30,6 +30,7 @@ type Vcenter struct {
type VcenterLogin struct {
Username string
Password string
Insecure bool
}
type VmProperties struct {
@@ -37,6 +38,22 @@ type VmProperties struct {
ResourcePool string
}
var clientUserAgent = "vCTP"
// SetUserAgent customizes the User-Agent used when talking to vCenter.
func SetUserAgent(ua string) {
if strings.TrimSpace(ua) != "" {
clientUserAgent = ua
}
}
type HostLookup struct {
Cluster string
Datacenter string
}
type FolderLookup map[string]string
// New creates a new Vcenter with the given logger
func New(logger *slog.Logger, creds *VcenterLogin) *Vcenter {
@@ -51,17 +68,19 @@ func New(logger *slog.Logger, creds *VcenterLogin) *Vcenter {
}
func (v *Vcenter) Login(vUrl string) error {
var insecure bool
// TODO - fix this
insecureString := os.Getenv("VCENTER_INSECURE")
//username := os.Getenv("VCENTER_USERNAME")
//password := os.Getenv("VCENTER_PASSWORD")
if v == nil {
return fmt.Errorf("vcenter is nil")
}
if strings.TrimSpace(vUrl) == "" {
return fmt.Errorf("vcenter URL is empty")
}
if v.credentials == nil {
return fmt.Errorf("vcenter credentials are nil")
}
// Connect to vCenter
u, err := soap.ParseURL(vUrl)
if err != nil {
log.Fatalf("Error parsing vCenter URL: %s", err)
return fmt.Errorf("error parsing vCenter URL: %w", err)
}
v.Vurl = vUrl
@@ -74,15 +93,14 @@ func (v *Vcenter) Login(vUrl string) error {
}
*/
if insecureString == "true" {
insecure = true
}
c, err := govmomi.NewClient(v.ctx, u, insecure)
c, err := govmomi.NewClient(v.ctx, u, v.credentials.Insecure)
if err != nil {
v.Logger.Error("Unable to connect to vCenter", "error", err)
return fmt.Errorf("unable to connect to vCenter : %s", err)
}
if clientUserAgent != "" {
c.Client.UserAgent = clientUserAgent
}
//defer c.Logout(v.ctx)
@@ -93,22 +111,19 @@ func (v *Vcenter) Login(vUrl string) error {
return nil
}
func (v *Vcenter) Logout() error {
//v.Logger.Debug("vcenter logging out")
if v.ctx == nil {
func (v *Vcenter) Logout(ctx context.Context) error {
if ctx == nil {
ctx = v.ctx
}
if ctx == nil {
v.Logger.Warn("Nil context, unable to logout")
return nil
}
if v.client.Valid() {
//v.Logger.Debug("vcenter client is valid. Logging out")
return v.client.Logout(v.ctx)
} else {
v.Logger.Debug("vcenter client is not valid")
return nil
return v.client.Logout(ctx)
}
v.Logger.Debug("vcenter client is not valid")
return nil
}
func (v *Vcenter) GetAllVmReferences() ([]*object.VirtualMachine, error) {
@@ -154,6 +169,345 @@ func (v *Vcenter) GetAllVmReferences() ([]*object.VirtualMachine, error) {
return results, err
}
// GetAllVMsWithProps returns all VMs with the properties needed for snapshotting in a single property-collector call.
//
// A recursive container view rooted at the service root folder is created,
// the listed properties are retrieved for every VirtualMachine in one call,
// and the view is destroyed before returning. Errors from view creation or
// retrieval are wrapped with context.
func (v *Vcenter) GetAllVMsWithProps() ([]mo.VirtualMachine, error) {
	m := view.NewManager(v.client.Client)
	cv, err := m.CreateContainerView(v.ctx, v.client.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
	if err != nil {
		return nil, fmt.Errorf("failed to create VM container view: %w", err)
	}
	// Best-effort cleanup; the Destroy error is intentionally ignored.
	defer cv.Destroy(v.ctx)
	var vms []mo.VirtualMachine
	// Minimal property set required by the snapshot pipeline.
	props := []string{
		"name",
		"parent",
		"config.uuid",
		"config.createDate",
		"config.hardware",
		"config.managedBy",
		"config.template",
		"runtime.powerState",
		"runtime.host",
		"resourcePool",
	}
	if err := cv.Retrieve(v.ctx, []string{"VirtualMachine"}, props, &vms); err != nil {
		return nil, fmt.Errorf("failed to retrieve VMs: %w", err)
	}
	return vms, nil
}
// FindVmDeletionEvents returns a map of MoRef (VmId) to the deletion event time within the given window.
// It scans events for all VMs (no candidate filtering); see findVmDeletionEvents
// for the matching/priority rules.
func (v *Vcenter) FindVmDeletionEvents(ctx context.Context, begin, end time.Time) (map[string]time.Time, error) {
	return v.findVmDeletionEvents(ctx, begin, end, nil)
}
// FindVmDeletionEventsForCandidates returns deletion event times for the provided VM IDs only.
// Empty candidate IDs are ignored; with no usable candidates an empty map is
// returned without querying vCenter.
func (v *Vcenter) FindVmDeletionEventsForCandidates(ctx context.Context, begin, end time.Time, candidates []string) (map[string]time.Time, error) {
	wanted := make(map[string]struct{}, len(candidates))
	for _, moref := range candidates {
		if moref != "" {
			wanted[moref] = struct{}{}
		}
	}
	if len(wanted) == 0 {
		return map[string]time.Time{}, nil
	}
	return v.findVmDeletionEvents(ctx, begin, end, wanted)
}
// findVmDeletionEvents scans vCenter event history within [begin, end) and
// returns the best-known deletion time per VM MoRef.
//
// Three signals are considered, in decreasing strength:
//  1. VmRemovedEvent
//  2. other VmEvents whose formatted message looks like a deletion
//  3. TaskEvents whose description id / name / message indicates a VM
//     destroy, delete, or unregister task
//
// For each VM the strongest signal wins; within the same priority the
// earliest timestamp wins. When candidateSet is non-nil only those VM IDs
// are recorded and reading stops early once all candidates are found.
// Reading is paged (1000 events per page, max 25 pages per collector;
// truncation is logged). If the type-filtered collector finds nothing, a
// best-effort fallback collector over all event types in the window is used.
func (v *Vcenter) findVmDeletionEvents(ctx context.Context, begin, end time.Time, candidateSet map[string]struct{}) (map[string]time.Time, error) {
	result := make(map[string]time.Time)
	if v.client == nil || !v.client.Valid() {
		return result, fmt.Errorf("vcenter client is not valid")
	}
	// vCenter events are stored in UTC; normalize the query window.
	beginUTC := begin.UTC()
	endUTC := end.UTC()
	mgr := event.NewManager(v.client.Client)
	// A hit is a candidate deletion time plus the priority of the signal
	// that produced it (lower number = stronger signal).
	type deletionHit struct {
		ts       time.Time
		priority int
	}
	const (
		deletionPriorityRemoved = iota
		deletionPriorityVmEvent
		deletionPriorityTask
	)
	hits := make(map[string]deletionHit)
	foundCandidates := 0
	// recordDeletion keeps the strongest hit per VM; ties on priority are
	// broken by the earlier timestamp.
	recordDeletion := func(vmID string, ts time.Time, priority int) {
		if vmID == "" {
			return
		}
		if candidateSet != nil {
			if _, ok := candidateSet[vmID]; !ok {
				return
			}
		}
		if prev, ok := hits[vmID]; !ok {
			hits[vmID] = deletionHit{ts: ts, priority: priority}
			if candidateSet != nil {
				foundCandidates++
			}
		} else if priority < prev.priority || (priority == prev.priority && ts.Before(prev.ts)) {
			hits[vmID] = deletionHit{ts: ts, priority: priority}
		}
	}
	// Heuristic text match for deletion-like event messages.
	isDeletionMessage := func(msg string) bool {
		msg = strings.ToLower(msg)
		return strings.Contains(msg, "destroy") ||
			strings.Contains(msg, "deleted") ||
			strings.Contains(msg, "unregister") ||
			strings.Contains(msg, "removed from inventory")
	}
	// isVmDeletionTask checks description id, then task name, then message
	// text to decide whether a TaskEvent is a VM deletion task.
	isVmDeletionTask := func(info types.TaskInfo, msg string) bool {
		id := strings.ToLower(strings.TrimSpace(info.DescriptionId))
		if id != "" {
			if strings.Contains(id, "virtualmachine") &&
				(strings.Contains(id, "destroy") || strings.Contains(id, "delete") || strings.Contains(id, "unregister")) {
				return true
			}
		}
		name := strings.ToLower(strings.TrimSpace(info.Name))
		if name != "" {
			if (strings.Contains(name, "destroy") || strings.Contains(name, "delete") || strings.Contains(name, "unregister")) &&
				(strings.Contains(name, "virtualmachine") || strings.Contains(name, "virtual machine")) {
				return true
			}
		}
		if msg != "" && isDeletionMessage(msg) {
			return true
		}
		return false
	}
	processEvents := func(evts []types.BaseEvent) {
		for _, ev := range evts {
			switch e := ev.(type) {
			case *types.VmRemovedEvent:
				if e.Vm != nil {
					vmID := e.Vm.Vm.Value
					recordDeletion(vmID, e.CreatedTime, deletionPriorityRemoved)
				}
			case *types.TaskEvent:
				// Fallback for destroy task events.
				if e.Info.Entity != nil {
					vmID := e.Info.Entity.Value
					if vmID != "" && isVmDeletionTask(e.Info, e.GetEvent().FullFormattedMessage) {
						recordDeletion(vmID, e.CreatedTime, deletionPriorityTask)
					}
				}
			case *types.VmEvent:
				if e.Vm != nil {
					vmID := e.Vm.Vm.Value
					if vmID != "" && isDeletionMessage(e.GetEvent().FullFormattedMessage) {
						recordDeletion(vmID, e.CreatedTime, deletionPriorityVmEvent)
					}
				}
			}
		}
	}
	const (
		eventPageSize = int32(1000)
		maxEventPages = 25
	)
	// readCollector pages through a history collector, stopping early when
	// all candidates are found or the page cap is reached.
	readCollector := func(label string, collector *event.HistoryCollector) error {
		pageCount := 0
		for {
			events, err := collector.ReadNextEvents(ctx, eventPageSize)
			if err != nil {
				return err
			}
			if len(events) == 0 {
				break
			}
			processEvents(events)
			if candidateSet != nil && foundCandidates >= len(candidateSet) {
				break
			}
			pageCount++
			if pageCount >= maxEventPages {
				if v.Logger != nil {
					v.Logger.Warn("vcenter deletion events truncated", "vcenter", v.Vurl, "label", label, "pages", pageCount, "page_size", eventPageSize, "window_start_utc", beginUTC, "window_end_utc", endUTC)
				}
				break
			}
			// A short page means the collector is exhausted.
			if len(events) < int(eventPageSize) {
				break
			}
		}
		return nil
	}
	// First attempt: specific deletion event types.
	disableFullMessage := false
	filter := types.EventFilterSpec{
		Time: &types.EventFilterSpecByTime{
			BeginTime: &beginUTC,
			EndTime:   &endUTC,
		},
		DisableFullMessage: &disableFullMessage,
		EventTypeId: []string{
			"VmRemovedEvent",
			"TaskEvent",
		},
	}
	collector, err := mgr.CreateCollectorForEvents(ctx, filter)
	if err != nil {
		return result, fmt.Errorf("failed to create event collector: %w", err)
	}
	defer collector.Destroy(ctx)
	if err := readCollector("primary", collector); err != nil {
		return result, fmt.Errorf("failed to read events: %w", err)
	}
	// If nothing found, widen the filter to all event types in the window as a fallback.
	if len(hits) == 0 {
		fallbackFilter := types.EventFilterSpec{
			Time: &types.EventFilterSpecByTime{
				BeginTime: &beginUTC,
				EndTime:   &endUTC,
			},
			DisableFullMessage: &disableFullMessage,
		}
		fc, err := mgr.CreateCollectorForEvents(ctx, fallbackFilter)
		if err == nil {
			defer fc.Destroy(ctx)
			if readErr := readCollector("fallback", fc); readErr != nil && v.Logger != nil {
				v.Logger.Warn("vcenter fallback event read failed", "vcenter", v.Vurl, "error", readErr)
			}
		}
	}
	for vmID, hit := range hits {
		result[vmID] = hit.ts
	}
	return result, nil
}
// BuildHostLookup maps each host MoRef to its cluster and datacenter names.
//
// Hosts are enumerated per datacenter with a finder; each host's parent
// compute resource is resolved to a cluster name, memoized per parent MoRef.
// Failures on individual datacenters or hosts are logged and skipped rather
// than aborting the whole build, so the result may be partial.
//
// NOTE(review): this issues one RetrieveOne round trip per host (N+1
// pattern); a batched property-collector retrieval over HostSystem could
// reduce calls — confirm equivalence before changing.
func (v *Vcenter) BuildHostLookup() (map[string]HostLookup, error) {
	finder := find.NewFinder(v.client.Client, true)
	datacenters, err := finder.DatacenterList(v.ctx, "*")
	if err != nil {
		return nil, fmt.Errorf("failed to list datacenters: %w", err)
	}
	lookup := make(map[string]HostLookup)
	// clusterCache memoizes parent MoRef -> cluster name.
	clusterCache := make(map[string]string)
	for _, dc := range datacenters {
		finder.SetDatacenter(dc)
		hosts, err := finder.HostSystemList(v.ctx, "*")
		if err != nil {
			v.Logger.Warn("failed to list hosts for datacenter", "datacenter", dc.Name(), "error", err)
			continue
		}
		for _, host := range hosts {
			ref := host.Reference()
			var moHost mo.HostSystem
			if err := v.client.RetrieveOne(v.ctx, ref, []string{"parent"}, &moHost); err != nil {
				v.Logger.Warn("failed to retrieve host info", "host", host.Name(), "error", err)
				continue
			}
			clusterName := ""
			if moHost.Parent != nil {
				if cached, ok := clusterCache[moHost.Parent.Value]; ok {
					clusterName = cached
				} else {
					// Cluster-name resolution failure leaves clusterName empty.
					var moCompute mo.ComputeResource
					if err := v.client.RetrieveOne(v.ctx, *moHost.Parent, []string{"name"}, &moCompute); err == nil {
						clusterName = moCompute.Name
						clusterCache[moHost.Parent.Value] = clusterName
					}
				}
			}
			lookup[ref.Value] = HostLookup{
				Cluster:    clusterName,
				Datacenter: dc.Name(),
			}
		}
	}
	return lookup, nil
}
// BuildFolderPathLookup returns a map of folder MoRef -> absolute "/a/b" path.
//
// All Folder objects are fetched in a single container-view retrieval (name
// and parent only); paths are then built purely in memory by walking parent
// links, memoized in the result map. A Datacenter parent, a non-Folder
// parent, a nil parent, or an unknown parent id anchors the path at "/<name>".
func (v *Vcenter) BuildFolderPathLookup() (FolderLookup, error) {
	m := view.NewManager(v.client.Client)
	folders, err := m.CreateContainerView(v.ctx, v.client.ServiceContent.RootFolder, []string{"Folder"}, true)
	if err != nil {
		return nil, err
	}
	defer folders.Destroy(v.ctx)
	var results []mo.Folder
	if err := folders.Retrieve(v.ctx, []string{"Folder"}, []string{"name", "parent"}, &results); err != nil {
		return nil, err
	}
	nameByID := make(map[string]string, len(results))
	parentByID := make(map[string]*types.ManagedObjectReference, len(results))
	for _, folder := range results {
		nameByID[folder.Reference().Value] = folder.Name
		parentByID[folder.Reference().Value] = folder.Parent
	}
	paths := make(FolderLookup, len(results))
	// buildPath resolves (and memoizes) the path for one folder id by
	// recursing through Folder parents.
	var buildPath func(id string) string
	buildPath = func(id string) string {
		if pathValue, ok := paths[id]; ok {
			return pathValue
		}
		name, ok := nameByID[id]
		if !ok {
			// Parent id not in the retrieved set — caller anchors at "/".
			return ""
		}
		parent := parentByID[id]
		if parent == nil || parent.Type == "Datacenter" {
			paths[id] = path.Join("/", name)
			return paths[id]
		}
		if parent.Type != "Folder" {
			paths[id] = path.Join("/", name)
			return paths[id]
		}
		parentPath := buildPath(parent.Value)
		if parentPath == "" {
			paths[id] = path.Join("/", name)
			return paths[id]
		}
		paths[id] = path.Join(parentPath, name)
		return paths[id]
	}
	for id := range nameByID {
		_ = buildPath(id)
	}
	return paths, nil
}
// GetVMFolderPathFromLookup resolves a VM's folder path from a prebuilt
// lookup. Returns ("", false) when the VM has no parent, the lookup is nil,
// or the parent reference is absent from the lookup.
func (v *Vcenter) GetVMFolderPathFromLookup(vm mo.VirtualMachine, lookup FolderLookup) (string, bool) {
	if lookup == nil || vm.Parent == nil {
		return "", false
	}
	folderPath, found := lookup[vm.Parent.Value]
	return folderPath, found
}
func (v *Vcenter) ConvertObjToMoVM(vmObj *object.VirtualMachine) (*mo.VirtualMachine, error) {
// Use the InventoryPath to extract the datacenter name and VM path
inventoryPath := vmObj.InventoryPath
@@ -262,16 +616,24 @@ func (v *Vcenter) GetHostSystemObject(hostRef types.ManagedObjectReference) (*mo
// Function to find the cluster or compute resource from a host reference
func (v *Vcenter) GetClusterFromHost(hostRef *types.ManagedObjectReference) (string, error) {
if hostRef == nil {
v.Logger.Warn("nil hostRef passed to GetClusterFromHost")
return "", nil
}
// Get the host object
host, err := v.GetHostSystemObject(*hostRef)
if err != nil {
v.Logger.Error("cant get host", "error", err)
return "", err
}
if host == nil {
v.Logger.Warn("host lookup returned nil", "host_ref", hostRef)
return "", nil
}
v.Logger.Debug("host parent", "parent", host.Parent)
if host.Parent.Type == "ClusterComputeResource" {
if host.Parent != nil && host.Parent.Type == "ClusterComputeResource" {
// Retrieve properties of the compute resource
var moCompute mo.ComputeResource
err = v.client.RetrieveOne(v.ctx, *host.Parent, nil, &moCompute)
@@ -478,6 +840,27 @@ func (v *Vcenter) GetVmResourcePool(vm mo.VirtualMachine) (string, error) {
return resourcePool, nil
}
// BuildResourcePoolLookup creates a cache of resource pool MoRef -> name for
// fast lookups, using a single recursive container-view retrieval.
func (v *Vcenter) BuildResourcePoolLookup() (map[string]string, error) {
	viewMgr := view.NewManager(v.client.Client)
	poolView, err := viewMgr.CreateContainerView(v.ctx, v.client.ServiceContent.RootFolder, []string{"ResourcePool"}, true)
	if err != nil {
		return nil, fmt.Errorf("failed to create resource pool view: %w", err)
	}
	defer poolView.Destroy(v.ctx)
	var pools []mo.ResourcePool
	if err := poolView.Retrieve(v.ctx, []string{"ResourcePool"}, []string{"name"}, &pools); err != nil {
		return nil, fmt.Errorf("failed to retrieve resource pools: %w", err)
	}
	names := make(map[string]string, len(pools))
	for _, rp := range pools {
		names[rp.Reference().Value] = rp.Name
	}
	return names, nil
}
// Helper function to retrieve the full folder path for the VM
func (v *Vcenter) GetVMFolderPath(vm mo.VirtualMachine) (string, error) {
//finder := find.NewFinder(v.client.Client, true)
@@ -494,7 +877,8 @@ func (v *Vcenter) GetVMFolderPath(vm mo.VirtualMachine) (string, error) {
folderPath := ""
//v.Logger.Debug("parent is", "parent", parentRef)
for parentRef.Type != "Datacenter" {
maxHops := 128
for parentRef != nil && parentRef.Type != "Datacenter" && maxHops > 0 {
// Retrieve the parent object
//parentObj, err := finder.ObjectReference(v.ctx, *parentRef)
//if err != nil {
@@ -522,6 +906,11 @@ func (v *Vcenter) GetVMFolderPath(vm mo.VirtualMachine) (string, error) {
return "", fmt.Errorf("unexpected parent type: %s", parentObj.Reference().Type)
}
//break
maxHops--
}
if parentRef == nil || maxHops == 0 {
return "", fmt.Errorf("folder traversal terminated early for VM %s", vm.Name)
}
return folderPath, nil

View File

@@ -65,10 +65,9 @@ func ToLevel(level string) Level {
}
}
// GetLevel returns the log level from the environment variable.
// GetLevel returns the default log level.
func GetLevel() Level {
level := os.Getenv("LOG_LEVEL")
return ToLevel(level)
return LevelInfo
}
// Output represents the log output.
@@ -93,8 +92,7 @@ func ToOutput(output string) Output {
}
}
// GetOutput returns the log output from the environment variable.
// GetOutput returns the default log output.
func GetOutput() Output {
output := os.Getenv("LOG_OUTPUT")
return ToOutput(output)
return OutputText
}

296
main.go
View File

@@ -2,8 +2,8 @@ package main
import (
"context"
"flag"
"fmt"
"log/slog"
"os"
"runtime"
"strings"
@@ -18,8 +18,10 @@ import (
"vctp/server"
"vctp/server/router"
"crypto/sha256"
"log/slog"
"github.com/go-co-op/gocron/v2"
"github.com/joho/godotenv"
)
var (
@@ -30,26 +32,37 @@ var (
cronInvFrequency time.Duration
cronSnapshotFrequency time.Duration
cronAggregateFrequency time.Duration
encryptionKey = []byte("5L1l3B5KvwOCzUHMAlCgsgUTRAYMfSpa")
)
func main() {
// Load data from environment file
envFilename := utils.GetFilePath(".env")
err := godotenv.Load(envFilename)
if err != nil {
panic("Error loading .env file")
}
const fallbackEncryptionKey = "5L1l3B5KvwOCzUHMAlCgsgUTRAYMfSpa"
logger := log.New(
log.GetLevel(),
log.GetOutput(),
)
func main() {
settingsPath := flag.String("settings", "/etc/dtms/vctp.yml", "Path to settings YAML")
runInventory := flag.Bool("run-inventory", false, "Run a single inventory snapshot across all configured vCenters and exit")
flag.Parse()
bootstrapLogger := log.New(log.LevelInfo, log.OutputText)
ctx, cancel := context.WithCancel(context.Background())
// Load settings from yaml
s := settings.New(bootstrapLogger, *settingsPath)
err := s.ReadYMLSettings()
if err != nil {
bootstrapLogger.Error("failed to open yaml settings file", "error", err, "filename", *settingsPath)
os.Exit(1)
}
logger := log.New(
log.ToLevel(strings.ToLower(strings.TrimSpace(s.Values.Settings.LogLevel))),
log.ToOutput(strings.ToLower(strings.TrimSpace(s.Values.Settings.LogOutput))),
)
s.Logger = logger
logger.Info("vCTP starting", "build_time", buildTime, "sha1_version", sha1ver, "go_version", runtime.Version(), "settings_file", *settingsPath)
// Configure database
dbDriver := os.Getenv("DB_DRIVER")
dbDriver := strings.TrimSpace(s.Values.Settings.DatabaseDriver)
if dbDriver == "" {
dbDriver = "sqlite"
}
@@ -57,12 +70,12 @@ func main() {
if normalizedDriver == "" || normalizedDriver == "sqlite3" {
normalizedDriver = "sqlite"
}
dbURL := os.Getenv("DB_URL")
dbURL := strings.TrimSpace(s.Values.Settings.DatabaseURL)
if dbURL == "" && normalizedDriver == "sqlite" {
dbURL = utils.GetFilePath("db.sqlite3")
}
database, err := db.New(logger, db.Config{Driver: dbDriver, DSN: dbURL})
database, err := db.New(logger, db.Config{Driver: normalizedDriver, DSN: dbURL})
if err != nil {
logger.Error("Failed to create database", "error", err)
os.Exit(1)
@@ -70,56 +83,36 @@ func main() {
defer database.Close()
//defer database.DB().Close()
if err = db.Migrate(database, dbDriver); err != nil {
if err = db.Migrate(database, normalizedDriver); err != nil {
logger.Error("failed to migrate database", "error", err)
os.Exit(1)
}
// Load settings from yaml
settingsFile := os.Getenv("SETTINGS_FILE")
if settingsFile == "" {
settingsFile = "settings.yaml"
}
// TODO - how to pass this to the other packages that will need this info?
s := settings.New(logger, settingsFile)
err = s.ReadYMLSettings()
//s, err := settings.ReadYMLSettings(logger, settingsFile)
if err != nil {
logger.Error("failed to open yaml settings file", "error", err, "filename", settingsFile)
//os.Exit(1)
} else {
logger.Debug("Loaded yaml settings", "contents", s)
}
// Determine bind IP
bindIP := os.Getenv("BIND_IP")
bindIP := strings.TrimSpace(s.Values.Settings.BindIP)
if bindIP == "" {
bindIP = utils.GetOutboundIP().String()
}
// Determine bind port
bindPort := os.Getenv("BIND_PORT")
if bindPort == "" {
bindPort = "9443"
bindPort := s.Values.Settings.BindPort
if bindPort == 0 {
bindPort = 9443
}
bindAddress := fmt.Sprint(bindIP, ":", bindPort)
//logger.Info("Will listen on address", "ip", bindIP, "port", bindPort)
// Determine bind disable TLS
bindDisableTlsEnv := os.Getenv("BIND_DISABLE_TLS")
if bindDisableTlsEnv == "true" {
bindDisableTls = true
}
bindDisableTls = s.Values.Settings.BindDisableTLS
// Get file names for TLS cert/key
tlsCertFilename := os.Getenv("TLS_CERT_FILE")
tlsCertFilename := strings.TrimSpace(s.Values.Settings.TLSCertFilename)
if tlsCertFilename != "" {
tlsCertFilename = utils.GetFilePath(tlsCertFilename)
} else {
tlsCertFilename = "./cert.pem"
}
tlsKeyFilename := os.Getenv("TLS_KEY_FILE")
tlsKeyFilename := strings.TrimSpace(s.Values.Settings.TLSKeyFilename)
if tlsKeyFilename != "" {
tlsKeyFilename = utils.GetFilePath(tlsKeyFilename)
} else {
@@ -132,9 +125,10 @@ func main() {
utils.GenerateCerts(tlsCertFilename, tlsKeyFilename)
}
// Load vcenter credentials from .env
a := secrets.New(logger, encryptionKey)
vcEp := os.Getenv("VCENTER_PASSWORD")
// Load vcenter credentials from settings, decrypt if required
encKey := deriveEncryptionKey(logger)
a := secrets.New(logger, encKey)
vcEp := strings.TrimSpace(s.Values.Settings.VcenterPassword)
if len(vcEp) == 0 {
logger.Error("No vcenter password configured")
os.Exit(1)
@@ -143,14 +137,34 @@ func main() {
if err != nil {
logger.Error("failed to decrypt vcenter credentials. Assuming un-encrypted", "error", err)
vcPass = []byte(vcEp)
//os.Exit(1)
if cipherText, encErr := a.Encrypt([]byte(vcEp)); encErr != nil {
logger.Warn("failed to encrypt vcenter credentials", "error", encErr)
} else {
s.Values.Settings.VcenterPassword = cipherText
if err := s.WriteYMLSettings(); err != nil {
logger.Warn("failed to update settings with encrypted vcenter password", "error", err)
} else {
logger.Info("encrypted vcenter password stored in settings file")
}
}
}
creds := vcenter.VcenterLogin{
//insecureString := os.Getenv("VCENTER_INSECURE")
Username: os.Getenv("VCENTER_USERNAME"),
Username: strings.TrimSpace(s.Values.Settings.VcenterUsername),
Password: string(vcPass),
Insecure: s.Values.Settings.VcenterInsecure,
}
if creds.Username == "" {
logger.Error("No vcenter username configured")
os.Exit(1)
}
// Set a recognizable User-Agent for vCenter sessions.
ua := "vCTP"
if sha1ver != "" {
ua = fmt.Sprintf("vCTP/%s", sha1ver)
}
vcenter.SetUserAgent(ua)
// Prepare the task scheduler
c, err := gocron.NewScheduler()
@@ -161,98 +175,32 @@ func main() {
// Pass useful information to the cron jobs
ct := &tasks.CronTask{
Logger: logger,
Database: database,
Settings: s,
VcCreds: &creds,
Logger: logger,
Database: database,
Settings: s,
VcCreds: &creds,
FirstHourlySnapshotCheck: true,
}
cronFrequencyString := os.Getenv("VCENTER_EVENT_POLLING_SECONDS")
if cronFrequencyString != "" {
cronFrequency, err = time.ParseDuration(cronFrequencyString)
if err != nil {
slog.Error("Can't convert VCENTER_EVENT_POLLING_SECONDS value to time duration. Defaulting to 60s", "value", cronFrequencyString, "error", err)
cronFrequency = time.Second * 60
}
} else {
cronFrequency = time.Second * 60
// One-shot mode: run a single inventory snapshot across all configured vCenters and exit.
if *runInventory {
logger.Info("Running one-shot inventory snapshot across all vCenters")
ct.RunVcenterSnapshotHourly(ctx, logger, true)
logger.Info("One-shot inventory snapshot complete; exiting")
return
}
logger.Debug("Setting VM event polling cronjob frequency to", "frequency", cronFrequency)
cronInventoryFrequencyString := os.Getenv("VCENTER_INVENTORY_POLLING_SECONDS")
if cronInventoryFrequencyString != "" {
cronInvFrequency, err = time.ParseDuration(cronInventoryFrequencyString)
if err != nil {
slog.Error("Can't convert VCENTER_INVENTORY_POLLING_SECONDS value to time duration. Defaulting to 7200", "value", cronInventoryFrequencyString, "error", err)
cronInvFrequency = time.Second * 7200
}
} else {
cronInvFrequency = time.Second * 7200
}
logger.Debug("Setting VM inventory polling cronjob frequency to", "frequency", cronInvFrequency)
cronSnapshotFrequencyString := os.Getenv("VCENTER_INVENTORY_SNAPSHOT_SECONDS")
if cronSnapshotFrequencyString != "" {
cronSnapshotFrequency, err = time.ParseDuration(cronSnapshotFrequencyString)
if err != nil {
slog.Error("Can't convert VCENTER_INVENTORY_SNAPSHOT_SECONDS value to time duration. Defaulting to 3600", "value", cronSnapshotFrequencyString, "error", err)
cronSnapshotFrequency = time.Hour
}
} else {
cronSnapshotFrequency = time.Hour
}
cronSnapshotFrequency = durationFromSeconds(s.Values.Settings.VcenterInventorySnapshotSeconds, 3600)
logger.Debug("Setting VM inventory snapshot cronjob frequency to", "frequency", cronSnapshotFrequency)
cronAggregateFrequencyString := os.Getenv("VCENTER_INVENTORY_AGGREGATE_SECONDS")
if cronAggregateFrequencyString != "" {
cronAggregateFrequency, err = time.ParseDuration(cronAggregateFrequencyString)
if err != nil {
slog.Error("Can't convert VCENTER_INVENTORY_AGGREGATE_SECONDS value to time duration. Defaulting to 86400", "value", cronAggregateFrequencyString, "error", err)
cronAggregateFrequency = time.Hour * 24
}
} else {
cronAggregateFrequency = time.Hour * 24
}
logger.Debug("Setting VM inventory aggregation cronjob frequency to", "frequency", cronAggregateFrequency)
cronAggregateFrequency = durationFromSeconds(s.Values.Settings.VcenterInventoryAggregateSeconds, 86400)
logger.Debug("Setting VM inventory daily aggregation cronjob frequency to", "frequency", cronAggregateFrequency)
// start background processing for events stored in events table
startsAt := time.Now().Add(time.Second * 10)
job, err := c.NewJob(
gocron.DurationJob(cronFrequency),
gocron.NewTask(func() {
ct.RunVmCheck(ctx, logger)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
gocron.WithStartAt(gocron.WithStartDateTime(startsAt)),
)
if err != nil {
logger.Error("failed to start event processing cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created event processing cron job", "job", job.ID(), "starting_at", startsAt)
// start background checks of vcenter inventory
startsAt2 := time.Now().Add(cronInvFrequency)
job2, err := c.NewJob(
gocron.DurationJob(cronInvFrequency),
gocron.NewTask(func() {
ct.RunVcenterPoll(ctx, logger)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
gocron.WithStartAt(gocron.WithStartDateTime(startsAt2)),
)
if err != nil {
logger.Error("failed to start vcenter inventory cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created vcenter inventory cron job", "job", job2.ID(), "starting_at", startsAt2)
startsAt3 := time.Now().Add(cronSnapshotFrequency)
if cronSnapshotFrequency == time.Hour {
startsAt3 = time.Now().Truncate(time.Hour).Add(time.Hour)
}
startsAt3 := alignStart(time.Now(), cronSnapshotFrequency)
job3, err := c.NewJob(
gocron.DurationJob(cronSnapshotFrequency),
gocron.NewTask(func() {
ct.RunVcenterSnapshotHourly(ctx, logger)
ct.RunVcenterSnapshotHourly(ctx, logger, false)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
gocron.WithStartAt(gocron.WithStartDateTime(startsAt3)),
)
@@ -265,7 +213,7 @@ func main() {
startsAt4 := time.Now().Add(cronAggregateFrequency)
if cronAggregateFrequency == time.Hour*24 {
now := time.Now()
startsAt4 = time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, now.Location())
startsAt4 = time.Date(now.Year(), now.Month(), now.Day()+1, 0, 10, 0, 0, now.Location())
}
job4, err := c.NewJob(
gocron.DurationJob(cronAggregateFrequency),
@@ -280,8 +228,13 @@ func main() {
}
logger.Debug("Created vcenter inventory aggregation cron job", "job", job4.ID(), "starting_at", startsAt4)
monthlyCron := strings.TrimSpace(s.Values.Settings.MonthlyAggregationCron)
if monthlyCron == "" {
monthlyCron = "10 3 1 * *"
}
logger.Debug("Setting monthly aggregation cron schedule", "cron", monthlyCron)
job5, err := c.NewJob(
gocron.CronJob("0 0 1 * *", false),
gocron.CronJob(monthlyCron, false),
gocron.NewTask(func() {
ct.RunVcenterMonthlyAggregate(ctx, logger)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
@@ -292,10 +245,22 @@ func main() {
}
logger.Debug("Created vcenter monthly aggregation cron job", "job", job5.ID())
snapshotCleanupCron := strings.TrimSpace(s.Values.Settings.SnapshotCleanupCron)
if snapshotCleanupCron == "" {
snapshotCleanupCron = "30 2 * * *"
}
job6, err := c.NewJob(
gocron.CronJob("0 30 2 * *", false),
gocron.CronJob(snapshotCleanupCron, false),
gocron.NewTask(func() {
ct.RunSnapshotCleanup(ctx, logger)
if strings.EqualFold(s.Values.Settings.DatabaseDriver, "sqlite") {
logger.Info("Performing sqlite VACUUM after snapshot cleanup")
if _, err := ct.Database.DB().ExecContext(ctx, "VACUUM"); err != nil {
logger.Warn("VACUUM failed after snapshot cleanup", "error", err)
} else {
logger.Debug("VACUUM completed after snapshot cleanup")
}
}
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
)
if err != nil {
@@ -304,6 +269,23 @@ func main() {
}
logger.Debug("Created snapshot cleanup cron job", "job", job6.ID())
// Retry failed hourly snapshots
retrySeconds := s.Values.Settings.HourlySnapshotRetrySeconds
if retrySeconds <= 0 {
retrySeconds = 300
}
job7, err := c.NewJob(
gocron.DurationJob(time.Duration(retrySeconds)*time.Second),
gocron.NewTask(func() {
ct.RunHourlySnapshotRetry(ctx, logger)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
)
if err != nil {
logger.Error("failed to start hourly snapshot retry cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created hourly snapshot retry cron job", "job", job7.ID(), "interval_seconds", retrySeconds)
// start cron scheduler
c.Start()
@@ -325,3 +307,45 @@ func main() {
os.Exit(0)
}
// alignStart snaps the first run to a sensible boundary (hour or 15-minute block) when possible.
func alignStart(now time.Time, freq time.Duration) time.Time {
if freq == time.Hour {
return now.Truncate(time.Hour).Add(time.Hour)
}
quarter := 15 * time.Minute
if freq%quarter == 0 {
return now.Truncate(quarter).Add(quarter)
}
return now.Add(freq)
}
func durationFromSeconds(value int, fallback int) time.Duration {
if value <= 0 {
return time.Second * time.Duration(fallback)
}
return time.Second * time.Duration(value)
}
// deriveEncryptionKey builds a 32-byte key from stable host identifiers.
// On Linux it prefers the BIOS/DMI product UUID, then /etc/machine-id; each
// candidate is SHA-256 hashed so the key length is fixed regardless of source.
// When no identifier is readable (or on other platforms) the static
// compile-time fallback key is used and a warning is logged.
func deriveEncryptionKey(logger *slog.Logger) []byte {
	if runtime.GOOS == "linux" {
		candidates := []struct {
			path string
			msg  string
		}{
			{"/sys/class/dmi/id/product_uuid", "derived encryption key from BIOS UUID"},
			{"/etc/machine-id", "derived encryption key from machine-id"},
		}
		for _, cand := range candidates {
			data, err := os.ReadFile(cand.path)
			if err != nil {
				continue
			}
			seed := strings.TrimSpace(string(data))
			if seed == "" {
				continue
			}
			sum := sha256.Sum256([]byte(seed))
			logger.Debug(cand.msg)
			return sum[:]
		}
	}
	logger.Warn("using fallback encryption key; hardware UUID not available")
	return []byte(fallbackEncryptionKey)
}

View File

@@ -10,10 +10,14 @@ buildtime=$(date +%Y-%m-%dT%T%z)
#Extract the version from yml
package_version=$(grep 'version:' "$package_name.yml" | awk '{print $2}' | tr -d '"' | sed 's/^v//')
#platforms=("linux/amd64" "darwin/amd64")
host_os=$(uname -s | tr '[:upper:]' '[:lower:]')
host_arch=$(uname -m)
platforms=("linux/amd64")
if [[ "$host_os" == "darwin" && ( "$host_arch" == "x86_64" || "$host_arch" == "amd64" || "$host_arch" == "arm64" ) ]]; then
platforms=("darwin/amd64")
fi
echo Building::
echo Building: $package_name
echo - Version $package_version
echo - Commit $commit
echo - Build Time $buildtime
@@ -30,7 +34,7 @@ do
starttime=$(TZ=Australia/Sydney date +%Y-%m-%dT%T%z)
echo "build commences at $starttime"
env GOOS=$GOOS GOARCH=$GOARCH go build -trimpath -ldflags="-X main.version=$package_version -X main.commit=$commit -X main.buildTime=$buildtime" -o build/$output_name $package
env GOOS=$GOOS GOARCH=$GOARCH go build -trimpath -ldflags="-X main.version=$package_version -X main.sha1ver=$commit -X main.buildTime=$buildtime" -o build/$output_name $package
if [ $? -ne 0 ]; then
echo 'An error has occurred! Aborting the script execution...'
exit 1
@@ -40,6 +44,4 @@ do
#sha256sum build/${output_name}.gz > build/${output_name}_checksum.txt
done
nfpm package --config $package_name.yml --packager rpm --target build/
ls -lah build

View File

@@ -4,7 +4,7 @@ set -euo pipefail
# Usage: ./update-swagger-ui.sh [version]
# Example: ./update-swagger-ui.sh v5.17.14
# If no version is provided, the default below is used.
VERSION="${1:-v5.29.5}"
VERSION="${1:-v5.31.0}"
TARGET_DIR="server/router/swagger-ui-dist"
TARBALL_URL="https://github.com/swagger-api/swagger-ui/archive/refs/tags/${VERSION}.tar.gz"
@@ -41,11 +41,19 @@ fi
echo ">> Patching swagger-initializer.js to point at /swagger.json"
sed -i -E \
if sed --version >/dev/null 2>&1; then
SED_INPLACE=(-i)
else
SED_INPLACE=(-i '')
fi
append_validator=$'/url:[[:space:]]*"[^"]*swagger\\.json"[[:space:]]*,?$/a\\\n validatorUrl: null,'
sed "${SED_INPLACE[@]}" -E \
-e 's#configUrl:[[:space:]]*["'\''"][^"'\''"]*["'\''"]#url: "/swagger.json"#' \
-e 's#url:[[:space:]]*["'\''"][^"'\''"]*["'\''"]#url: "/swagger.json"#' \
-e 's#urls:[[:space:]]*\[[^]]*\]#url: "/swagger.json"#' \
-e '/url:[[:space:]]*"[^\"]*swagger\.json"[[:space:]]*,?$/a\ validatorUrl: null,' \
-e "$append_validator" \
"$INDEX"
echo ">> Done. Files are in ${TARGET_DIR}"
echo ">> Done. Files are in ${TARGET_DIR}"

View File

@@ -0,0 +1,205 @@
package handler
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"vctp/db"
"vctp/server/models"
)
// DailyCreationDiagnostics returns missing CreationTime diagnostics for a daily summary table.
// @Summary Daily summary CreationTime diagnostics
// @Description Returns counts of daily summary rows missing CreationTime and sample rows for the given date.
// @Tags diagnostics
// @Produce json
// @Param date query string true "Daily date (YYYY-MM-DD)"
// @Success 200 {object} models.DailyCreationDiagnosticsResponse "Diagnostics result"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 404 {object} models.ErrorResponse "Summary not found"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/diagnostics/daily-creation [get]
func (h *Handler) DailyCreationDiagnostics(w http.ResponseWriter, r *http.Request) {
	// Validate the requested day; the summary table name is derived from it.
	dateValue := strings.TrimSpace(r.URL.Query().Get("date"))
	if dateValue == "" {
		writeJSONError(w, http.StatusBadRequest, "date is required")
		return
	}
	// Parse in the server's local zone, matching how daily tables are named.
	loc := time.Now().Location()
	parsed, err := time.ParseInLocation("2006-01-02", dateValue, loc)
	if err != nil {
		writeJSONError(w, http.StatusBadRequest, "date must be YYYY-MM-DD")
		return
	}
	tableName := fmt.Sprintf("inventory_daily_summary_%s", parsed.Format("20060102"))
	// Defense in depth: the name is built from a parsed date, but validate it
	// anyway before interpolating it into SQL.
	if _, err := db.SafeTableName(tableName); err != nil {
		writeJSONError(w, http.StatusBadRequest, "invalid summary table name")
		return
	}
	// Bound the whole diagnostic pass; these are read-only COUNT/SELECT queries.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	dbConn := h.Database.DB()
	if !db.TableExists(ctx, dbConn, tableName) {
		writeJSONError(w, http.StatusNotFound, "daily summary table not found")
		return
	}
	// Total rows in the summary; any failure here is fatal for the request.
	var totalRows int64
	countQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s`, tableName)
	if err := dbConn.GetContext(ctx, &totalRows, countQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read summary rows")
		return
	}
	// Rows with no usable CreationTime (NULL or the zero sentinel).
	var missingTotal int64
	missingQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE "CreationTime" IS NULL OR "CreationTime" = 0`, tableName)
	if err := dbConn.GetContext(ctx, &missingTotal, missingQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics missing count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read missing creation rows")
		return
	}
	// Rows present for only part of the day (AvgIsPresent strictly below 1,
	// with a small epsilon to tolerate float rounding).
	var avgIsPresentLtOne int64
	avgPresenceQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE "AvgIsPresent" IS NOT NULL AND "AvgIsPresent" < 0.999999`, tableName)
	if err := dbConn.GetContext(ctx, &avgIsPresentLtOne, avgPresenceQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics avg-is-present count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read avg is present rows")
		return
	}
	// Intersection: missing CreationTime AND only partially present.
	var missingPartialCount int64
	missingPartialQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE ("CreationTime" IS NULL OR "CreationTime" = 0) AND "AvgIsPresent" IS NOT NULL AND "AvgIsPresent" < 0.999999`, tableName)
	if err := dbConn.GetContext(ctx, &missingPartialCount, missingPartialQuery); err != nil {
		h.Logger.Warn("daily creation diagnostics missing partial count failed", "table", tableName, "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to read missing partial rows")
		return
	}
	missingPct := 0.0
	if totalRows > 0 {
		missingPct = float64(missingTotal) * 100 / float64(totalRows)
	}
	// Per-vCenter breakdown of missing rows. Best-effort: on error we log and
	// return an empty grouping rather than failing the whole request.
	byVcenter := make([]models.DailyCreationMissingByVcenter, 0)
	byVcenterQuery := fmt.Sprintf(`
		SELECT "Vcenter", COUNT(*) AS missing_count
		FROM %s
		WHERE "CreationTime" IS NULL OR "CreationTime" = 0
		GROUP BY "Vcenter"
		ORDER BY missing_count DESC
	`, tableName)
	func() {
		rows, err := dbConn.QueryxContext(ctx, byVcenterQuery)
		if err != nil {
			h.Logger.Warn("daily creation diagnostics by-vcenter failed", "table", tableName, "error", err)
			return
		}
		defer rows.Close()
		for rows.Next() {
			var vcenter string
			var count int64
			if err := rows.Scan(&vcenter, &count); err != nil {
				continue
			}
			byVcenter = append(byVcenter, models.DailyCreationMissingByVcenter{
				Vcenter:      vcenter,
				MissingCount: count,
			})
		}
		// Surface iteration errors instead of silently truncating the grouping.
		if err := rows.Err(); err != nil {
			h.Logger.Warn("daily creation diagnostics by-vcenter failed", "table", tableName, "error", err)
		}
	}()
	const sampleLimit = 10
	// scanSamples runs one of the sample queries below and collects its rows.
	// Both queries project the same columns, so the scan logic is shared; on
	// error the failMsg is logged and whatever was collected is returned.
	scanSamples := func(query, failMsg string) []models.DailyCreationMissingSample {
		out := make([]models.DailyCreationMissingSample, 0, sampleLimit)
		rows, err := dbConn.QueryxContext(ctx, query)
		if err != nil {
			h.Logger.Warn(failMsg, "table", tableName, "error", err)
			return out
		}
		defer rows.Close()
		for rows.Next() {
			var (
				vcenter                      string
				vmId, vmUuid, name           sql.NullString
				samplesPresent, snapshotTime sql.NullInt64
				avgIsPresent                 sql.NullFloat64
			)
			if err := rows.Scan(&vcenter, &vmId, &vmUuid, &name, &samplesPresent, &avgIsPresent, &snapshotTime); err != nil {
				continue
			}
			out = append(out, models.DailyCreationMissingSample{
				Vcenter:        vcenter,
				VmId:           vmId.String,
				VmUuid:         vmUuid.String,
				Name:           name.String,
				SamplesPresent: samplesPresent.Int64,
				AvgIsPresent:   avgIsPresent.Float64,
				SnapshotTime:   snapshotTime.Int64,
			})
		}
		if err := rows.Err(); err != nil {
			h.Logger.Warn(failMsg, "table", tableName, "error", err)
		}
		return out
	}
	// Sample of all rows missing CreationTime, busiest VMs first.
	sampleQuery := fmt.Sprintf(`
		SELECT "Vcenter","VmId","VmUuid","Name","SamplesPresent","AvgIsPresent","SnapshotTime"
		FROM %s
		WHERE "CreationTime" IS NULL OR "CreationTime" = 0
		ORDER BY "SamplesPresent" DESC
		LIMIT %d
	`, tableName, sampleLimit)
	samples := scanSamples(sampleQuery, "daily creation diagnostics sample failed")
	// Sample restricted to partially-present rows (see missingPartialCount).
	partialSampleQuery := fmt.Sprintf(`
		SELECT "Vcenter","VmId","VmUuid","Name","SamplesPresent","AvgIsPresent","SnapshotTime"
		FROM %s
		WHERE ("CreationTime" IS NULL OR "CreationTime" = 0)
		AND "AvgIsPresent" IS NOT NULL
		AND "AvgIsPresent" < 0.999999
		ORDER BY "SamplesPresent" DESC
		LIMIT %d
	`, tableName, sampleLimit)
	partialSamples := scanSamples(partialSampleQuery, "daily creation diagnostics partial sample failed")
	response := models.DailyCreationDiagnosticsResponse{
		Status:                        "OK",
		Date:                          parsed.Format("2006-01-02"),
		Table:                         tableName,
		TotalRows:                     totalRows,
		MissingCreationCount:          missingTotal,
		MissingCreationPct:            missingPct,
		AvgIsPresentLtOneCount:        avgIsPresentLtOne,
		MissingCreationPartialCount:   missingPartialCount,
		MissingByVcenter:              byVcenter,
		Samples:                       samples,
		MissingCreationPartialSamples: partialSamples,
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	if err := json.NewEncoder(w).Encode(response); err != nil {
		h.Logger.Warn("daily creation diagnostics encode failed", "error", err)
	}
}

View File

@@ -14,8 +14,8 @@ import (
// @Accept json
// @Produce json
// @Param payload body map[string]string true "Plaintext payload"
// @Success 200 {object} map[string]string "Ciphertext response"
// @Failure 500 {object} map[string]string "Server error"
// @Success 200 {object} models.StatusMessageResponse "Ciphertext response"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/encrypt [post]
func (h *Handler) EncryptData(w http.ResponseWriter, r *http.Request) {
//ctx := context.Background()

View File

@@ -1,15 +1,11 @@
package handler
import (
"context"
"log/slog"
"net/http"
"vctp/db"
"vctp/internal/secrets"
"vctp/internal/settings"
"vctp/internal/vcenter"
"github.com/a-h/templ"
)
// Handler handles requests.
@@ -23,12 +19,3 @@ type Handler struct {
Secret *secrets.Secrets
Settings *settings.Settings
}
func (h *Handler) html(ctx context.Context, w http.ResponseWriter, status int, t templ.Component) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.WriteHeader(status)
if err := t.Render(ctx, w); err != nil {
h.Logger.Error("Failed to render component", "error", err)
}
}

17
server/handler/metrics.go Normal file
View File

@@ -0,0 +1,17 @@
package handler
import (
"net/http"
"vctp/internal/metrics"
)
// Metrics exposes Prometheus metrics.
// @Summary Prometheus metrics
// @Description Exposes Prometheus metrics for vctp.
// @Tags metrics
// @Produce plain
// @Success 200 "Prometheus metrics"
// @Router /metrics [get]
func (h *Handler) Metrics(w http.ResponseWriter, r *http.Request) {
	// Delegate directly to the shared metrics handler; no per-request state.
	promHandler := metrics.Handler()
	promHandler.ServeHTTP(w, r)
}

View File

@@ -14,7 +14,7 @@ import (
// @Tags reports
// @Produce application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
// @Success 200 {file} file "Inventory XLSX report"
// @Failure 500 {object} map[string]string "Report generation failed"
// @Failure 500 {object} models.ErrorResponse "Report generation failed"
// @Router /api/report/inventory [get]
func (h *Handler) InventoryReportDownload(w http.ResponseWriter, r *http.Request) {
@@ -48,7 +48,7 @@ func (h *Handler) InventoryReportDownload(w http.ResponseWriter, r *http.Request
// @Tags reports
// @Produce application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
// @Success 200 {file} file "Updates XLSX report"
// @Failure 500 {object} map[string]string "Report generation failed"
// @Failure 500 {object} models.ErrorResponse "Report generation failed"
// @Router /api/report/updates [get]
func (h *Handler) UpdateReportDownload(w http.ResponseWriter, r *http.Request) {

View File

@@ -0,0 +1,119 @@
package handler
import (
"context"
"encoding/json"
"net/http"
"strings"
"time"
"vctp/internal/settings"
"vctp/internal/tasks"
"vctp/server/models"
)
// SnapshotAggregateForce forces regeneration of a daily or monthly summary table.
// @Summary Force snapshot aggregation
// @Description Forces regeneration of a daily or monthly summary table for a specified date or month.
// @Tags snapshots
// @Produce json
// @Param type query string true "Aggregation type: daily or monthly"
// @Param date query string true "Daily date (YYYY-MM-DD) or monthly date (YYYY-MM)"
// @Param granularity query string false "Monthly aggregation granularity: hourly or daily"
// @Success 200 {object} models.StatusResponse "Aggregation complete"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/snapshots/aggregate [post]
func (h *Handler) SnapshotAggregateForce(w http.ResponseWriter, r *http.Request) {
	// Normalize query parameters: type/granularity are case-insensitive.
	snapshotType := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("type")))
	dateValue := strings.TrimSpace(r.URL.Query().Get("date"))
	granularity := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("granularity")))
	startedAt := time.Now()
	// Dates are interpreted in the server's local time zone.
	loc := time.Now().Location()
	if snapshotType == "" || dateValue == "" {
		h.Logger.Warn("Snapshot aggregation request missing parameters",
			"type", snapshotType,
			"date", dateValue,
		)
		writeJSONError(w, http.StatusBadRequest, "type and date are required")
		return
	}
	// Granularity only applies to monthly aggregation; silently drop it otherwise.
	if granularity != "" && snapshotType != "monthly" {
		h.Logger.Debug("Snapshot aggregation ignoring granularity for non-monthly request",
			"type", snapshotType,
			"granularity", granularity,
		)
		granularity = ""
	}
	if snapshotType == "monthly" && granularity != "" && granularity != "hourly" && granularity != "daily" {
		h.Logger.Warn("Snapshot aggregation invalid granularity", "granularity", granularity)
		writeJSONError(w, http.StatusBadRequest, "granularity must be hourly or daily")
		return
	}
	// Deliberately detached from the request context so a long aggregation is
	// not cancelled when the HTTP client disconnects.
	ctx := context.Background()
	// Shallow copy of the settings values so a per-request granularity
	// override does not leak into the shared configuration.
	// NOTE(review): assumes Values.Settings is a struct value (not a pointer);
	// if it is a pointer this override would mutate shared state — confirm.
	settingsCopy := *h.Settings.Values
	if granularity != "" {
		settingsCopy.Settings.MonthlyAggregationGranularity = granularity
	}
	// Reuse the cron-task implementation for the actual aggregation work,
	// pointed at the (possibly overridden) settings copy.
	ct := &tasks.CronTask{
		Logger:   h.Logger,
		Database: h.Database,
		Settings: &settings.Settings{Logger: h.Logger, SettingsPath: h.Settings.SettingsPath, Values: &settingsCopy},
	}
	switch snapshotType {
	case "daily":
		parsed, err := time.ParseInLocation("2006-01-02", dateValue, loc)
		if err != nil {
			h.Logger.Warn("Snapshot aggregation invalid daily date format", "date", dateValue)
			writeJSONError(w, http.StatusBadRequest, "date must be YYYY-MM-DD")
			return
		}
		h.Logger.Info("Starting daily snapshot aggregation", "date", parsed.Format("2006-01-02"), "force", true)
		// force=true regenerates the summary even if it already exists.
		if err := ct.AggregateDailySummary(ctx, parsed, true); err != nil {
			h.Logger.Error("Daily snapshot aggregation failed", "date", parsed.Format("2006-01-02"), "error", err)
			writeJSONError(w, http.StatusInternalServerError, err.Error())
			return
		}
	case "monthly":
		parsed, err := time.ParseInLocation("2006-01", dateValue, loc)
		if err != nil {
			h.Logger.Warn("Snapshot aggregation invalid monthly date format", "date", dateValue)
			writeJSONError(w, http.StatusBadRequest, "date must be YYYY-MM")
			return
		}
		h.Logger.Info("Starting monthly snapshot aggregation", "date", parsed.Format("2006-01"), "force", true, "granularity", granularity)
		if err := ct.AggregateMonthlySummary(ctx, parsed, true); err != nil {
			h.Logger.Error("Monthly snapshot aggregation failed", "date", parsed.Format("2006-01"), "error", err)
			writeJSONError(w, http.StatusInternalServerError, err.Error())
			return
		}
	default:
		h.Logger.Warn("Snapshot aggregation invalid type", "type", snapshotType)
		writeJSONError(w, http.StatusBadRequest, "type must be daily or monthly")
		return
	}
	h.Logger.Info("Snapshot aggregation completed",
		"type", snapshotType,
		"date", dateValue,
		"granularity", granularity,
		"duration", time.Since(startedAt),
	)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(map[string]string{
		"status": "OK",
	})
}
// writeJSONError writes a models.ErrorResponse envelope with the given HTTP
// status code and message, setting the JSON content type.
func writeJSONError(w http.ResponseWriter, status int, message string) {
	payload := models.ErrorResponse{
		Status:  "ERROR",
		Message: message,
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(payload)
}

View File

@@ -0,0 +1,51 @@
package handler
import (
"context"
"encoding/json"
"net/http"
"strings"
"time"
"vctp/internal/tasks"
)
// SnapshotForceHourly triggers an on-demand hourly snapshot run.
// @Summary Trigger hourly snapshot (manual)
// @Description Manually trigger an hourly snapshot for all configured vCenters. Requires confirmation text to avoid accidental execution.
// @Tags snapshots
// @Accept json
// @Produce json
// @Param confirm query string true "Confirmation text; must be 'FORCE'"
// @Success 200 {object} models.StatusResponse "Snapshot started"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/snapshots/hourly/force [post]
func (h *Handler) SnapshotForceHourly(w http.ResponseWriter, r *http.Request) {
	// Require an explicit confirmation token so the endpoint cannot fire by accident.
	confirm := strings.TrimSpace(r.URL.Query().Get("confirm"))
	if strings.ToUpper(confirm) != "FORCE" {
		writeJSONError(w, http.StatusBadRequest, "confirm must be 'FORCE'")
		return
	}
	// Detached context: the snapshot should finish even if the client disconnects.
	ctx := context.Background()
	runner := &tasks.CronTask{
		Logger:   h.Logger,
		Database: h.Database,
		Settings: h.Settings,
		VcCreds:  h.VcCreds,
	}
	started := time.Now()
	h.Logger.Info("Manual hourly snapshot requested")
	err := runner.RunVcenterSnapshotHourly(ctx, h.Logger.With("manual", true), true)
	if err != nil {
		h.Logger.Error("Manual hourly snapshot failed", "error", err)
		writeJSONError(w, http.StatusInternalServerError, err.Error())
		return
	}
	h.Logger.Info("Manual hourly snapshot completed", "duration", time.Since(started))
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(map[string]string{
		"status": "OK",
	})
}

View File

@@ -0,0 +1,38 @@
package handler
import (
"context"
"encoding/json"
"net/http"
"vctp/internal/report"
)
// SnapshotMigrate rebuilds the snapshot registry and normalizes hourly table names.
// @Summary Migrate snapshot registry
// @Description Rebuilds the snapshot registry from existing tables and renames hourly tables to epoch-based names.
// @Tags snapshots
// @Produce json
// @Success 200 {object} models.SnapshotMigrationResponse "Migration results"
// @Failure 500 {object} models.SnapshotMigrationResponse "Server error"
// @Router /api/snapshots/migrate [post]
func (h *Handler) SnapshotMigrate(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	stats, err := report.MigrateSnapshotRegistry(ctx, h.Database)
	w.Header().Set("Content-Type", "application/json")
	// Even on failure the partial stats are returned so the caller can see
	// how far the migration got.
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		_ = json.NewEncoder(w).Encode(map[string]interface{}{
			"status": "ERROR",
			"error":  err.Error(),
			"stats":  stats,
		})
		return
	}
	w.WriteHeader(http.StatusOK)
	_ = json.NewEncoder(w).Encode(map[string]interface{}{
		"status": "OK",
		"stats":  stats,
	})
}

View File

@@ -0,0 +1,68 @@
package handler
import (
"encoding/json"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"vctp/internal/report"
)
// SnapshotRegenerateHourlyReports regenerates missing hourly snapshot XLSX reports on disk.
// @Summary Regenerate hourly snapshot reports
// @Description Regenerates XLSX reports for hourly snapshots when the report files are missing or empty.
// @Tags snapshots
// @Produce json
// @Success 200 {object} models.SnapshotRegenerateReportsResponse "Regeneration summary"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/snapshots/regenerate-hourly-reports [post]
func (h *Handler) SnapshotRegenerateHourlyReports(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	// Resolve the reports directory (falling back to the packaged default)
	// and make sure it exists before writing anything.
	reportsDir := strings.TrimSpace(h.Settings.Values.Settings.ReportsDir)
	if reportsDir == "" {
		reportsDir = "/var/lib/vctp/reports"
	}
	if err := os.MkdirAll(reportsDir, 0o755); err != nil {
		h.Logger.Error("failed to create reports directory", "error", err, "path", reportsDir)
		writeJSONError(w, http.StatusInternalServerError, "failed to create reports directory")
		return
	}
	// Window spanning all history: epoch start, far-future end.
	windowStart := time.Unix(0, 0)
	windowEnd := time.Now().AddDate(10, 0, 0) // sufficiently in the future to include all records
	records, err := report.SnapshotRecordsWithFallback(ctx, h.Database, "hourly", "inventory_hourly_", "epoch", windowStart, windowEnd)
	if err != nil {
		h.Logger.Error("failed to list hourly snapshots", "error", err)
		writeJSONError(w, http.StatusInternalServerError, "failed to list hourly snapshots")
		return
	}
	var regenerated, skipped, failures int
	for _, record := range records {
		dest := filepath.Join(reportsDir, record.TableName+".xlsx")
		// A non-empty file on disk means the report already exists; skip it.
		if info, statErr := os.Stat(dest); statErr == nil && info.Size() > 0 {
			skipped++
			continue
		}
		if _, genErr := report.SaveTableReport(h.Logger, h.Database, ctx, record.TableName, reportsDir); genErr != nil {
			failures++
			h.Logger.Warn("failed to regenerate hourly report", "table", record.TableName, "error", genErr)
			continue
		}
		regenerated++
	}
	summary := map[string]interface{}{
		"status":       "OK",
		"total":        len(records),
		"regenerated":  regenerated,
		"skipped":      skipped,
		"errors":       failures,
		"reports_dir":  reportsDir,
		"snapshotType": "hourly",
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(summary)
}

View File

@@ -0,0 +1,219 @@
package handler
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"vctp/db"
"vctp/internal/report"
)
// SnapshotRepair scans existing daily summaries and backfills missing SnapshotTime and lifecycle fields.
// @Summary Repair daily summaries
// @Description Backfills SnapshotTime and lifecycle info for existing daily summary tables and reruns monthly lifecycle refinement using hourly data.
// @Tags snapshots
// @Produce json
// @Success 200 {object} models.SnapshotRepairResponse
// @Router /api/snapshots/repair [post]
func (h *Handler) SnapshotRepair(w http.ResponseWriter, r *http.Request) {
	// Mutating endpoint: accept POST only.
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	h.Logger.Info("snapshot repair started", "scope", "daily")
	repairedCount, failedCount := h.repairDailySummaries(r.Context(), time.Now())
	h.Logger.Info("snapshot repair finished", "daily_repaired", repairedCount, "daily_failed", failedCount)
	result := map[string]string{
		"status":   "ok",
		"repaired": strconv.Itoa(repairedCount),
		"failed":   strconv.Itoa(failedCount),
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(result)
}
// repairDailySummaries walks every registered daily summary table up to "now"
// and backfills SnapshotTime plus creation/deletion lifecycle fields from the
// hourly snapshot tables covering the same 24-hour window.
//
// For each daily record it: ensures the summary schema, builds a SQL UNION
// over that day's hourly tables (templates excluded), backfills SnapshotTime,
// then refines CreationTime/DeletionTime. Steps run in that order because the
// refinement relies on SnapshotTime being populated first.
//
// Returns the number of tables repaired and the number that failed; a listing
// error counts as one failure. Per-table failures are logged and skipped so a
// single bad table does not abort the whole pass.
func (h *Handler) repairDailySummaries(ctx context.Context, now time.Time) (repaired int, failed int) {
	dbConn := h.Database.DB()
	// Zero start time means "from the beginning of history".
	dailyRecs, err := report.SnapshotRecordsWithFallback(ctx, h.Database, "daily", "inventory_daily_summary_", "20060102", time.Time{}, now)
	if err != nil {
		h.Logger.Warn("failed to list daily summaries", "error", err)
		return 0, 1
	}
	for _, rec := range dailyRecs {
		h.Logger.Debug("repair daily summary table", "table", rec.TableName, "snapshot_time", rec.SnapshotTime)
		// Repair window: the calendar day this summary covers.
		dayStart := rec.SnapshotTime
		dayEnd := dayStart.Add(24 * time.Hour)
		if err := db.EnsureSummaryTable(ctx, dbConn, rec.TableName); err != nil {
			h.Logger.Warn("ensure summary table failed", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		// Hourly snapshots inside the window are the source data for backfill.
		hourlyRecs, err := report.SnapshotRecordsWithFallback(ctx, h.Database, "hourly", "inventory_hourly_", "epoch", dayStart, dayEnd)
		if err != nil || len(hourlyRecs) == 0 {
			h.Logger.Warn("no hourly snapshots for repair window", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		// Columns projected from each hourly table into the UNION.
		cols := []string{
			`"InventoryId"`, `"Name"`, `"Vcenter"`, `"VmId"`, `"EventKey"`, `"CloudId"`, `"CreationTime"`,
			`"DeletionTime"`, `"ResourcePool"`, `"Datacenter"`, `"Cluster"`, `"Folder"`,
			`"ProvisionedDisk"`, `"VcpuCount"`, `"RamGB"`, `"IsTemplate"`, `"PoweredOn"`,
			`"SrmPlaceholder"`, `"VmUuid"`, `"SnapshotTime"`,
		}
		// Filter excludes template VMs; IsTemplate is compared as text because
		// it is stored with mixed representations ('TRUE', 'true', '1').
		union, err := buildUnionFromRecords(hourlyRecs, cols, `COALESCE(CAST("IsTemplate" AS TEXT), '') NOT IN ('TRUE','true','1')`)
		if err != nil {
			h.Logger.Warn("failed to build union for repair", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("built hourly union for repair", "table", rec.TableName, "hourly_tables", len(hourlyRecs))
		if err := db.BackfillSnapshotTimeFromUnion(ctx, dbConn, rec.TableName, union); err != nil {
			h.Logger.Warn("failed to backfill snapshot time", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("snapshot time backfill complete", "table", rec.TableName)
		if err := db.RefineCreationDeletionFromUnion(ctx, dbConn, rec.TableName, union); err != nil {
			h.Logger.Warn("failed to refine lifecycle during repair", "table", rec.TableName, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("lifecycle refinement complete", "table", rec.TableName)
		h.Logger.Info("repair applied", "table", rec.TableName, "actions", "snapshot_time+lifecycle")
		repaired++
	}
	return repaired, failed
}
// SnapshotRepairSuite runs a sequence of repair routines to fix older deployments in one call.
// It rebuilds the snapshot registry, syncs vcenter totals, repairs daily summaries, and refines monthly lifecycle data.
// Each step is best-effort: a failing step is logged and the suite continues.
// @Summary Run full snapshot repair suite
// @Description Rebuilds snapshot registry, backfills per-vCenter totals, repairs daily summaries (SnapshotTime/lifecycle), and refines monthly lifecycle.
// @Tags snapshots
// @Produce json
// @Success 200 {object} models.SnapshotRepairSuiteResponse
// @Router /api/snapshots/repair/all [post]
func (h *Handler) SnapshotRepairSuite(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	ctx := r.Context()
	conn := h.Database.DB()

	// Step 1: rebuild snapshot registry from existing tables.
	h.Logger.Info("repair suite step", "step", "snapshot_registry")
	if stats, err := report.MigrateSnapshotRegistry(ctx, h.Database); err != nil {
		h.Logger.Warn("snapshot registry migration failed", "error", err)
	} else {
		h.Logger.Info("snapshot registry migration complete", "hourly_renamed", stats.HourlyRenamed, "daily_registered", stats.DailyRegistered, "monthly_registered", stats.MonthlyRegistered, "errors", stats.Errors)
	}

	// Step 2: backfill vcenter_totals from registry hourly tables.
	h.Logger.Info("repair suite step", "step", "vcenter_totals")
	if err := db.SyncVcenterTotalsFromSnapshots(ctx, conn); err != nil {
		h.Logger.Warn("sync vcenter totals failed", "error", err)
	}

	// Step 3: repair daily summaries (snapshot time + lifecycle).
	h.Logger.Info("repair suite step", "step", "daily_summaries")
	dailyRepaired, dailyFailed := h.repairDailySummaries(ctx, time.Now())

	// Step 4: refine monthly lifecycle using daily summaries
	// (requires SnapshotTime, which step 3 just backfilled).
	h.Logger.Info("repair suite step", "step", "monthly_refine")
	monthlyRefined, monthlyFailed := h.refineMonthlyFromDaily(ctx, time.Now())

	// Counts are emitted as strings, matching models.SnapshotRepairSuiteResponse.
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(map[string]string{
		"status":          "ok",
		"daily_repaired":  strconv.Itoa(dailyRepaired),
		"daily_failed":    strconv.Itoa(dailyFailed),
		"monthly_refined": strconv.Itoa(monthlyRefined),
		"monthly_failed":  strconv.Itoa(monthlyFailed),
	})
}
// refineMonthlyFromDaily rebuilds lifecycle (CreationTime/DeletionTime) data in
// every monthly summary table by unioning that month's daily summary tables.
// It returns the number of monthly tables refined and the number that failed;
// a failure to even list the daily summaries is reported as (0, 1).
func (h *Handler) refineMonthlyFromDaily(ctx context.Context, now time.Time) (refined int, failed int) {
	conn := h.Database.DB()

	dailyRecs, err := report.SnapshotRecordsWithFallback(ctx, h.Database, "daily", "inventory_daily_summary_", "20060102", time.Time{}, now)
	if err != nil {
		h.Logger.Warn("failed to list daily summaries for monthly refine", "error", err)
		return 0, 1
	}

	// Group daily tables by month (YYYYMM).
	byMonth := make(map[string][]report.SnapshotRecord)
	for _, rec := range dailyRecs {
		month := rec.SnapshotTime.Format("200601")
		byMonth[month] = append(byMonth[month], rec)
	}

	// Columns pulled from the daily summaries into the union query.
	cols := []string{
		`"InventoryId"`, `"Name"`, `"Vcenter"`, `"VmId"`, `"EventKey"`, `"CloudId"`, `"CreationTime"`,
		`"DeletionTime"`, `"ResourcePool"`, `"Datacenter"`, `"Cluster"`, `"Folder"`,
		`"ProvisionedDisk"`, `"VcpuCount"`, `"RamGB"`, `"IsTemplate"`, `"PoweredOn"`,
		`"SrmPlaceholder"`, `"VmUuid"`, `"SnapshotTime"`,
	}

	for month, dayRecs := range byMonth {
		summaryTable := fmt.Sprintf("inventory_monthly_summary_%s", month)
		h.Logger.Debug("monthly refine", "table", summaryTable, "daily_tables", len(dayRecs))

		if err := db.EnsureSummaryTable(ctx, conn, summaryTable); err != nil {
			h.Logger.Warn("ensure monthly summary failed", "table", summaryTable, "error", err)
			failed++
			continue
		}
		// Template VMs are filtered out of the union before refinement.
		union, err := buildUnionFromRecords(dayRecs, cols, `COALESCE(CAST("IsTemplate" AS TEXT), '') NOT IN ('TRUE','true','1')`)
		if err != nil {
			h.Logger.Warn("failed to build union for monthly refine", "table", summaryTable, "error", err)
			failed++
			continue
		}
		if err := db.RefineCreationDeletionFromUnion(ctx, conn, summaryTable, union); err != nil {
			h.Logger.Warn("failed to refine monthly lifecycle", "table", summaryTable, "error", err)
			failed++
			continue
		}
		h.Logger.Debug("monthly refine applied", "table", summaryTable)
		refined++
	}
	return refined, failed
}
// buildUnionFromRecords composes a UNION ALL query selecting the given columns
// from every snapshot table in recs, optionally filtered by a shared WHERE
// clause. Tables whose names fail db.ValidateTableName are silently skipped
// (names are interpolated into the SQL text, so only validated names may pass).
// Errors when recs is empty or every table name was rejected.
func buildUnionFromRecords(recs []report.SnapshotRecord, columns []string, where string) (string, error) {
	if len(recs) == 0 {
		return "", fmt.Errorf("no tables provided for union")
	}
	selectCols := strings.Join(columns, ", ")
	selects := make([]string, 0, len(recs))
	for _, rec := range recs {
		if db.ValidateTableName(rec.TableName) != nil {
			continue
		}
		stmt := fmt.Sprintf(`SELECT %s FROM %s`, selectCols, rec.TableName)
		if where != "" {
			stmt += " WHERE " + where
		}
		selects = append(selects, stmt)
	}
	if len(selects) == 0 {
		return "", fmt.Errorf("no valid tables for union")
	}
	return strings.Join(selects, "\nUNION ALL\n"), nil
}

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"net/http"
"net/url"
"strings"
"vctp/components/views"
"vctp/internal/report"
@@ -22,7 +21,7 @@ import (
// @Failure 500 {string} string "Server error"
// @Router /snapshots/hourly [get]
func (h *Handler) SnapshotHourlyList(w http.ResponseWriter, r *http.Request) {
h.renderSnapshotList(w, r, "inventory_daily_", "Hourly Inventory Snapshots", views.SnapshotHourlyList)
h.renderSnapshotList(w, r, "hourly", "Hourly Inventory Snapshots", views.SnapshotHourlyList)
}
// SnapshotDailyList renders the daily snapshot list page.
@@ -34,7 +33,7 @@ func (h *Handler) SnapshotHourlyList(w http.ResponseWriter, r *http.Request) {
// @Failure 500 {string} string "Server error"
// @Router /snapshots/daily [get]
func (h *Handler) SnapshotDailyList(w http.ResponseWriter, r *http.Request) {
h.renderSnapshotList(w, r, "inventory_daily_summary_", "Daily Inventory Snapshots", views.SnapshotDailyList)
h.renderSnapshotList(w, r, "daily", "Daily Inventory Snapshots", views.SnapshotDailyList)
}
// SnapshotMonthlyList renders the monthly snapshot list page.
@@ -46,7 +45,7 @@ func (h *Handler) SnapshotDailyList(w http.ResponseWriter, r *http.Request) {
// @Failure 500 {string} string "Server error"
// @Router /snapshots/monthly [get]
func (h *Handler) SnapshotMonthlyList(w http.ResponseWriter, r *http.Request) {
h.renderSnapshotList(w, r, "inventory_monthly_summary_", "Monthly Inventory Snapshots", views.SnapshotMonthlyList)
h.renderSnapshotList(w, r, "monthly", "Monthly Inventory Snapshots", views.SnapshotMonthlyList)
}
// SnapshotReportDownload streams a snapshot table as XLSX.
@@ -56,8 +55,8 @@ func (h *Handler) SnapshotMonthlyList(w http.ResponseWriter, r *http.Request) {
// @Produce application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
// @Param table query string true "Snapshot table name"
// @Success 200 {file} file "Snapshot XLSX report"
// @Failure 400 {object} map[string]string "Invalid request"
// @Failure 500 {object} map[string]string "Server error"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/report/snapshot [get]
func (h *Handler) SnapshotReportDownload(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()
@@ -91,28 +90,43 @@ func (h *Handler) SnapshotReportDownload(w http.ResponseWriter, r *http.Request)
w.Write(reportData)
}
func (h *Handler) renderSnapshotList(w http.ResponseWriter, r *http.Request, prefix string, title string, renderer func([]views.SnapshotEntry) templ.Component) {
func (h *Handler) renderSnapshotList(w http.ResponseWriter, r *http.Request, snapshotType string, title string, renderer func([]views.SnapshotEntry) templ.Component) {
ctx := context.Background()
tables, err := report.ListTablesByPrefix(ctx, h.Database, prefix)
if err := report.EnsureSnapshotRegistry(ctx, h.Database); err != nil {
h.Logger.Error("Failed to ensure snapshot registry", "error", err)
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Unable to list snapshot tables: %s\n", err)
return
}
records, err := report.ListSnapshots(ctx, h.Database, snapshotType)
if err != nil {
h.Logger.Error("Failed to list snapshot tables", "error", err, "prefix", prefix)
h.Logger.Error("Failed to list snapshots", "error", err, "snapshot_type", snapshotType)
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Unable to list snapshot tables: %s\n", err)
return
}
entries := make([]views.SnapshotEntry, 0, len(tables))
for _, table := range tables {
if prefix == "inventory_daily_" && strings.HasPrefix(table, "inventory_daily_summary_") {
continue
entries := make([]views.SnapshotEntry, 0, len(records))
for _, record := range records {
label := report.FormatSnapshotLabel(snapshotType, record.SnapshotTime, record.TableName)
group := ""
switch snapshotType {
case "hourly":
group = record.SnapshotTime.Format("2006-01-02")
case "daily":
group = record.SnapshotTime.Format("January 2006")
case "monthly":
group = record.SnapshotTime.Format("2006")
}
label := table
if parsed, ok := report.FormatSnapshotLabel(prefix, table); ok {
label = parsed
count := record.SnapshotCount
if count < 0 {
count = 0
}
entries = append(entries, views.SnapshotEntry{
Label: label,
Link: "/api/report/snapshot?table=" + url.QueryEscape(table),
Link: "/reports/" + url.PathEscape(record.TableName) + ".xlsx",
Count: count,
Group: group,
})
}

View File

@@ -7,9 +7,10 @@ import (
)
// UpdateCleanup removes orphaned update records.
// @Summary Cleanup updates
// @Description Removes update records that are no longer associated with a VM.
// @Summary Cleanup updates (deprecated)
// @Description Deprecated: Removes update records that are no longer associated with a VM.
// @Tags maintenance
// @Deprecated
// @Produce text/plain
// @Success 200 {string} string "Cleanup completed"
// @Failure 500 {string} string "Server error"

View File

@@ -10,13 +10,14 @@ import (
)
// VcCleanup removes inventory entries for a vCenter instance.
// @Summary Cleanup vCenter inventory
// @Description Removes all inventory entries associated with a vCenter URL.
// @Summary Cleanup vCenter inventory (deprecated)
// @Description Deprecated: Removes all inventory entries associated with a vCenter URL.
// @Tags maintenance
// @Deprecated
// @Produce json
// @Param vc_url query string true "vCenter URL"
// @Success 200 {object} map[string]string "Cleanup completed"
// @Failure 400 {object} map[string]string "Invalid request"
// @Success 200 {object} models.StatusMessageResponse "Cleanup completed"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Router /api/cleanup/vcenter [delete]
func (h *Handler) VcCleanup(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()

231
server/handler/vcenters.go Normal file
View File

@@ -0,0 +1,231 @@
package handler
import (
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"vctp/components/views"
"vctp/db"
)
// VcenterList renders a list of vCenters being monitored.
// @Summary List vCenters
// @Description Lists all vCenters with recorded snapshot totals.
// @Tags vcenters
// @Produce text/html
// @Success 200 {string} string "HTML page"
// @Router /vcenters [get]
func (h *Handler) VcenterList(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Best-effort refresh of the totals before listing; a failure only logs.
	if err := db.SyncVcenterTotalsFromSnapshots(ctx, h.Database.DB()); err != nil {
		h.Logger.Warn("failed to sync vcenter totals", "error", err)
	}

	vcenters, err := db.ListVcenters(ctx, h.Database.DB())
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to list vcenters: %v", err), http.StatusInternalServerError)
		return
	}

	// Each vCenter links to its totals page.
	links := make([]views.VcenterLink, 0, len(vcenters))
	for _, name := range vcenters {
		links = append(links, views.VcenterLink{
			Name: name,
			Link: "/vcenters/totals?vcenter=" + url.QueryEscape(name),
		})
	}

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := views.VcenterList(links).Render(ctx, w); err != nil {
		http.Error(w, "Failed to render template", http.StatusInternalServerError)
	}
}
// VcenterTotals renders totals for a vCenter.
// @Summary vCenter totals
// @Description Shows per-snapshot totals for a vCenter.
// @Tags vcenters
// @Produce text/html
// @Param vcenter query string true "vCenter URL"
// @Param type query string false "hourly|daily|monthly (default: hourly)"
// @Param limit query int false "Limit results (default 200)"
// @Success 200 {string} string "HTML page"
// @Failure 400 {string} string "Missing vcenter"
// @Router /vcenters/totals [get]
func (h *Handler) VcenterTotals(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	vcenter := r.URL.Query().Get("vcenter")
	if vcenter == "" {
		http.Error(w, "vcenter is required", http.StatusBadRequest)
		return
	}

	// Normalise the requested view type; anything unrecognised (or empty)
	// falls back to hourly.
	viewType := strings.ToLower(r.URL.Query().Get("type"))
	switch viewType {
	case "daily", "monthly":
		// accepted as-is
	default:
		viewType = "hourly"
	}

	// Hourly totals are derived from snapshot tables, so refresh them first
	// (best effort — a sync failure only logs).
	if viewType == "hourly" {
		if err := db.SyncVcenterTotalsFromSnapshots(ctx, h.Database.DB()); err != nil {
			h.Logger.Warn("failed to sync vcenter totals", "error", err)
		}
	}

	// Row limit defaults to 200; only positive integers override it.
	limit := 200
	if raw := r.URL.Query().Get("limit"); raw != "" {
		if parsed, err := strconv.Atoi(raw); err == nil && parsed > 0 {
			limit = parsed
		}
	}

	rows, err := db.ListVcenterTotalsByType(ctx, h.Database.DB(), vcenter, viewType, limit)
	if err != nil {
		http.Error(w, fmt.Sprintf("failed to list totals: %v", err), http.StatusInternalServerError)
		return
	}

	entries := make([]views.VcenterTotalsEntry, 0, len(rows))
	for _, row := range rows {
		entries = append(entries, views.VcenterTotalsEntry{
			Snapshot:   time.Unix(row.SnapshotTime, 0).Local().Format("2006-01-02 15:04:05"),
			RawTime:    row.SnapshotTime,
			VmCount:    row.VmCount,
			VcpuTotal:  row.VcpuTotal,
			RamTotalGB: row.RamTotalGB,
		})
	}

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	page := views.VcenterTotalsPage(vcenter, entries, buildVcenterChart(entries), buildVcenterMeta(vcenter, viewType))
	if err := page.Render(ctx, w); err != nil {
		http.Error(w, "Failed to render template", http.StatusInternalServerError)
	}
}
// buildVcenterMeta assembles the navigation metadata (type-switch links and
// button CSS classes) for the vCenter totals page. viewType selects which
// button is highlighted; an empty or unrecognised value behaves as "hourly".
func buildVcenterMeta(vcenter string, viewType string) views.VcenterTotalsMeta {
	// All three links share the same query-string base.
	base := "/vcenters/totals?vcenter=" + url.QueryEscape(vcenter) + "&type="

	meta := views.VcenterTotalsMeta{
		ViewType:     viewType,
		TypeLabel:    "Hourly",
		HourlyLink:   base + "hourly",
		DailyLink:    base + "daily",
		MonthlyLink:  base + "monthly",
		HourlyClass:  "web3-button",
		DailyClass:   "web3-button",
		MonthlyClass: "web3-button",
	}
	switch viewType {
	case "daily":
		meta.TypeLabel = "Daily"
		meta.DailyClass = "web3-button active"
	case "monthly":
		meta.TypeLabel = "Monthly"
		meta.MonthlyClass = "web3-button active"
	default:
		// Hourly is the fallback for "", "hourly", and anything unexpected.
		meta.ViewType = "hourly"
		meta.HourlyClass = "web3-button active"
	}
	return meta
}
// buildVcenterChart converts totals entries (supplied newest-first) into SVG
// polyline point strings plus grid and tick data for the totals page.
// VM count, vCPU and RAM share a single Y scale spanning the largest value
// seen. Returns a zero value when there is nothing to plot.
func buildVcenterChart(entries []views.VcenterTotalsEntry) views.VcenterChartData {
	if len(entries) == 0 {
		return views.VcenterChartData{}
	}

	// Plot oldest on the left, newest on the right.
	plot := make([]views.VcenterTotalsEntry, 0, len(entries))
	for i := len(entries) - 1; i >= 0; i-- {
		plot = append(plot, entries[i])
	}

	width := 1200.0
	height := 260.0
	plotWidth := width - 60.0
	startX := 40.0

	// Shared Y scale: the maximum across all three series.
	maxVal := float64(0)
	for _, e := range plot {
		for _, v := range []float64{float64(e.VmCount), float64(e.VcpuTotal), float64(e.RamTotalGB)} {
			if v > maxVal {
				maxVal = v
			}
		}
	}
	if maxVal == 0 {
		maxVal = 1 // avoid division by zero for all-zero data
	}

	stepX := plotWidth
	if len(plot) > 1 {
		stepX = plotWidth / float64(len(plot)-1)
	}

	// appendPt grows a space-separated "x,y" polyline point list.
	appendPt := func(s string, x, y float64) string {
		if s == "" {
			return fmt.Sprintf("%.1f,%.1f", x, y)
		}
		return s + " " + fmt.Sprintf("%.1f,%.1f", x, y)
	}

	var pointsVm, pointsVcpu, pointsRam string
	for i, e := range plot {
		x := startX + float64(i)*stepX
		pointsVm = appendPt(pointsVm, x, 10+(1-(float64(e.VmCount)/maxVal))*height)
		pointsVcpu = appendPt(pointsVcpu, x, 10+(1-(float64(e.VcpuTotal)/maxVal))*height)
		pointsRam = appendPt(pointsRam, x, 10+(1-(float64(e.RamTotalGB)/maxVal))*height)
	}

	// Vertical grid lines: one per sample, only when there are multiple samples.
	gridX := []float64{}
	if len(plot) > 1 {
		for i := range plot {
			gridX = append(gridX, startX+float64(i)*stepX)
		}
	}

	// Horizontal grid lines and matching Y-axis labels: five evenly spaced
	// positions from maxVal (top) down to zero.
	gridY := []float64{}
	yTicks := []views.ChartTick{}
	for i := 0; i <= 4; i++ {
		pos := 10 + float64(i)*(height/4)
		gridY = append(gridY, pos)
		yTicks = append(yTicks, views.ChartTick{Pos: pos, Label: fmt.Sprintf("%.0f", maxVal*float64(4-i)/4)})
	}

	// X-axis labels: at most ~6 ticks, thinned by index stride.
	xTicks := []views.ChartTick{}
	maxTicks := 6
	stepIdx := 1
	if len(plot) > 1 {
		stepIdx = (len(plot)-1)/maxTicks + 1
	}
	for idx := 0; idx < len(plot); idx += stepIdx {
		xTicks = append(xTicks, views.ChartTick{
			Pos:   startX + float64(idx)*stepX,
			Label: time.Unix(plot[idx].RawTime, 0).Local().Format("01-02 15:04"),
		})
	}
	// Always label the newest sample unless the stride loop already did.
	if len(plot) > 1 {
		lastIdx := len(plot) - 1
		xLast := startX + float64(lastIdx)*stepX
		if len(xTicks) == 0 || xTicks[len(xTicks)-1].Pos != xLast {
			xTicks = append(xTicks, views.ChartTick{
				Pos:   xLast,
				Label: time.Unix(plot[lastIdx].RawTime, 0).Local().Format("01-02 15:04"),
			})
		}
	}

	return views.VcenterChartData{
		PointsVm:   pointsVm,
		PointsVcpu: pointsVcpu,
		PointsRam:  pointsRam,
		Width:      int(width),
		Height:     int(height),
		GridX:      gridX,
		GridY:      gridY,
		YTicks:     yTicks,
		XTicks:     xTicks,
	}
}

View File

@@ -17,8 +17,8 @@ import (
// @Produce json
// @Param vm_id query string true "VM ID"
// @Param datacenter_name query string true "Datacenter name"
// @Success 200 {object} map[string]string "Cleanup completed"
// @Failure 400 {object} map[string]string "Invalid request"
// @Success 200 {object} models.StatusMessageResponse "Cleanup completed"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Router /api/inventory/vm/delete [delete]
func (h *Handler) VmCleanup(w http.ResponseWriter, r *http.Request) {
ctx := context.Background()

View File

@@ -15,9 +15,10 @@ import (
)
// VmCreateEvent records a VM creation CloudEvent.
// @Summary Record VM create event
// @Description Parses a VM create CloudEvent and stores the event data.
// @Summary Record VM create event (deprecated)
// @Description Deprecated: Parses a VM create CloudEvent and stores the event data.
// @Tags events
// @Deprecated
// @Accept json
// @Produce text/plain
// @Param event body models.CloudEventReceived true "CloudEvent payload"

View File

@@ -13,9 +13,10 @@ import (
)
// VmDeleteEvent records a VM deletion CloudEvent in the inventory.
// @Summary Record VM delete event
// @Description Parses a VM delete CloudEvent and marks the VM as deleted in inventory.
// @Summary Record VM delete event (deprecated)
// @Description Deprecated: Parses a VM delete CloudEvent and marks the VM as deleted in inventory.
// @Tags events
// @Deprecated
// @Accept json
// @Produce text/plain
// @Param event body models.CloudEventReceived true "CloudEvent payload"

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"io"
"net/http"
"strings"
"vctp/db"
queries "vctp/db/queries"
models "vctp/server/models"
@@ -20,8 +21,8 @@ import (
// @Accept json
// @Produce json
// @Param import body models.ImportReceived true "Bulk import payload"
// @Success 200 {object} map[string]string "Import processed"
// @Failure 500 {object} map[string]string "Server error"
// @Success 200 {object} models.StatusMessageResponse "Import processed"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/import/vm [post]
func (h *Handler) VmImport(w http.ResponseWriter, r *http.Request) {
// Read request body
@@ -56,6 +57,17 @@ func (h *Handler) VmImport(w http.ResponseWriter, r *http.Request) {
//prettyPrint(inData)
}
if strings.HasPrefix(inData.Name, "vCLS-") {
h.Logger.Info("Skipping internal vCLS VM import", "vm_name", inData.Name)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]string{
"status": "OK",
"message": fmt.Sprintf("Skipped internal VM '%s'", inData.Name),
})
return
}
ctx := context.Background()
// Query Inventory table for this VM before adding it

View File

@@ -20,15 +20,16 @@ import (
)
// VmModifyEvent records a VM modification CloudEvent.
// @Summary Record VM modify event
// @Description Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.
// @Summary Record VM modify event (deprecated)
// @Description Deprecated: Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.
// @Tags events
// @Deprecated
// @Accept json
// @Produce json
// @Param event body models.CloudEventReceived true "CloudEvent payload"
// @Success 200 {object} map[string]string "Modify event processed"
// @Success 202 {object} map[string]string "No relevant changes"
// @Failure 500 {object} map[string]string "Server error"
// @Success 200 {object} models.StatusMessageResponse "Modify event processed"
// @Success 202 {object} models.StatusMessageResponse "No relevant changes"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/event/vm/modify [post]
func (h *Handler) VmModifyEvent(w http.ResponseWriter, r *http.Request) {
var configChanges []map[string]string
@@ -116,22 +117,23 @@ func (h *Handler) VmModifyEvent(w http.ResponseWriter, r *http.Request) {
params.UpdateType = "reconfigure"
}
case "config.managedBy": // This changes when a VM becomes a placeholder or vice versa
if change["newValue"] == "(extensionKey = \"com.vmware.vcDr\", type = \"placeholderVm\")" {
switch change["newValue"] {
case "(extensionKey = \"com.vmware.vcDr\", type = \"placeholderVm\")":
params.PlaceholderChange = sql.NullString{String: "placeholderVm", Valid: true}
h.Logger.Debug("placeholderVm")
changeFound = true
params.UpdateType = "srm"
} else if change["newValue"] == "<unset>" {
case "<unset>":
params.PlaceholderChange = sql.NullString{String: "Vm", Valid: true}
h.Logger.Debug("vm")
changeFound = true
params.UpdateType = "srm"
} else if change["newValue"] == "testVm" {
case "testVm":
h.Logger.Debug("testVm")
params.PlaceholderChange = sql.NullString{String: "testVm", Valid: true}
changeFound = true
params.UpdateType = "srm"
} else {
default:
h.Logger.Error("Unexpected value for managedBy configuration", "new_value", change["newValue"])
}
@@ -145,12 +147,13 @@ func (h *Handler) VmModifyEvent(w http.ResponseWriter, r *http.Request) {
// TODO - track when this happens, maybe need a new database column?
case "config.managedBy.type":
h.Logger.Debug("config.managedBy.type")
if change["newValue"] == "testVm" {
switch change["newValue"] {
case "testVm":
h.Logger.Debug("testVm")
params.PlaceholderChange = sql.NullString{String: "testVm", Valid: true}
changeFound = true
params.UpdateType = "srm"
} else if change["newValue"] == "\\\"placeholderVm\\\"" {
case "\\\"placeholderVm\\\"":
h.Logger.Debug("placeholderVm")
params.PlaceholderChange = sql.NullString{String: "placeholderVm", Valid: true}
changeFound = true
@@ -401,10 +404,7 @@ func (h *Handler) calculateNewDiskSize(event models.CloudEventReceived) float64
}
}
err = vc.Logout()
if err != nil {
h.Logger.Error("unable to logout of vcenter", "error", err)
}
_ = vc.Logout(context.Background())
h.Logger.Debug("Calculated new disk size", "value", diskSize)
@@ -441,6 +441,12 @@ func (h *Handler) AddVmToInventory(evt models.CloudEventReceived, ctx context.Co
}
if strings.HasPrefix(vmObject.Name, "vCLS-") {
h.Logger.Info("Skipping internal vCLS VM", "vm_name", vmObject.Name)
_ = vc.Logout(ctx)
return 0, nil
}
//c.Logger.Debug("found VM")
srmPlaceholder = "FALSE" // Default assumption
//prettyPrint(vmObject)
@@ -511,10 +517,7 @@ func (h *Handler) AddVmToInventory(evt models.CloudEventReceived, ctx context.Co
poweredOn = "TRUE"
}
err = vc.Logout()
if err != nil {
h.Logger.Error("unable to logout of vcenter", "error", err)
}
_ = vc.Logout(ctx)
if foundVm {
e := evt.CloudEvent

View File

@@ -15,15 +15,16 @@ import (
)
// VmMoveEvent records a VM move CloudEvent as an update.
// @Summary Record VM move event
// @Description Parses a VM move CloudEvent and creates an update record.
// @Summary Record VM move event (deprecated)
// @Description Deprecated: Parses a VM move CloudEvent and creates an update record.
// @Tags events
// @Deprecated
// @Accept json
// @Produce json
// @Param event body models.CloudEventReceived true "CloudEvent payload"
// @Success 200 {object} map[string]string "Move event processed"
// @Failure 400 {object} map[string]string "Invalid request"
// @Failure 500 {object} map[string]string "Server error"
// @Success 200 {object} models.StatusMessageResponse "Move event processed"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/event/vm/move [post]
func (h *Handler) VmMoveEvent(w http.ResponseWriter, r *http.Request) {
params := queries.CreateUpdateParams{}

236
server/handler/vmTrace.go Normal file
View File

@@ -0,0 +1,236 @@
package handler
import (
"fmt"
"net/http"
"strings"
"time"
"vctp/components/views"
"vctp/db"
)
// VmTrace shows per-snapshot details for a VM across all snapshots.
// @Summary Trace VM history
// @Description Shows VM resource history across snapshots, with chart and table.
// @Tags vm
// @Produce text/html
// @Param vm_id query string false "VM ID"
// @Param vm_uuid query string false "VM UUID"
// @Param name query string false "VM name"
// @Success 200 {string} string "HTML page"
// @Failure 400 {string} string "Missing identifier"
// @Router /vm/trace [get]
func (h *Handler) VmTrace(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	vmID := r.URL.Query().Get("vm_id")
	vmUUID := r.URL.Query().Get("vm_uuid")
	name := r.URL.Query().Get("name")

	queryLabel := firstNonEmpty(vmID, vmUUID, name)
	displayQuery := ""
	if queryLabel != "" {
		displayQuery = " for " + queryLabel
	}

	var (
		entries        []views.VmTraceEntry
		chart          views.VmTraceChart
		creationLabel  string
		deletionLabel  string
		creationApprox bool
	)

	// Only fetch data when a query was provided; otherwise render the empty
	// page with just the search form.
	if queryLabel != "" {
		h.Logger.Info("vm trace request", "vm_id", vmID, "vm_uuid", vmUUID, "name", name)

		// Lifecycle lookup is best-effort: on failure we fall back to the
		// zero value and derive labels from the snapshots below.
		lifecycle, lifeErr := db.FetchVmLifecycle(ctx, h.Database.DB(), vmID, vmUUID, name)
		if lifeErr != nil {
			h.Logger.Warn("failed to fetch VM lifecycle", "error", lifeErr)
		}

		rows, err := db.FetchVmTrace(ctx, h.Database.DB(), vmID, vmUUID, name)
		if err != nil {
			h.Logger.Error("failed to fetch VM trace", "error", err)
			http.Error(w, fmt.Sprintf("failed to fetch VM trace: %v", err), http.StatusInternalServerError)
			return
		}
		h.Logger.Info("vm trace results", "row_count", len(rows))

		entries = make([]views.VmTraceEntry, 0, len(rows))
		for _, row := range rows {
			// Nullable lifecycle columns collapse to 0 (rendered as "").
			var creation, deletion int64
			if row.CreationTime.Valid {
				creation = row.CreationTime.Int64
			}
			if row.DeletionTime.Valid {
				deletion = row.DeletionTime.Int64
			}
			entries = append(entries, views.VmTraceEntry{
				Snapshot:        time.Unix(row.SnapshotTime, 0).Local().Format("2006-01-02 15:04:05"),
				RawTime:         row.SnapshotTime,
				Name:            row.Name,
				VmId:            row.VmId,
				VmUuid:          row.VmUuid,
				Vcenter:         row.Vcenter,
				ResourcePool:    row.ResourcePool,
				VcpuCount:       row.VcpuCount,
				RamGB:           row.RamGB,
				ProvisionedDisk: row.ProvisionedDisk,
				CreationTime:    formatMaybeTime(creation),
				DeletionTime:    formatMaybeTime(deletion),
			})
		}
		chart = buildVmTraceChart(entries)

		if len(entries) > 0 {
			if lifecycle.CreationTime > 0 {
				ts := time.Unix(lifecycle.CreationTime, 0).Local().Format("2006-01-02 15:04:05")
				if lifecycle.CreationApprox {
					// The label text already says "approx"; leaving the flag
					// unset avoids doubling up the approximate marker.
					creationLabel = fmt.Sprintf("%s (approx. earliest snapshot)", ts)
				} else {
					creationLabel = ts
				}
			} else {
				// No recorded creation time: fall back to the first snapshot.
				creationLabel = time.Unix(entries[0].RawTime, 0).Local().Format("2006-01-02 15:04:05")
				creationApprox = true
			}
			if lifecycle.DeletionTime > 0 {
				deletionLabel = time.Unix(lifecycle.DeletionTime, 0).Local().Format("2006-01-02 15:04:05")
			}
		}
	}

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	if err := views.VmTracePage(queryLabel, displayQuery, vmID, vmUUID, name, creationLabel, deletionLabel, creationApprox, entries, chart).Render(ctx, w); err != nil {
		http.Error(w, "Failed to render template", http.StatusInternalServerError)
	}
}
// buildVmTraceChart converts the trace entries into SVG polyline point
// strings plus grid/tick data for the VM trace page. vCPU and RAM share one
// Y scale covering the largest value seen. The four resource-pool series are
// drawn as fixed horizontal bands (tin at the very top, then bronze, silver,
// gold at decreasing heights); each band drops to the chart baseline at
// snapshots where the VM is not in that pool. Returns a zero value when
// there are no entries.
func buildVmTraceChart(entries []views.VmTraceEntry) views.VmTraceChart {
	if len(entries) == 0 {
		return views.VmTraceChart{}
	}
	width := 1200.0
	height := 220.0
	plotWidth := width - 60.0
	startX := 40.0

	// Shared Y scale: the maximum of the vCPU and RAM series.
	maxVal := float64(0)
	for _, e := range entries {
		if float64(e.VcpuCount) > maxVal {
			maxVal = float64(e.VcpuCount)
		}
		if float64(e.RamGB) > maxVal {
			maxVal = float64(e.RamGB)
		}
	}
	if maxVal == 0 {
		maxVal = 1 // avoid division by zero for all-zero data
	}
	stepX := plotWidth
	if len(entries) > 1 {
		stepX = plotWidth / float64(len(entries)-1)
	}
	scale := height / maxVal

	// Pool band positions are loop-invariant (scale*maxVal == height), so
	// compute them once instead of rebuilding the map on every iteration as
	// the original did.
	poolY := map[string]float64{
		"tin":    10 + height - scale*maxVal,
		"bronze": 10 + height - scale*maxVal*0.9,
		"silver": 10 + height - scale*maxVal*0.8,
		"gold":   10 + height - scale*maxVal*0.7,
	}
	baseline := 10 + height // Y used when the VM is not in a given pool

	var ptsVcpu, ptsRam, ptsTin, ptsBronze, ptsSilver, ptsGold string
	// appendPt grows a space-separated "x,y" polyline point list.
	appendPt := func(s string, x, y float64) string {
		if s == "" {
			return fmt.Sprintf("%.1f,%.1f", x, y)
		}
		return s + " " + fmt.Sprintf("%.1f,%.1f", x, y)
	}
	// poolPoint picks the band height when the entry's pool matches, else the baseline.
	poolPoint := func(lower, pool string) float64 {
		if lower == pool {
			return poolY[pool]
		}
		return baseline
	}
	for i, e := range entries {
		x := startX + float64(i)*stepX
		ptsVcpu = appendPt(ptsVcpu, x, 10+height-float64(e.VcpuCount)*scale)
		ptsRam = appendPt(ptsRam, x, 10+height-float64(e.RamGB)*scale)
		lower := strings.ToLower(e.ResourcePool)
		ptsTin = appendPt(ptsTin, x, poolPoint(lower, "tin"))
		ptsBronze = appendPt(ptsBronze, x, poolPoint(lower, "bronze"))
		ptsSilver = appendPt(ptsSilver, x, poolPoint(lower, "silver"))
		ptsGold = appendPt(ptsGold, x, poolPoint(lower, "gold"))
	}

	// Horizontal grid lines: five evenly spaced positions.
	gridY := []float64{}
	for i := 0; i <= 4; i++ {
		gridY = append(gridY, 10+float64(i)*(height/4))
	}
	// Vertical grid lines: one per sample.
	gridX := []float64{}
	for i := 0; i < len(entries); i++ {
		gridX = append(gridX, startX+float64(i)*stepX)
	}
	// Y-axis labels from maxVal (top) down to zero.
	yTicks := []views.ChartTick{}
	for i := 0; i <= 4; i++ {
		val := maxVal * float64(4-i) / 4
		pos := 10 + float64(i)*(height/4)
		yTicks = append(yTicks, views.ChartTick{Pos: pos, Label: fmt.Sprintf("%.0f", val)})
	}
	// X-axis labels: at most ~8 ticks, thinned by index stride.
	xTicks := []views.ChartTick{}
	maxTicks := 8
	stepIdx := 1
	if len(entries) > 1 {
		stepIdx = (len(entries)-1)/maxTicks + 1
	}
	for idx := 0; idx < len(entries); idx += stepIdx {
		x := startX + float64(idx)*stepX
		label := time.Unix(entries[idx].RawTime, 0).Local().Format("01-02 15:04")
		xTicks = append(xTicks, views.ChartTick{Pos: x, Label: label})
	}
	// Always label the newest sample unless the stride loop already did.
	if len(entries) > 1 {
		lastIdx := len(entries) - 1
		xLast := startX + float64(lastIdx)*stepX
		labelLast := time.Unix(entries[lastIdx].RawTime, 0).Local().Format("01-02 15:04")
		if len(xTicks) == 0 || xTicks[len(xTicks)-1].Pos != xLast {
			xTicks = append(xTicks, views.ChartTick{Pos: xLast, Label: labelLast})
		}
	}

	return views.VmTraceChart{
		PointsVcpu:   ptsVcpu,
		PointsRam:    ptsRam,
		PointsTin:    ptsTin,
		PointsBronze: ptsBronze,
		PointsSilver: ptsSilver,
		PointsGold:   ptsGold,
		Width:        int(width),
		Height:       int(height),
		GridX:        gridX,
		GridY:        gridY,
		XTicks:       xTicks,
		YTicks:       yTicks,
	}
}
// firstNonEmpty returns the first argument that is not the empty string,
// or "" when every argument is empty (or none were given).
func firstNonEmpty(vals ...string) string {
	for i := range vals {
		if candidate := vals[i]; candidate != "" {
			return candidate
		}
	}
	return ""
}
// formatMaybeTime renders a Unix timestamp in the local timezone as
// "2006-01-02 15:04:05", or the empty string when the timestamp is zero
// (zero means "not recorded" for lifecycle columns).
func formatMaybeTime(ts int64) string {
	if ts != 0 {
		return time.Unix(ts, 0).Local().Format("2006-01-02 15:04:05")
	}
	return ""
}

View File

@@ -25,10 +25,14 @@ func (l *LoggingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
start := time.Now()
l.handler.ServeHTTP(w, r)
requestPath := r.URL.RequestURI()
if requestPath == "" {
requestPath = r.URL.Path
}
l.logger.Debug(
"Request recieved",
slog.String("method", r.Method),
slog.String("path", r.URL.Path),
slog.String("request", requestPath),
slog.String("remote", r.RemoteAddr),
slog.Duration("duration", time.Since(start)),
)

View File

@@ -0,0 +1,61 @@
package models
// StatusResponse represents a simple status-only JSON response.
type StatusResponse struct {
Status string `json:"status"`
}
// StatusMessageResponse represents a status + message JSON response.
type StatusMessageResponse struct {
Status string `json:"status"`
Message string `json:"message"`
}
// ErrorResponse represents a standard error JSON response.
// NOTE(review): shape is identical to StatusMessageResponse; kept as a
// separate type so swagger annotations can distinguish success and failure
// payloads.
type ErrorResponse struct {
Status string `json:"status"`
Message string `json:"message"`
}
// SnapshotMigrationStats mirrors the snapshot registry migration stats payload.
// JSON field names are PascalCase to match the migration routine's output.
type SnapshotMigrationStats struct {
HourlyRenamed int `json:"HourlyRenamed"`
HourlyRegistered int `json:"HourlyRegistered"`
DailyRegistered int `json:"DailyRegistered"`
MonthlyRegistered int `json:"MonthlyRegistered"`
Errors int `json:"Errors"`
}
// SnapshotMigrationResponse captures snapshot registry migration results.
type SnapshotMigrationResponse struct {
Status string `json:"status"`
Error string `json:"error,omitempty"` // present only when the migration failed
Stats SnapshotMigrationStats `json:"stats"`
}
// SnapshotRegenerateReportsResponse describes the hourly report regeneration response.
type SnapshotRegenerateReportsResponse struct {
Status string `json:"status"`
Total int `json:"total"`
Regenerated int `json:"regenerated"`
Skipped int `json:"skipped"`
Errors int `json:"errors"`
ReportsDir string `json:"reports_dir"`
SnapshotType string `json:"snapshotType"`
}
// SnapshotRepairResponse describes the daily snapshot repair response.
// Counts are strings, presumably mirroring a handler that serializes them
// with strconv.Itoa — confirm before changing to int.
type SnapshotRepairResponse struct {
Status string `json:"status"`
Repaired string `json:"repaired"`
Failed string `json:"failed"`
}
// SnapshotRepairSuiteResponse describes the full repair suite response.
// Counts are strings because SnapshotRepairSuite serializes them with
// strconv.Itoa into a map[string]string.
type SnapshotRepairSuiteResponse struct {
Status string `json:"status"`
DailyRepaired string `json:"daily_repaired"`
DailyFailed string `json:"daily_failed"`
MonthlyRefined string `json:"monthly_refined"`
MonthlyFailed string `json:"monthly_failed"`
}

View File

@@ -0,0 +1,33 @@
package models

// DailyCreationMissingByVcenter captures missing CreationTime counts per vCenter.
type DailyCreationMissingByVcenter struct {
	Vcenter      string `json:"vcenter"`
	MissingCount int64  `json:"missing_count"`
}

// DailyCreationMissingSample is a sample daily summary row missing CreationTime.
type DailyCreationMissingSample struct {
	Vcenter string `json:"vcenter"`
	// VmId and VmUuid are omitted from JSON when empty.
	VmId   string `json:"vm_id,omitempty"`
	VmUuid string `json:"vm_uuid,omitempty"`
	Name   string `json:"name,omitempty"`
	// SamplesPresent is the number of hourly samples found for the row;
	// AvgIsPresent presumably is the mean presence flag across those
	// samples — TODO confirm against the diagnostics query.
	SamplesPresent int64   `json:"samples_present"`
	AvgIsPresent   float64 `json:"avg_is_present"`
	// SnapshotTime is a Unix timestamp (integer per the swagger schema).
	SnapshotTime int64 `json:"snapshot_time"`
}

// DailyCreationDiagnosticsResponse describes missing CreationTime diagnostics for a daily summary table.
type DailyCreationDiagnosticsResponse struct {
	Status string `json:"status"`
	// Date is the daily summary date being diagnosed (YYYY-MM-DD per the
	// API parameter description).
	Date  string `json:"date"`
	Table string `json:"table"`
	// TotalRows is the row count of the summary table; the Missing*
	// fields break down rows lacking CreationTime.
	TotalRows                   int64   `json:"total_rows"`
	MissingCreationCount        int64   `json:"missing_creation_count"`
	MissingCreationPct          float64 `json:"missing_creation_pct"`
	AvgIsPresentLtOneCount      int64   `json:"avg_is_present_lt_one_count"`
	MissingCreationPartialCount int64   `json:"missing_creation_partial_count"`
	MissingByVcenter            []DailyCreationMissingByVcenter `json:"missing_by_vcenter"`
	Samples                     []DailyCreationMissingSample    `json:"samples"`
	MissingCreationPartialSamples []DailyCreationMissingSample  `json:"missing_creation_partial_samples"`
}

View File

@@ -43,14 +43,15 @@ const docTemplate = `{
},
"/api/cleanup/updates": {
"delete": {
"description": "Removes update records that are no longer associated with a VM.",
"description": "Deprecated: Removes update records that are no longer associated with a VM.",
"produces": [
"text/plain"
],
"tags": [
"maintenance"
],
"summary": "Cleanup updates",
"summary": "Cleanup updates (deprecated)",
"deprecated": true,
"responses": {
"200": {
"description": "Cleanup completed",
@@ -69,14 +70,15 @@ const docTemplate = `{
},
"/api/cleanup/vcenter": {
"delete": {
"description": "Removes all inventory entries associated with a vCenter URL.",
"description": "Deprecated: Removes all inventory entries associated with a vCenter URL.",
"produces": [
"application/json"
],
"tags": [
"maintenance"
],
"summary": "Cleanup vCenter inventory",
"summary": "Cleanup vCenter inventory (deprecated)",
"deprecated": true,
"parameters": [
{
"type": "string",
@@ -90,19 +92,60 @@ const docTemplate = `{
"200": {
"description": "Cleanup completed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/diagnostics/daily-creation": {
"get": {
"description": "Returns counts of daily summary rows missing CreationTime and sample rows for the given date.",
"produces": [
"application/json"
],
"tags": [
"diagnostics"
],
"summary": "Daily summary CreationTime diagnostics",
"parameters": [
{
"type": "string",
"description": "Daily date (YYYY-MM-DD)",
"name": "date",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "Diagnostics result",
"schema": {
"$ref": "#/definitions/models.DailyCreationDiagnosticsResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"404": {
"description": "Summary not found",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -139,19 +182,13 @@ const docTemplate = `{
"200": {
"description": "Ciphertext response",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -159,7 +196,7 @@ const docTemplate = `{
},
"/api/event/vm/create": {
"post": {
"description": "Parses a VM create CloudEvent and stores the event data.",
"description": "Deprecated: Parses a VM create CloudEvent and stores the event data.",
"consumes": [
"application/json"
],
@@ -169,7 +206,8 @@ const docTemplate = `{
"tags": [
"events"
],
"summary": "Record VM create event",
"summary": "Record VM create event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -205,7 +243,7 @@ const docTemplate = `{
},
"/api/event/vm/delete": {
"post": {
"description": "Parses a VM delete CloudEvent and marks the VM as deleted in inventory.",
"description": "Deprecated: Parses a VM delete CloudEvent and marks the VM as deleted in inventory.",
"consumes": [
"application/json"
],
@@ -215,7 +253,8 @@ const docTemplate = `{
"tags": [
"events"
],
"summary": "Record VM delete event",
"summary": "Record VM delete event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -251,7 +290,7 @@ const docTemplate = `{
},
"/api/event/vm/modify": {
"post": {
"description": "Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.",
"description": "Deprecated: Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.",
"consumes": [
"application/json"
],
@@ -261,7 +300,8 @@ const docTemplate = `{
"tags": [
"events"
],
"summary": "Record VM modify event",
"summary": "Record VM modify event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -277,28 +317,19 @@ const docTemplate = `{
"200": {
"description": "Modify event processed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"202": {
"description": "No relevant changes",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -306,7 +337,7 @@ const docTemplate = `{
},
"/api/event/vm/move": {
"post": {
"description": "Parses a VM move CloudEvent and creates an update record.",
"description": "Deprecated: Parses a VM move CloudEvent and creates an update record.",
"consumes": [
"application/json"
],
@@ -316,7 +347,8 @@ const docTemplate = `{
"tags": [
"events"
],
"summary": "Record VM move event",
"summary": "Record VM move event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -332,28 +364,19 @@ const docTemplate = `{
"200": {
"description": "Move event processed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -387,19 +410,13 @@ const docTemplate = `{
"200": {
"description": "Import processed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -435,19 +452,13 @@ const docTemplate = `{
"200": {
"description": "Cleanup completed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -499,10 +510,7 @@ const docTemplate = `{
"500": {
"description": "Report generation failed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -537,19 +545,13 @@ const docTemplate = `{
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -575,15 +577,219 @@ const docTemplate = `{
"500": {
"description": "Report generation failed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/aggregate": {
"post": {
"description": "Forces regeneration of a daily or monthly summary table for a specified date or month.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Force snapshot aggregation",
"parameters": [
{
"type": "string",
"description": "Aggregation type: daily or monthly",
"name": "type",
"in": "query",
"required": true
},
{
"type": "string",
"description": "Daily date (YYYY-MM-DD) or monthly date (YYYY-MM)",
"name": "date",
"in": "query",
"required": true
},
{
"type": "string",
"description": "Monthly aggregation granularity: hourly or daily",
"name": "granularity",
"in": "query"
}
],
"responses": {
"200": {
"description": "Aggregation complete",
"schema": {
"$ref": "#/definitions/models.StatusResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/hourly/force": {
"post": {
"description": "Manually trigger an hourly snapshot for all configured vCenters. Requires confirmation text to avoid accidental execution.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Trigger hourly snapshot (manual)",
"parameters": [
{
"type": "string",
"description": "Confirmation text; must be 'FORCE'",
"name": "confirm",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "Snapshot started",
"schema": {
"$ref": "#/definitions/models.StatusResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/migrate": {
"post": {
"description": "Rebuilds the snapshot registry from existing tables and renames hourly tables to epoch-based names.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Migrate snapshot registry",
"responses": {
"200": {
"description": "Migration results",
"schema": {
"$ref": "#/definitions/models.SnapshotMigrationResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.SnapshotMigrationResponse"
}
}
}
}
},
"/api/snapshots/regenerate-hourly-reports": {
"post": {
"description": "Regenerates XLSX reports for hourly snapshots when the report files are missing or empty.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Regenerate hourly snapshot reports",
"responses": {
"200": {
"description": "Regeneration summary",
"schema": {
"$ref": "#/definitions/models.SnapshotRegenerateReportsResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/repair": {
"post": {
"description": "Backfills SnapshotTime and lifecycle info for existing daily summary tables and reruns monthly lifecycle refinement using hourly data.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Repair daily summaries",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.SnapshotRepairResponse"
}
}
}
}
},
"/api/snapshots/repair/all": {
"post": {
"description": "Rebuilds snapshot registry, backfills per-vCenter totals, repairs daily summaries (SnapshotTime/lifecycle), and refines monthly lifecycle.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Run full snapshot repair suite",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.SnapshotRepairSuiteResponse"
}
}
}
}
},
"/metrics": {
"get": {
"description": "Exposes Prometheus metrics for vctp.",
"produces": [
"text/plain"
],
"tags": [
"metrics"
],
"summary": "Prometheus metrics",
"responses": {
"200": {
"description": "Prometheus metrics"
}
}
}
},
"/snapshots/daily": {
"get": {
"description": "Lists daily summary snapshot tables.",
@@ -661,6 +867,119 @@ const docTemplate = `{
}
}
}
},
"/vcenters": {
"get": {
"description": "Lists all vCenters with recorded snapshot totals.",
"produces": [
"text/html"
],
"tags": [
"vcenters"
],
"summary": "List vCenters",
"responses": {
"200": {
"description": "HTML page",
"schema": {
"type": "string"
}
}
}
}
},
"/vcenters/totals": {
"get": {
"description": "Shows per-snapshot totals for a vCenter.",
"produces": [
"text/html"
],
"tags": [
"vcenters"
],
"summary": "vCenter totals",
"parameters": [
{
"type": "string",
"description": "vCenter URL",
"name": "vcenter",
"in": "query",
"required": true
},
{
"type": "string",
"description": "hourly|daily|monthly (default: hourly)",
"name": "type",
"in": "query"
},
{
"type": "integer",
"description": "Limit results (default 200)",
"name": "limit",
"in": "query"
}
],
"responses": {
"200": {
"description": "HTML page",
"schema": {
"type": "string"
}
},
"400": {
"description": "Missing vcenter",
"schema": {
"type": "string"
}
}
}
}
},
"/vm/trace": {
"get": {
"description": "Shows VM resource history across snapshots, with chart and table.",
"produces": [
"text/html"
],
"tags": [
"vm"
],
"summary": "Trace VM history",
"parameters": [
{
"type": "string",
"description": "VM ID",
"name": "vm_id",
"in": "query"
},
{
"type": "string",
"description": "VM UUID",
"name": "vm_uuid",
"in": "query"
},
{
"type": "string",
"description": "VM name",
"name": "name",
"in": "query"
}
],
"responses": {
"200": {
"description": "HTML page",
"schema": {
"type": "string"
}
},
"400": {
"description": "Missing identifier",
"schema": {
"type": "string"
}
}
}
}
}
},
"definitions": {
@@ -862,6 +1181,101 @@ const docTemplate = `{
}
}
},
"models.DailyCreationDiagnosticsResponse": {
"type": "object",
"properties": {
"avg_is_present_lt_one_count": {
"type": "integer"
},
"date": {
"type": "string"
},
"missing_by_vcenter": {
"type": "array",
"items": {
"$ref": "#/definitions/models.DailyCreationMissingByVcenter"
}
},
"missing_creation_count": {
"type": "integer"
},
"missing_creation_partial_count": {
"type": "integer"
},
"missing_creation_partial_samples": {
"type": "array",
"items": {
"$ref": "#/definitions/models.DailyCreationMissingSample"
}
},
"missing_creation_pct": {
"type": "number"
},
"samples": {
"type": "array",
"items": {
"$ref": "#/definitions/models.DailyCreationMissingSample"
}
},
"status": {
"type": "string"
},
"table": {
"type": "string"
},
"total_rows": {
"type": "integer"
}
}
},
"models.DailyCreationMissingByVcenter": {
"type": "object",
"properties": {
"missing_count": {
"type": "integer"
},
"vcenter": {
"type": "string"
}
}
},
"models.DailyCreationMissingSample": {
"type": "object",
"properties": {
"avg_is_present": {
"type": "number"
},
"name": {
"type": "string"
},
"samples_present": {
"type": "integer"
},
"snapshot_time": {
"type": "integer"
},
"vcenter": {
"type": "string"
},
"vm_id": {
"type": "string"
},
"vm_uuid": {
"type": "string"
}
}
},
"models.ErrorResponse": {
"type": "object",
"properties": {
"message": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.ImportReceived": {
"type": "object",
"properties": {
@@ -902,6 +1316,119 @@ const docTemplate = `{
"type": "string"
}
}
},
"models.SnapshotMigrationResponse": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"stats": {
"$ref": "#/definitions/models.SnapshotMigrationStats"
},
"status": {
"type": "string"
}
}
},
"models.SnapshotMigrationStats": {
"type": "object",
"properties": {
"DailyRegistered": {
"type": "integer"
},
"Errors": {
"type": "integer"
},
"HourlyRegistered": {
"type": "integer"
},
"HourlyRenamed": {
"type": "integer"
},
"MonthlyRegistered": {
"type": "integer"
}
}
},
"models.SnapshotRegenerateReportsResponse": {
"type": "object",
"properties": {
"errors": {
"type": "integer"
},
"regenerated": {
"type": "integer"
},
"reports_dir": {
"type": "string"
},
"skipped": {
"type": "integer"
},
"snapshotType": {
"type": "string"
},
"status": {
"type": "string"
},
"total": {
"type": "integer"
}
}
},
"models.SnapshotRepairResponse": {
"type": "object",
"properties": {
"failed": {
"type": "string"
},
"repaired": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.SnapshotRepairSuiteResponse": {
"type": "object",
"properties": {
"daily_failed": {
"type": "string"
},
"daily_repaired": {
"type": "string"
},
"monthly_failed": {
"type": "string"
},
"monthly_refined": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.StatusMessageResponse": {
"type": "object",
"properties": {
"message": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.StatusResponse": {
"type": "object",
"properties": {
"status": {
"type": "string"
}
}
}
}
}`

View File

@@ -32,14 +32,15 @@
},
"/api/cleanup/updates": {
"delete": {
"description": "Removes update records that are no longer associated with a VM.",
"description": "Deprecated: Removes update records that are no longer associated with a VM.",
"produces": [
"text/plain"
],
"tags": [
"maintenance"
],
"summary": "Cleanup updates",
"summary": "Cleanup updates (deprecated)",
"deprecated": true,
"responses": {
"200": {
"description": "Cleanup completed",
@@ -58,14 +59,15 @@
},
"/api/cleanup/vcenter": {
"delete": {
"description": "Removes all inventory entries associated with a vCenter URL.",
"description": "Deprecated: Removes all inventory entries associated with a vCenter URL.",
"produces": [
"application/json"
],
"tags": [
"maintenance"
],
"summary": "Cleanup vCenter inventory",
"summary": "Cleanup vCenter inventory (deprecated)",
"deprecated": true,
"parameters": [
{
"type": "string",
@@ -79,19 +81,60 @@
"200": {
"description": "Cleanup completed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/diagnostics/daily-creation": {
"get": {
"description": "Returns counts of daily summary rows missing CreationTime and sample rows for the given date.",
"produces": [
"application/json"
],
"tags": [
"diagnostics"
],
"summary": "Daily summary CreationTime diagnostics",
"parameters": [
{
"type": "string",
"description": "Daily date (YYYY-MM-DD)",
"name": "date",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "Diagnostics result",
"schema": {
"$ref": "#/definitions/models.DailyCreationDiagnosticsResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"404": {
"description": "Summary not found",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -128,19 +171,13 @@
"200": {
"description": "Ciphertext response",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -148,7 +185,7 @@
},
"/api/event/vm/create": {
"post": {
"description": "Parses a VM create CloudEvent and stores the event data.",
"description": "Deprecated: Parses a VM create CloudEvent and stores the event data.",
"consumes": [
"application/json"
],
@@ -158,7 +195,8 @@
"tags": [
"events"
],
"summary": "Record VM create event",
"summary": "Record VM create event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -194,7 +232,7 @@
},
"/api/event/vm/delete": {
"post": {
"description": "Parses a VM delete CloudEvent and marks the VM as deleted in inventory.",
"description": "Deprecated: Parses a VM delete CloudEvent and marks the VM as deleted in inventory.",
"consumes": [
"application/json"
],
@@ -204,7 +242,8 @@
"tags": [
"events"
],
"summary": "Record VM delete event",
"summary": "Record VM delete event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -240,7 +279,7 @@
},
"/api/event/vm/modify": {
"post": {
"description": "Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.",
"description": "Deprecated: Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.",
"consumes": [
"application/json"
],
@@ -250,7 +289,8 @@
"tags": [
"events"
],
"summary": "Record VM modify event",
"summary": "Record VM modify event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -266,28 +306,19 @@
"200": {
"description": "Modify event processed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"202": {
"description": "No relevant changes",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -295,7 +326,7 @@
},
"/api/event/vm/move": {
"post": {
"description": "Parses a VM move CloudEvent and creates an update record.",
"description": "Deprecated: Parses a VM move CloudEvent and creates an update record.",
"consumes": [
"application/json"
],
@@ -305,7 +336,8 @@
"tags": [
"events"
],
"summary": "Record VM move event",
"summary": "Record VM move event (deprecated)",
"deprecated": true,
"parameters": [
{
"description": "CloudEvent payload",
@@ -321,28 +353,19 @@
"200": {
"description": "Move event processed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -376,19 +399,13 @@
"200": {
"description": "Import processed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -424,19 +441,13 @@
"200": {
"description": "Cleanup completed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.StatusMessageResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -488,10 +499,7 @@
"500": {
"description": "Report generation failed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -526,19 +534,13 @@
"400": {
"description": "Invalid request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
@@ -564,15 +566,219 @@
"500": {
"description": "Report generation failed",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/aggregate": {
"post": {
"description": "Forces regeneration of a daily or monthly summary table for a specified date or month.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Force snapshot aggregation",
"parameters": [
{
"type": "string",
"description": "Aggregation type: daily or monthly",
"name": "type",
"in": "query",
"required": true
},
{
"type": "string",
"description": "Daily date (YYYY-MM-DD) or monthly date (YYYY-MM)",
"name": "date",
"in": "query",
"required": true
},
{
"type": "string",
"description": "Monthly aggregation granularity: hourly or daily",
"name": "granularity",
"in": "query"
}
],
"responses": {
"200": {
"description": "Aggregation complete",
"schema": {
"$ref": "#/definitions/models.StatusResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/hourly/force": {
"post": {
"description": "Manually trigger an hourly snapshot for all configured vCenters. Requires confirmation text to avoid accidental execution.",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Trigger hourly snapshot (manual)",
"parameters": [
{
"type": "string",
"description": "Confirmation text; must be 'FORCE'",
"name": "confirm",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "Snapshot started",
"schema": {
"$ref": "#/definitions/models.StatusResponse"
}
},
"400": {
"description": "Invalid request",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/migrate": {
"post": {
"description": "Rebuilds the snapshot registry from existing tables and renames hourly tables to epoch-based names.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Migrate snapshot registry",
"responses": {
"200": {
"description": "Migration results",
"schema": {
"$ref": "#/definitions/models.SnapshotMigrationResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.SnapshotMigrationResponse"
}
}
}
}
},
"/api/snapshots/regenerate-hourly-reports": {
"post": {
"description": "Regenerates XLSX reports for hourly snapshots when the report files are missing or empty.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Regenerate hourly snapshot reports",
"responses": {
"200": {
"description": "Regeneration summary",
"schema": {
"$ref": "#/definitions/models.SnapshotRegenerateReportsResponse"
}
},
"500": {
"description": "Server error",
"schema": {
"$ref": "#/definitions/models.ErrorResponse"
}
}
}
}
},
"/api/snapshots/repair": {
"post": {
"description": "Backfills SnapshotTime and lifecycle info for existing daily summary tables and reruns monthly lifecycle refinement using hourly data.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Repair daily summaries",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.SnapshotRepairResponse"
}
}
}
}
},
"/api/snapshots/repair/all": {
"post": {
"description": "Rebuilds snapshot registry, backfills per-vCenter totals, repairs daily summaries (SnapshotTime/lifecycle), and refines monthly lifecycle.",
"produces": [
"application/json"
],
"tags": [
"snapshots"
],
"summary": "Run full snapshot repair suite",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/models.SnapshotRepairSuiteResponse"
}
}
}
}
},
"/metrics": {
"get": {
"description": "Exposes Prometheus metrics for vctp.",
"produces": [
"text/plain"
],
"tags": [
"metrics"
],
"summary": "Prometheus metrics",
"responses": {
"200": {
"description": "Prometheus metrics"
}
}
}
},
"/snapshots/daily": {
"get": {
"description": "Lists daily summary snapshot tables.",
@@ -650,6 +856,119 @@
}
}
}
},
"/vcenters": {
"get": {
"description": "Lists all vCenters with recorded snapshot totals.",
"produces": [
"text/html"
],
"tags": [
"vcenters"
],
"summary": "List vCenters",
"responses": {
"200": {
"description": "HTML page",
"schema": {
"type": "string"
}
}
}
}
},
"/vcenters/totals": {
"get": {
"description": "Shows per-snapshot totals for a vCenter.",
"produces": [
"text/html"
],
"tags": [
"vcenters"
],
"summary": "vCenter totals",
"parameters": [
{
"type": "string",
"description": "vCenter URL",
"name": "vcenter",
"in": "query",
"required": true
},
{
"type": "string",
"description": "hourly|daily|monthly (default: hourly)",
"name": "type",
"in": "query"
},
{
"type": "integer",
"description": "Limit results (default 200)",
"name": "limit",
"in": "query"
}
],
"responses": {
"200": {
"description": "HTML page",
"schema": {
"type": "string"
}
},
"400": {
"description": "Missing vcenter",
"schema": {
"type": "string"
}
}
}
}
},
"/vm/trace": {
"get": {
"description": "Shows VM resource history across snapshots, with chart and table.",
"produces": [
"text/html"
],
"tags": [
"vm"
],
"summary": "Trace VM history",
"parameters": [
{
"type": "string",
"description": "VM ID",
"name": "vm_id",
"in": "query"
},
{
"type": "string",
"description": "VM UUID",
"name": "vm_uuid",
"in": "query"
},
{
"type": "string",
"description": "VM name",
"name": "name",
"in": "query"
}
],
"responses": {
"200": {
"description": "HTML page",
"schema": {
"type": "string"
}
},
"400": {
"description": "Missing identifier",
"schema": {
"type": "string"
}
}
}
}
}
},
"definitions": {
@@ -851,6 +1170,101 @@
}
}
},
"models.DailyCreationDiagnosticsResponse": {
"type": "object",
"properties": {
"avg_is_present_lt_one_count": {
"type": "integer"
},
"date": {
"type": "string"
},
"missing_by_vcenter": {
"type": "array",
"items": {
"$ref": "#/definitions/models.DailyCreationMissingByVcenter"
}
},
"missing_creation_count": {
"type": "integer"
},
"missing_creation_partial_count": {
"type": "integer"
},
"missing_creation_partial_samples": {
"type": "array",
"items": {
"$ref": "#/definitions/models.DailyCreationMissingSample"
}
},
"missing_creation_pct": {
"type": "number"
},
"samples": {
"type": "array",
"items": {
"$ref": "#/definitions/models.DailyCreationMissingSample"
}
},
"status": {
"type": "string"
},
"table": {
"type": "string"
},
"total_rows": {
"type": "integer"
}
}
},
"models.DailyCreationMissingByVcenter": {
"type": "object",
"properties": {
"missing_count": {
"type": "integer"
},
"vcenter": {
"type": "string"
}
}
},
"models.DailyCreationMissingSample": {
"type": "object",
"properties": {
"avg_is_present": {
"type": "number"
},
"name": {
"type": "string"
},
"samples_present": {
"type": "integer"
},
"snapshot_time": {
"type": "integer"
},
"vcenter": {
"type": "string"
},
"vm_id": {
"type": "string"
},
"vm_uuid": {
"type": "string"
}
}
},
"models.ErrorResponse": {
"type": "object",
"properties": {
"message": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.ImportReceived": {
"type": "object",
"properties": {
@@ -891,6 +1305,119 @@
"type": "string"
}
}
},
"models.SnapshotMigrationResponse": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"stats": {
"$ref": "#/definitions/models.SnapshotMigrationStats"
},
"status": {
"type": "string"
}
}
},
"models.SnapshotMigrationStats": {
"type": "object",
"properties": {
"DailyRegistered": {
"type": "integer"
},
"Errors": {
"type": "integer"
},
"HourlyRegistered": {
"type": "integer"
},
"HourlyRenamed": {
"type": "integer"
},
"MonthlyRegistered": {
"type": "integer"
}
}
},
"models.SnapshotRegenerateReportsResponse": {
"type": "object",
"properties": {
"errors": {
"type": "integer"
},
"regenerated": {
"type": "integer"
},
"reports_dir": {
"type": "string"
},
"skipped": {
"type": "integer"
},
"snapshotType": {
"type": "string"
},
"status": {
"type": "string"
},
"total": {
"type": "integer"
}
}
},
"models.SnapshotRepairResponse": {
"type": "object",
"properties": {
"failed": {
"type": "string"
},
"repaired": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.SnapshotRepairSuiteResponse": {
"type": "object",
"properties": {
"daily_failed": {
"type": "string"
},
"daily_repaired": {
"type": "string"
},
"monthly_failed": {
"type": "string"
},
"monthly_refined": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.StatusMessageResponse": {
"type": "object",
"properties": {
"message": {
"type": "string"
},
"status": {
"type": "string"
}
}
},
"models.StatusResponse": {
"type": "object",
"properties": {
"status": {
"type": "string"
}
}
}
}
}

View File

@@ -126,6 +126,68 @@ definitions:
modified:
type: string
type: object
models.DailyCreationDiagnosticsResponse:
properties:
avg_is_present_lt_one_count:
type: integer
date:
type: string
missing_by_vcenter:
items:
$ref: '#/definitions/models.DailyCreationMissingByVcenter'
type: array
missing_creation_count:
type: integer
missing_creation_partial_count:
type: integer
missing_creation_partial_samples:
items:
$ref: '#/definitions/models.DailyCreationMissingSample'
type: array
missing_creation_pct:
type: number
samples:
items:
$ref: '#/definitions/models.DailyCreationMissingSample'
type: array
status:
type: string
table:
type: string
total_rows:
type: integer
type: object
models.DailyCreationMissingByVcenter:
properties:
missing_count:
type: integer
vcenter:
type: string
type: object
models.DailyCreationMissingSample:
properties:
avg_is_present:
type: number
name:
type: string
samples_present:
type: integer
snapshot_time:
type: integer
vcenter:
type: string
vm_id:
type: string
vm_uuid:
type: string
type: object
models.ErrorResponse:
properties:
message:
type: string
status:
type: string
type: object
models.ImportReceived:
properties:
Cluster:
@@ -153,6 +215,79 @@ definitions:
VmId:
type: string
type: object
models.SnapshotMigrationResponse:
properties:
error:
type: string
stats:
$ref: '#/definitions/models.SnapshotMigrationStats'
status:
type: string
type: object
models.SnapshotMigrationStats:
properties:
DailyRegistered:
type: integer
Errors:
type: integer
HourlyRegistered:
type: integer
HourlyRenamed:
type: integer
MonthlyRegistered:
type: integer
type: object
models.SnapshotRegenerateReportsResponse:
properties:
errors:
type: integer
regenerated:
type: integer
reports_dir:
type: string
skipped:
type: integer
snapshotType:
type: string
status:
type: string
total:
type: integer
type: object
models.SnapshotRepairResponse:
properties:
failed:
type: string
repaired:
type: string
status:
type: string
type: object
models.SnapshotRepairSuiteResponse:
properties:
daily_failed:
type: string
daily_repaired:
type: string
monthly_failed:
type: string
monthly_refined:
type: string
status:
type: string
type: object
models.StatusMessageResponse:
properties:
message:
type: string
status:
type: string
type: object
models.StatusResponse:
properties:
status:
type: string
type: object
info:
contact: {}
paths:
@@ -175,7 +310,9 @@ paths:
- ui
/api/cleanup/updates:
delete:
description: Removes update records that are no longer associated with a VM.
deprecated: true
description: 'Deprecated: Removes update records that are no longer associated
with a VM.'
produces:
- text/plain
responses:
@@ -187,12 +324,14 @@ paths:
description: Server error
schema:
type: string
summary: Cleanup updates
summary: Cleanup updates (deprecated)
tags:
- maintenance
/api/cleanup/vcenter:
delete:
description: Removes all inventory entries associated with a vCenter URL.
deprecated: true
description: 'Deprecated: Removes all inventory entries associated with a vCenter
URL.'
parameters:
- description: vCenter URL
in: query
@@ -205,18 +344,46 @@ paths:
"200":
description: Cleanup completed
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.StatusMessageResponse'
"400":
description: Invalid request
schema:
additionalProperties:
type: string
type: object
summary: Cleanup vCenter inventory
$ref: '#/definitions/models.ErrorResponse'
summary: Cleanup vCenter inventory (deprecated)
tags:
- maintenance
/api/diagnostics/daily-creation:
get:
description: Returns counts of daily summary rows missing CreationTime and sample
rows for the given date.
parameters:
- description: Daily date (YYYY-MM-DD)
in: query
name: date
required: true
type: string
produces:
- application/json
responses:
"200":
description: Diagnostics result
schema:
$ref: '#/definitions/models.DailyCreationDiagnosticsResponse'
"400":
description: Invalid request
schema:
$ref: '#/definitions/models.ErrorResponse'
"404":
description: Summary not found
schema:
$ref: '#/definitions/models.ErrorResponse'
"500":
description: Server error
schema:
$ref: '#/definitions/models.ErrorResponse'
summary: Daily summary CreationTime diagnostics
tags:
- diagnostics
/api/encrypt:
post:
consumes:
@@ -237,15 +404,11 @@ paths:
"200":
description: Ciphertext response
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.StatusMessageResponse'
"500":
description: Server error
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
summary: Encrypt data
tags:
- crypto
@@ -253,7 +416,9 @@ paths:
post:
consumes:
- application/json
description: Parses a VM create CloudEvent and stores the event data.
deprecated: true
description: 'Deprecated: Parses a VM create CloudEvent and stores the event
data.'
parameters:
- description: CloudEvent payload
in: body
@@ -276,14 +441,16 @@ paths:
description: Server error
schema:
type: string
summary: Record VM create event
summary: Record VM create event (deprecated)
tags:
- events
/api/event/vm/delete:
post:
consumes:
- application/json
description: Parses a VM delete CloudEvent and marks the VM as deleted in inventory.
deprecated: true
description: 'Deprecated: Parses a VM delete CloudEvent and marks the VM as
deleted in inventory.'
parameters:
- description: CloudEvent payload
in: body
@@ -306,15 +473,16 @@ paths:
description: Server error
schema:
type: string
summary: Record VM delete event
summary: Record VM delete event (deprecated)
tags:
- events
/api/event/vm/modify:
post:
consumes:
- application/json
description: Parses a VM modify CloudEvent and creates an update record when
relevant changes are detected.
deprecated: true
description: 'Deprecated: Parses a VM modify CloudEvent and creates an update
record when relevant changes are detected.'
parameters:
- description: CloudEvent payload
in: body
@@ -328,29 +496,25 @@ paths:
"200":
description: Modify event processed
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.StatusMessageResponse'
"202":
description: No relevant changes
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.StatusMessageResponse'
"500":
description: Server error
schema:
additionalProperties:
type: string
type: object
summary: Record VM modify event
$ref: '#/definitions/models.ErrorResponse'
summary: Record VM modify event (deprecated)
tags:
- events
/api/event/vm/move:
post:
consumes:
- application/json
description: Parses a VM move CloudEvent and creates an update record.
deprecated: true
description: 'Deprecated: Parses a VM move CloudEvent and creates an update
record.'
parameters:
- description: CloudEvent payload
in: body
@@ -364,22 +528,16 @@ paths:
"200":
description: Move event processed
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.StatusMessageResponse'
"400":
description: Invalid request
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
"500":
description: Server error
schema:
additionalProperties:
type: string
type: object
summary: Record VM move event
$ref: '#/definitions/models.ErrorResponse'
summary: Record VM move event (deprecated)
tags:
- events
/api/import/vm:
@@ -400,15 +558,11 @@ paths:
"200":
description: Import processed
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.StatusMessageResponse'
"500":
description: Server error
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
summary: Import VMs
tags:
- inventory
@@ -432,15 +586,11 @@ paths:
"200":
description: Cleanup completed
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.StatusMessageResponse'
"400":
description: Invalid request
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
summary: Cleanup VM inventory entry
tags:
- inventory
@@ -474,9 +624,7 @@ paths:
"500":
description: Report generation failed
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
summary: Download inventory report
tags:
- reports
@@ -499,15 +647,11 @@ paths:
"400":
description: Invalid request
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
"500":
description: Server error
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
summary: Download snapshot report
tags:
- snapshots
@@ -524,12 +668,152 @@ paths:
"500":
description: Report generation failed
schema:
additionalProperties:
type: string
type: object
$ref: '#/definitions/models.ErrorResponse'
summary: Download updates report
tags:
- reports
/api/snapshots/aggregate:
post:
description: Forces regeneration of a daily or monthly summary table for a specified
date or month.
parameters:
- description: 'Aggregation type: daily or monthly'
in: query
name: type
required: true
type: string
- description: Daily date (YYYY-MM-DD) or monthly date (YYYY-MM)
in: query
name: date
required: true
type: string
- description: 'Monthly aggregation granularity: hourly or daily'
in: query
name: granularity
type: string
produces:
- application/json
responses:
"200":
description: Aggregation complete
schema:
$ref: '#/definitions/models.StatusResponse'
"400":
description: Invalid request
schema:
$ref: '#/definitions/models.ErrorResponse'
"500":
description: Server error
schema:
$ref: '#/definitions/models.ErrorResponse'
summary: Force snapshot aggregation
tags:
- snapshots
/api/snapshots/hourly/force:
post:
consumes:
- application/json
description: Manually trigger an hourly snapshot for all configured vCenters.
Requires confirmation text to avoid accidental execution.
parameters:
- description: Confirmation text; must be 'FORCE'
in: query
name: confirm
required: true
type: string
produces:
- application/json
responses:
"200":
description: Snapshot started
schema:
$ref: '#/definitions/models.StatusResponse'
"400":
description: Invalid request
schema:
$ref: '#/definitions/models.ErrorResponse'
"500":
description: Server error
schema:
$ref: '#/definitions/models.ErrorResponse'
summary: Trigger hourly snapshot (manual)
tags:
- snapshots
/api/snapshots/migrate:
post:
description: Rebuilds the snapshot registry from existing tables and renames
hourly tables to epoch-based names.
produces:
- application/json
responses:
"200":
description: Migration results
schema:
$ref: '#/definitions/models.SnapshotMigrationResponse'
"500":
description: Server error
schema:
$ref: '#/definitions/models.SnapshotMigrationResponse'
summary: Migrate snapshot registry
tags:
- snapshots
/api/snapshots/regenerate-hourly-reports:
post:
description: Regenerates XLSX reports for hourly snapshots when the report files
are missing or empty.
produces:
- application/json
responses:
"200":
description: Regeneration summary
schema:
$ref: '#/definitions/models.SnapshotRegenerateReportsResponse'
"500":
description: Server error
schema:
$ref: '#/definitions/models.ErrorResponse'
summary: Regenerate hourly snapshot reports
tags:
- snapshots
/api/snapshots/repair:
post:
description: Backfills SnapshotTime and lifecycle info for existing daily summary
tables and reruns monthly lifecycle refinement using hourly data.
produces:
- application/json
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.SnapshotRepairResponse'
summary: Repair daily summaries
tags:
- snapshots
/api/snapshots/repair/all:
post:
description: Rebuilds snapshot registry, backfills per-vCenter totals, repairs
daily summaries (SnapshotTime/lifecycle), and refines monthly lifecycle.
produces:
- application/json
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.SnapshotRepairSuiteResponse'
summary: Run full snapshot repair suite
tags:
- snapshots
/metrics:
get:
description: Exposes Prometheus metrics for vctp.
produces:
- text/plain
responses:
"200":
description: Prometheus metrics
summary: Prometheus metrics
tags:
- metrics
/snapshots/daily:
get:
description: Lists daily summary snapshot tables.
@@ -581,4 +865,78 @@ paths:
summary: List monthly snapshots
tags:
- snapshots
/vcenters:
get:
description: Lists all vCenters with recorded snapshot totals.
produces:
- text/html
responses:
"200":
description: HTML page
schema:
type: string
summary: List vCenters
tags:
- vcenters
/vcenters/totals:
get:
description: Shows per-snapshot totals for a vCenter.
parameters:
- description: vCenter URL
in: query
name: vcenter
required: true
type: string
- description: 'hourly|daily|monthly (default: hourly)'
in: query
name: type
type: string
- description: Limit results (default 200)
in: query
name: limit
type: integer
produces:
- text/html
responses:
"200":
description: HTML page
schema:
type: string
"400":
description: Missing vcenter
schema:
type: string
summary: vCenter totals
tags:
- vcenters
/vm/trace:
get:
description: Shows VM resource history across snapshots, with chart and table.
parameters:
- description: VM ID
in: query
name: vm_id
type: string
- description: VM UUID
in: query
name: vm_uuid
type: string
- description: VM name
in: query
name: name
type: string
produces:
- text/html
responses:
"200":
description: HTML page
schema:
type: string
"400":
description: Missing identifier
schema:
type: string
summary: Trace VM history
tags:
- vm
swagger: "2.0"

View File

@@ -5,6 +5,8 @@ import (
"log/slog"
"net/http"
"net/http/pprof"
"os"
"path/filepath"
"vctp/db"
"vctp/dist"
"vctp/internal/secrets"
@@ -28,7 +30,19 @@ func New(logger *slog.Logger, database db.Database, buildTime string, sha1ver st
mux := http.NewServeMux()
reportsDir := settings.Values.Settings.ReportsDir
if reportsDir == "" {
reportsDir = "/var/lib/vctp/reports"
}
if err := os.MkdirAll(reportsDir, 0o755); err != nil {
logger.Warn("failed to create reports directory", "error", err, "path", reportsDir)
}
mux.Handle("/assets/", middleware.CacheMiddleware(http.FileServer(http.FS(dist.AssetsDir))))
mux.Handle("/favicon.ico", middleware.CacheMiddleware(http.FileServer(http.FS(dist.AssetsDir))))
mux.Handle("/favicon-16x16.png", middleware.CacheMiddleware(http.FileServer(http.FS(dist.AssetsDir))))
mux.Handle("/favicon-32x32.png", middleware.CacheMiddleware(http.FileServer(http.FS(dist.AssetsDir))))
mux.Handle("/reports/", http.StripPrefix("/reports/", http.FileServer(http.Dir(filepath.Clean(reportsDir)))))
mux.HandleFunc("/", h.Home)
mux.HandleFunc("/api/event/vm/create", h.VmCreateEvent)
mux.HandleFunc("/api/event/vm/modify", h.VmModifyEvent)
@@ -48,6 +62,17 @@ func New(logger *slog.Logger, database db.Database, buildTime string, sha1ver st
mux.HandleFunc("/api/report/inventory", h.InventoryReportDownload)
mux.HandleFunc("/api/report/updates", h.UpdateReportDownload)
mux.HandleFunc("/api/report/snapshot", h.SnapshotReportDownload)
mux.HandleFunc("/api/snapshots/aggregate", h.SnapshotAggregateForce)
mux.HandleFunc("/api/snapshots/hourly/force", h.SnapshotForceHourly)
mux.HandleFunc("/api/snapshots/migrate", h.SnapshotMigrate)
mux.HandleFunc("/api/snapshots/repair", h.SnapshotRepair)
mux.HandleFunc("/api/snapshots/repair/all", h.SnapshotRepairSuite)
mux.HandleFunc("/api/snapshots/regenerate-hourly-reports", h.SnapshotRegenerateHourlyReports)
mux.HandleFunc("/api/diagnostics/daily-creation", h.DailyCreationDiagnostics)
mux.HandleFunc("/vm/trace", h.VmTrace)
mux.HandleFunc("/vcenters", h.VcenterList)
mux.HandleFunc("/vcenters/totals", h.VcenterTotals)
mux.HandleFunc("/metrics", h.Metrics)
mux.HandleFunc("/snapshots/hourly", h.SnapshotHourlyList)
mux.HandleFunc("/snapshots/daily", h.SnapshotDailyList)
@@ -61,16 +86,16 @@ func New(logger *slog.Logger, database db.Database, buildTime string, sha1ver st
if err != nil {
logger.Error("failed to load swagger ui assets", "error", err)
} else {
mux.Handle("/swagger/", http.StripPrefix("/swagger/", http.FileServer(http.FS(swaggerSub))))
mux.Handle("/swagger/", middleware.CacheMiddleware(http.StripPrefix("/swagger/", http.FileServer(http.FS(swaggerSub)))))
}
mux.HandleFunc("/swagger", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "/swagger/", http.StatusPermanentRedirect)
})
mux.HandleFunc("/swagger.json", func(w http.ResponseWriter, r *http.Request) {
mux.Handle("/swagger.json", middleware.CacheMiddleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(swaggerSpec)
})
})))
// Register pprof handlers
mux.HandleFunc("/debug/pprof/", pprof.Index)

View File

@@ -1,8 +0,0 @@
settings:
tenants_to_filter:
- "DecomVM"
node_charge_clusters:
- ".*CMD.*"
srm_activeactive_vms:
vcenter_addresses:
- "https://vc.lab.local/sdk"

View File

@@ -8,7 +8,6 @@ SUDOERS_FILE="/etc/sudoers.d/${USER}"
# create a group & user if not exists
getent group "$GROUP" >/dev/null || groupadd -r "$GROUP"; /bin/true
getent passwd "$USER" >/dev/null || useradd -r -g "$GROUP" -m -s /bin/bash -c "vctp service" "$USER"
getent passwd tftp >/dev/null || useradd -r -g tftp -s /sbin/nologin tftp
# create vctp config directory if it doesn't exist
[ -d /etc/dtms ] || mkdir -p /etc/dtms
@@ -21,6 +20,7 @@ getent passwd tftp >/dev/null || useradd -r -g tftp -s /sbin/nologin tftp
# create vctp data directory if it doesn't exist
[ -d /var/lib/vctp ] || mkdir -p /var/lib/vctp
[ -d /var/lib/vctp/reports ] || mkdir -p /var/lib/vctp/reports
# set user ownership on vctp data directory if not already done
[ "$(stat -c "%U" /var/lib/vctp)" = "$USER" ] || chown -R "$USER" /var/lib/vctp

View File

@@ -1 +1,3 @@
CPE_OPTS='-config /etc/dtms/vctp.yml -log-level info -log-output text'
CPE_OPTS='-settings /etc/dtms/vctp.yml'
MONTHLY_AGG_GO=0
DAILY_AGG_GO=0

View File

@@ -1,5 +1,5 @@
[Unit]
Description=vCTP monitors VMware VM inventory and event data to build chargeback reports
Description=vSphere Chargeback Tracking Platform
Documentation=https://gitlab.dell.com/
ConditionPathExists=/usr/bin/vctp-linux-amd64
After=network.target
@@ -8,6 +8,7 @@ After=network.target
Type=simple
EnvironmentFile=/etc/default/vctp
User=vctp
Group=dtms
ExecStart=/usr/bin/vctp-linux-amd64 $CPE_OPTS
ExecStartPost=/usr/bin/sleep 3
Restart=always

View File

@@ -1,25 +1,35 @@
settings:
data_location: "/var/lib/cbs"
kickstart_location: "/var/lib/cbs/ks"
database_filename: "/var/lib/cbs/cbs.db"
log_level: "info"
log_output: "text"
database_driver: "sqlite"
database_url: "/var/lib/vctp/db.sqlite3"
reports_dir: /var/lib/vctp/reports
bind_ip:
bind_port: 443
bind_port: 9443
bind_disable_tls: false
tls_cert_filename: "/etc/dtms/cbs.crt"
tls_key_filename: "/etc/dtms/cbs.key"
tftp_root_directory: "/var/lib/tftpboot"
tftp_images_subdirectory: "images"
replacements:
omapi:
key_name: "OMAPI"
key_secret:
special_files:
ldap_groups:
ldap_bind_address: ""
ldap_base_dn: ""
ldap_trust_cert_file: ""
ldap_disable_validation: false
ldap_insecure: false
auth_token_lifespan_hours: 2
auth_api_key: ""
tls_cert_filename: "/var/lib/vctp/vctp.crt"
tls_key_filename: "/var/lib/vctp/vctp.key"
vcenter_username: ""
vcenter_password: ""
vcenter_insecure: false
vcenter_event_polling_seconds: 60
vcenter_inventory_polling_seconds: 7200
vcenter_inventory_snapshot_seconds: 3600
vcenter_inventory_aggregate_seconds: 86400
hourly_snapshot_concurrency: 0
hourly_snapshot_max_age_days: 60
daily_snapshot_max_age_months: 12
snapshot_cleanup_cron: "30 2 * * *"
hourly_snapshot_retry_seconds: 300
hourly_snapshot_max_retries: 3
hourly_job_timeout_seconds: 1200
hourly_snapshot_timeout_seconds: 600
daily_job_timeout_seconds: 900
monthly_job_timeout_seconds: 1200
monthly_aggregation_granularity: "hourly"
monthly_aggregation_cron: "10 3 1 * *"
cleanup_job_timeout_seconds: 600
tenants_to_filter:
node_charge_clusters:
srm_activeactive_vms:
vcenter_addresses:

View File

@@ -1,7 +1,7 @@
name: "vctp"
arch: "amd64"
platform: "linux"
version: "v26.1.1"
version: "v26.1.2"
version_schema: semver
description: vCTP monitors VMware VM inventory and event data to build chargeback reports
maintainer: "@coadn"
@@ -36,5 +36,3 @@ scripts:
depends:
- systemd
- tftp-server
- dhcp-server