Compare commits: 40fb860385...dev
200 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| 32ced35130 | |||
| ff783fb45a | |||
| 49484900ac | |||
| aa6abb8cb2 | |||
| 1f2783fc86 | |||
| b9eae50f69 | |||
| c566456ebd | |||
| ee01d8deac | |||
| 93b5769145 | |||
| 38480e52c0 | |||
| 6981bd9994 | |||
| fe96172253 | |||
| 35b4a50cf6 | |||
| 73ec80bb6f | |||
| 0d509179aa | |||
| e6c7596239 | |||
| b39865325a | |||
| b4a3c0fb3a | |||
| 2caf2763f6 | |||
| 25564efa54 | |||
| 871d7c2024 | |||
| 3671860b7d | |||
| 3e2d95d3b9 | |||
| 8a3481b966 | |||
| 13adc159a2 | |||
| c8f04efd51 | |||
| 68ee2838e4 | |||
| b0592a2539 | |||
| baea0cc85c | |||
| ceadf42048 | |||
| 374d4921e1 | |||
| 7dc8f598c3 | |||
| 148df38219 | |||
| 0a2c529111 | |||
| 3cdf368bc4 | |||
| 32d4a352dc | |||
| b77f8671da | |||
| 715b293894 | |||
| 2483091861 | |||
| 00805513c9 | |||
| fd9cc185ce | |||
| c7c7fd3dc9 | |||
| d683d23bfc | |||
| c8bb30c788 | |||
| 7ea02be91a | |||
| 0517ef88c3 | |||
| a9e522cc84 | |||
| e186644db7 | |||
| 22fa250a43 | |||
| 1874b2c621 | |||
| a12fe5cad0 | |||
| 1cd1046433 | |||
| 6af49471b2 | |||
| b4c52e296c | |||
| 7b7afbf1d5 | |||
| 0820cbb65e | |||
| f171c7f0eb | |||
| 7c76825813 | |||
| 9dc94bd405 | |||
| 6ee848edb5 | |||
| 63794be38d | |||
| 7273961cfc | |||
| d55916766b | |||
| ab01c0fc4d | |||
| 588a552e4c | |||
| 871904f63e | |||
| 268919219e | |||
| f0bacab729 | |||
| 75a5f31a2f | |||
| 1b91c73a18 | |||
| 2ea0f937c5 | |||
| e5e5be37a3 | |||
| 96567f6211 | |||
| 7971098caf | |||
| 645a20829f | |||
| debac1f684 | |||
| 8dee30ea97 | |||
| bba308ad28 | |||
| 3f985dcd4d | |||
| 0beafb5b00 | |||
| ea68331208 | |||
| 4d754ee263 | |||
| 11f7d36bfc | |||
| 50e9921955 | |||
| 457d9395f0 | |||
| 8b2c8ae85d | |||
| 434c7136e9 | |||
| 877b65f10b | |||
| 8df1d145f8 | |||
| 9be3a3d807 | |||
| 1fca81a7b3 | |||
| 56f021590d | |||
| 44ae2094f3 | |||
| 417c7c8127 | |||
| 7fac6e3920 | |||
| 98899e306f | |||
| cfc4efee0e | |||
| b9ab34db0a | |||
| 013ae4568e | |||
| 5c34a9eacd | |||
| 13af853c45 | |||
| 5130d37632 | |||
| b297b8293c | |||
| 7b600b2359 | |||
| aa4567d7c1 | |||
| ca8b39ba0e | |||
| 7400e08c54 | |||
| ffe0c01fd7 | |||
| 5cc89968d9 | |||
| 0f0bdf19c3 | |||
| 6d1bb09167 | |||
| 3ceba1a117 | |||
| a81613a8c2 | |||
| ea1eeb5c21 | |||
| afec4aacb0 | |||
| dc120f2bdc | |||
| c8ae94fb43 | |||
| b46369811b | |||
| fb40abfd48 | |||
| b07ed9ee09 | |||
| 2a9489619d | |||
| f86ec3d615 | |||
| 309db2f1a6 | |||
| 37d921f635 | |||
| c9375f3099 | |||
| 81271873f3 | |||
| 665750548f | |||
| ce1f28d9c3 | |||
| 1ecdb10cf7 | |||
| cc6601146a | |||
| 9cdde0b278 | |||
| f9b8e25c2f | |||
| f94339446d | |||
| 77c1928436 | |||
| f80dfe9027 | |||
| b9c1f65971 | |||
| 3bc7f922d3 | |||
| f28fed831a | |||
| 8f43603613 | |||
| 380707cf23 | |||
| 6a41528f41 | |||
| 5875550802 | |||
| 7665227ac6 | |||
| 9802419713 | |||
| ea63ffa178 | |||
| 6f5d21fa71 | |||
| 3d86092816 | |||
| 5afbe9bb30 | |||
| c4eedb55b7 | |||
| a91642b450 | |||
| fb47006809 | |||
| 3501967c9e | |||
| 5a00f4a8c7 | |||
| a7dc838c83 | |||
| d76bcf5ca5 | |||
| 78e1da3149 | |||
| a18cca1f0e | |||
| c691763430 | |||
| b371e28469 | |||
| 54ff68590c | |||
| f88b812fa9 | |||
| dcbbff830d | |||
| 44c4bb2d66 | |||
| 3b0206b1e9 | |||
| bc93fa4bad | |||
| 0c2aecd989 | |||
| e1703e401b | |||
| 8931cb4891 | |||
| 00d474b937 | |||
| f712c7254f | |||
| dd13fd6759 | |||
| 3b53455343 | |||
| 2354d85a37 | |||
| b8abc7e6fd | |||
| 8e399de31e | |||
| fd64990e8e | |||
| 3c5aa418df | |||
| 7cc16819f7 | |||
| c7c890f6bb | |||
| 47bc8acace | |||
| 2bae3e7541 | |||
| 08568e3600 | |||
| c122e775a3 | |||
| fb4a7a790d | |||
| c0e6eec89d | |||
| 6d86a93539 | |||
| a84c403a69 | |||
| e47718cd7f | |||
| 4efdf50433 | |||
| d2aac0c6d4 | |||
| afb85ff34a | |||
| 659347ad87 | |||
| cfa9c45e56 | |||
| 32e3bc6e66 | |||
| ab24b5f6b9 | |||
| 56cf2e8366 | |||
| 282459ccf8 | |||
| 57980a860a | |||
| 85bb431de1 | |||
| 19d5b2406e |
36 .drone.sh
@@ -1,36 +0,0 @@
#!/usr/bin/env bash

# disable CGO for cross-compiling
export CGO_ENABLED=0

commit=$(git rev-parse HEAD)
#tag=$(git describe --tags --abbrev=0)
buildtime=$(TZ=Australia/Sydney date +%Y-%m-%dT%T%z)
git_version=$(git describe --tags --always --long --dirty)
package_name=vctp

#platforms=("linux/amd64" "darwin/amd64")
platforms=("linux/amd64")

echo Building $package_name with git version: $git_version
for platform in "${platforms[@]}"
do
    platform_split=(${platform//\// })
    GOOS=${platform_split[0]}
    GOARCH=${platform_split[1]}
    output_name=$package_name'-'$GOOS'-'$GOARCH
    if [ $GOOS = "windows" ]; then
        output_name+='.exe'
    fi

    echo "build commences"
    env GOOS=$GOOS GOARCH=$GOARCH go build -trimpath -ldflags="-X main.sha1ver=$commit -X main.buildTime=$buildtime" -o build/$output_name $package
    if [ $? -ne 0 ]; then
        echo 'An error has occurred! Aborting the script execution...'
        exit 1
    fi
    echo "build complete at $buildtime : $output_name"
    sha256sum build/$output_name > build/${output_name}_checksum.txt
done

ls -lah build
41 .drone.yml
@@ -4,7 +4,7 @@ name: default
steps:
  - name: restore-cache-with-filesystem
    image: meltwater/drone-cache
    image: cache.coadcorp.com/meltwater/drone-cache
    pull: true
    settings:
      backend: "filesystem"
@@ -17,28 +17,50 @@ steps:
      mount:
        - pkg.mod
        - pkg.build
        - pkg.tools
    volumes:
      - name: cache
        path: /go

  - name: build
    image: golang
    image: cache.coadcorp.com/library/golang
    environment:
      CGO_ENABLED: 0
      GOMODCACHE: '/drone/src/pkg.mod'
      GOCACHE: '/drone/src/pkg.build'
      GOBIN: '/drone/src/pkg.tools'
    volumes:
      - name: shared
        path: /shared
    commands:
      #- cp /shared/index.html ./www/
      #- go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
      #- sqlc generate
      - chmod +x .drone.sh
      - ./.drone.sh
      - export PATH=/drone/src/pkg.tools:$PATH
      - go install github.com/a-h/templ/cmd/templ@latest
      - go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
      - go install github.com/swaggo/swag/cmd/swag@latest
      # - go install github.com/goreleaser/nfpm/v2/cmd/nfpm@latest
      - sqlc generate
      - templ generate -path ./components
      - swag init --exclude "pkg.mod,pkg.build,pkg.tools" -o server/router/docs
      - chmod +x ./scripts/*.sh
      - ./scripts/update-swagger-ui.sh
      - ./scripts/drone.sh
      - cp ./build/vctp-linux-amd64 /shared/

  - name: rpm
    image: ghcr.io/goreleaser/nfpm
    environment:
      TZ: UTC
    volumes:
      - name: shared
        path: /shared
    commands:
      - cp /shared/vctp-linux-amd64 ./build/vctp-linux-amd64
      #- find .
      - nfpm package --config vctp.yml --packager rpm --target ./build/
      - ls -lah ./build/

  - name: dell-sftp-deploy
    image: hypervtechnics/drone-sftp
    image: cache.coadcorp.com/hypervtechnics/drone-sftp
    settings:
      host: deft.dell.com
      username:
@@ -54,7 +76,7 @@ steps:
      verbose: true

  - name: rebuild-cache-with-filesystem
    image: meltwater/drone-cache
    image: cache.coadcorp.com/meltwater/drone-cache
    pull: true
    #when:
    #  event:
@@ -69,6 +91,7 @@ steps:
      mount:
        - pkg.mod
        - pkg.build
        - pkg.tools
    volumes:
      - name: cache
        path: /go
92 .github/workflows/ci.yml (vendored)
@@ -1,92 +0,0 @@
name: CI
on:
  push:
    branches:
      - main
    paths-ignore:
      - '.github/**'
  pull_request:
    branches:
      - main
jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: 1.22.x
      - run: go mod download
      - run: go install github.com/a-h/templ/cmd/templ@v0.2.771
      - run: make generate-templ
      - uses: sqlc-dev/setup-sqlc@v4
        with:
          sqlc-version: '1.27.0'
      - run: sqlc vet
      - run: sqlc generate
      - name: Lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: v1.54
          skip-pkg-cache: true
  test:
    name: Test
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: 1.22.x
      - run: go mod download
      - run: go install github.com/a-h/templ/cmd/templ@v0.2.771
      - run: make generate-templ
      - uses: sqlc-dev/setup-sqlc@v4
        with:
          sqlc-version: '1.27.0'
      - run: sqlc generate
      - name: Test
        run: go test -race ./...
  e2e:
    name: End-to-End
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: 1.22.x
      - run: go mod download
      - run: go install github.com/a-h/templ/cmd/templ@v0.2.771
      - run: templ generate -path ./components
      - uses: sqlc-dev/setup-sqlc@v4
        with:
          sqlc-version: '1.27.0'
      - run: sqlc generate
      - run: go test ./... -tags=e2e
  docker-publish:
    name: Publish Docker
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
    needs:
      - lint
      - test
      - e2e
    steps:
      - uses: actions/checkout@v4
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - uses: docker/metadata-action@v5
        id: meta
        with:
          images: ghcr.io/piszmog/vctp
      - uses: docker/build-push-action@v5
        with:
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
77 .github/workflows/release.yml (vendored)
@@ -1,77 +0,0 @@
name: Release
on:
  workflow_dispatch:
    inputs:
      version:
        description: The version to release (e.g. v1.0.0)
        required: true
        type: string

jobs:
  release:
    name: Release
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: 1.22.x
      - run: go mod download
      - run: go install github.com/a-h/templ/cmd/templ@v0.2.771
      - name: Generate Templ Files
        run: make generate-templ
      - name: Generate CSS
        run: |
          curl -sLO https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64
          chmod +x tailwindcss-linux-x64
          mv tailwindcss-linux-x64 tailwindcss
          ./tailwindcss -i ./styles/input.css -o ./dist/assets/css/output@${{ github.event.inputs.version }}.css --minify
      - uses: sqlc-dev/setup-sqlc@v4
        with:
          sqlc-version: '1.27.0'
      - run: sqlc generate
      - name: Build Application
        run: go build -o ./app -ldflags="-s -w -X version.Value=${{ github.event.inputs.version }}"
      - name: Create Tag
        uses: piszmog/create-tag@v1
        with:
          version: ${{ github.event.inputs.version }}
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Release
        uses: softprops/action-gh-release@v2
        with:
          name: ${{ github.event.inputs.version }}
          tag_name: ${{ github.event.inputs.version }}
          generate_release_notes: true
          files: app
  publish:
    name: Publish Docker
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    needs:
      - release
    steps:
      - uses: actions/checkout@v4
      - uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - uses: docker/metadata-action@v5
        id: meta
        with:
          images: ghcr.io/piszmog/my-app
          tags: |
            type=raw,value=${{ github.event.inputs.version }}
      - uses: docker/build-push-action@v5
        with:
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            VERSION=$${{ github.event.inputs.version }}
5 .gitignore (vendored)
@@ -10,6 +10,9 @@
*.dylib
vctp
build/
reports/
reports/*.xlsx
settings.yaml

# Certificates
*.pem
@@ -42,7 +45,7 @@ appengine-generated/
tmp/
pb_data/

# General
# Generalis
.DS_Store
.AppleDouble
.LSOverride
325 README.md
@@ -1,110 +1,135 @@
# Go + HTMX Template
# Overview
vCTP is a vSphere Chargeback Tracking Platform, designed for a specific customer, so some decisions may not be applicable for your use case.

This is built from the template https://github.com/Piszmog/go-htmx-template that comes with everything you need to build a Web Application using Go (templ) and HTMX.
## Snapshots and Reports
- Hourly snapshots capture inventory per vCenter (concurrency via `hourly_snapshot_concurrency`).
- Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month (or hourly snapshots if configured).
- Snapshots are registered in `snapshot_registry` so regeneration via `/api/snapshots/aggregate` can locate the correct tables (fallback scanning is also supported).
- Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.
- Hourly totals in reports are interval-based: each row represents `[HH:00, HH+1:00)` and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence by creation/deletion overlap.
- Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (`YYYY-MM-DD to YYYY-MM-DD`) and prorated totals derived from daily summaries.
- Prometheus metrics are exposed at `/metrics`:
  - Snapshots/aggregations: `vctp_hourly_snapshots_total`, `vctp_hourly_snapshots_failed_total`, `vctp_hourly_snapshot_last_unix`, `vctp_hourly_snapshot_last_rows`, `vctp_daily_aggregations_total`, `vctp_daily_aggregations_failed_total`, `vctp_daily_aggregation_duration_seconds`, `vctp_monthly_aggregations_total`, `vctp_monthly_aggregations_failed_total`, `vctp_monthly_aggregation_duration_seconds`, `vctp_reports_available`
  - vCenter health/perf: `vctp_vcenter_connect_failures_total{vcenter}`, `vctp_vcenter_snapshot_duration_seconds{vcenter}`, `vctp_vcenter_inventory_size{vcenter}`

The template comes with a basic structure of using a SQL DB (`sqlc`), E2E testing (playwright), and styling (tailwindcss).
## Prorating and Aggregation Logic
Daily aggregation runs per VM using sample counts for the day:
- `SamplesPresent`: count of snapshot samples in which the VM appears.
- `TotalSamples`: count of unique snapshot timestamps for the vCenter in the day.
- `AvgIsPresent`: `SamplesPresent / TotalSamples` (0 when `TotalSamples` is 0).
- `AvgVcpuCount`, `AvgRamGB`, `AvgProvisionedDisk` (daily): `sum(values_per_sample) / TotalSamples` to time-weight config changes and prorate partial-day VMs.
- `PoolTinPct`, `PoolBronzePct`, `PoolSilverPct`, `PoolGoldPct` (daily): `(pool_hits / SamplesPresent) * 100`, so pool percentages reflect only the time the VM existed.
- `CreationTime`: only set when vCenter provides it; otherwise it remains `0`.
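For illustration only (this sketch is not part of the diff above), the daily formulas listed here reduce to a single pass over the day's samples. The `Sample` and `DailySummary` types and field names below are assumptions made for the example, not the project's actual schema:

```go
package main

import "fmt"

// Sample is one hourly snapshot observation of a VM (illustrative only).
type Sample struct {
	Present     bool
	VcpuCount   float64
	PoolGoldHit bool
}

// DailySummary mirrors the daily averages described in the README.
type DailySummary struct {
	SamplesPresent int
	TotalSamples   int
	AvgIsPresent   float64
	AvgVcpuCount   float64
	PoolGoldPct    float64
}

// aggregateDay applies the documented rules: averages divide by TotalSamples
// (time-weighting partial-day VMs), pool percentages divide by SamplesPresent.
func aggregateDay(samples []Sample) DailySummary {
	s := DailySummary{TotalSamples: len(samples)}
	var vcpuSum float64
	var goldHits int
	for _, smp := range samples {
		if !smp.Present {
			continue
		}
		s.SamplesPresent++
		vcpuSum += smp.VcpuCount
		if smp.PoolGoldHit {
			goldHits++
		}
	}
	if s.TotalSamples > 0 {
		s.AvgIsPresent = float64(s.SamplesPresent) / float64(s.TotalSamples)
		s.AvgVcpuCount = vcpuSum / float64(s.TotalSamples)
	}
	if s.SamplesPresent > 0 {
		s.PoolGoldPct = float64(goldHits) / float64(s.SamplesPresent) * 100
	}
	return s
}

func main() {
	// A 4-vCPU VM that existed for 6 of the day's 24 hourly samples.
	samples := make([]Sample, 24)
	for i := 0; i < 6; i++ {
		samples[i] = Sample{Present: true, VcpuCount: 4, PoolGoldHit: true}
	}
	fmt.Printf("%+v\n", aggregateDay(samples))
	// AvgIsPresent=0.25, AvgVcpuCount=1 (prorated), PoolGoldPct=100
}
```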
## Getting Started
Monthly aggregation builds on daily summaries (or the daily rollup cache):
- For each VM, daily averages are converted to weighted sums: `daily_avg * daily_total_samples`.
- Monthly averages are `sum(weighted_sums) / monthly_total_samples` (per vCenter).
- Pool percentages are weighted the same way: `(daily_pool_pct / 100) * daily_total_samples`, summed, then divided by `monthly_total_samples` and multiplied by 100.
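Again for illustration only (not part of this change), the monthly rollup described above is a weighted mean of the daily averages, using each day's `TotalSamples` as the weight; the `day` struct below is an assumption made for the sketch:

```go
package main

import "fmt"

// day carries just what the monthly rollup needs (illustrative names only).
type day struct {
	AvgVcpuCount float64 // daily average
	PoolGoldPct  float64 // daily pool percentage, 0-100
	TotalSamples int     // weight: unique snapshot timestamps that day
}

// monthlyAverages converts daily averages back into weighted sums
// (daily_avg * daily_total_samples) and divides by the month's total samples.
func monthlyAverages(days []day) (avgVcpu, poolGoldPct float64) {
	var vcpuSum, goldSum float64
	var monthlySamples int
	for _, d := range days {
		vcpuSum += d.AvgVcpuCount * float64(d.TotalSamples)
		goldSum += d.PoolGoldPct / 100 * float64(d.TotalSamples)
		monthlySamples += d.TotalSamples
	}
	if monthlySamples == 0 {
		return 0, 0
	}
	return vcpuSum / float64(monthlySamples), goldSum / float64(monthlySamples) * 100
}

func main() {
	// A full day (24 samples) outweighs a half day (12 samples).
	days := []day{
		{AvgVcpuCount: 2, PoolGoldPct: 100, TotalSamples: 24},
		{AvgVcpuCount: 4, PoolGoldPct: 0, TotalSamples: 12},
	}
	vcpu, gold := monthlyAverages(days)
	fmt.Printf("vcpu=%.2f goldPct=%.2f\n", vcpu, gold) // vcpu=2.67 goldPct=66.67
}
```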
Clone https://github.com/Piszmog/go-htmx-template
## RPM Layout (summary)
The RPM installs the service and defaults under `/usr/bin`, config under `/etc/dtms`, and data under `/var/lib/vctp`:
- Binary: `/usr/bin/vctp-linux-amd64`
- Systemd unit: `/etc/systemd/system/vctp.service`
- Defaults/env: `/etc/dtms/vctp.yml` (override with `-settings`), `/etc/default/vctp` (environment)
- TLS cert/key: `/etc/dtms/vctp.crt` and `/etc/dtms/vctp.key` (generated if absent)
- Data: SQLite DB and reports default to `/var/lib/vctp` (reports under `/var/lib/vctp/reports`)
- Scripts: preinstall/postinstall handle directory creation and permissions.

Once cloned, run the `update_module.sh` script to change the module to your module name.
# Settings File
Configuration now lives in the YAML settings file. By default the service reads
`/etc/dtms/vctp.yml`, or you can override it with the `-settings` flag.

```shell
./update_module my-new-module
vctp -settings /path/to/vctp.yml
```

## Technologies
If you just want to run a single inventory snapshot across all configured vCenters and
exit (no scheduler/server), use:

A few different technologies are configured to help getting off the ground easier.

- [sqlc](https://sqlc.dev/) for database layer
  - Stubbed to use SQLite
  - This can be easily swapped with [sqlx](https://jmoiron.github.io/sqlx/)
  - The script `upgrade_sqlc.sh` is available to upgrade GitHub Workflow files to latest sqlc version
- [Tailwind CSS](https://tailwindcss.com/) for styling
  - Output is generated with the [CLI](https://tailwindcss.com/docs/installation)
- [templ](https://templ.guide/) for creating HTML
  - The script `upgrade_templ.sh` is available to make upgrading easier
- [HTMX](https://htmx.org/) for HTML interaction
  - The script `upgrade_htmx.sh` is available to make upgrading easier
- [goose](https://github.com/pressly/goose) for DB migrations

TODO: investigate https://github.com/DATA-DOG/go-sqlmock for testing

Technologies we're no longer using:
- [golang migrate](https://github.com/golang-migrate/migrate) for DB migrations
- [playwright-go](https://github.com/playwright-community/playwright-go) for E2E testing.

Everything else uses the standard library.

## Structure
(Now out of date)

```text
.
├── Makefile
├── components
│   ├── core
│   │   └── html.templ
│   └── home
│       └── home.templ
├── db
│   ├── db.go
│   ├── local.go
│   ├── migrations
│   │   ├── 20240407203525_init.down.sql
│   │   └── 20240407203525_init.up.sql
│   └── queries
│       └── query.sql
├── db.sqlite3
├── dist
│   ├── assets
│   │   └── js
│   │       └── htmx@1.9.10.min.js
│   └── dist.go
├── e2e
│   ├── e2e_test.go
│   ├── home_test.go
│   └── testdata
│       └── seed.sql
├── go.mod
├── go.sum
├── log
│   └── log.go
├── main.go
├── server
│   ├── handler
│   │   ├── handler.go
│   │   └── home.go
│   ├── middleware
│   │   ├── cache.go
│   │   ├── logging.go
│   │   └── middleware.go
│   ├── router
│   │   └── router.go
│   └── server.go
├── sqlc.yml
├── styles
│   └── input.css
├── tailwind.config.js
└── version
    └── version.go
```

```shell
vctp -settings /path/to/vctp.yml -run-inventory
```

### Components
## Database Configuration
By default the app uses SQLite and creates/opens `db.sqlite3`. You can opt into PostgreSQL
by updating the settings file:

This is where `templ` files live. Anything you want to render to the user goes here. Note, all
`*.go` files will be ignored by `git` (configured in `.gitignore`).
- `settings.database_driver`: `sqlite` (default) or `postgres`
- `settings.database_url`: SQLite file path/DSN or PostgreSQL DSN

### DB
Examples:
```yaml
settings:
  database_driver: sqlite
  database_url: ./db.sqlite3

This is the directory that `sqlc` generates to. Update `queries.sql` to build
your database operations.
settings:
  database_driver: postgres
  database_url: postgres://user:pass@localhost:5432/vctp?sslmode=disable
```

Once `queries.sql` is updated, run `make generate-sql` to update the generated models
PostgreSQL migrations live in `db/migrations_postgres`, while SQLite migrations remain in
`db/migrations`.

#### DB Migrations
## Snapshot Retention
Hourly and daily snapshot table retention can be configured in the settings file:

- `settings.hourly_snapshot_max_age_days` (default: 60)
- `settings.daily_snapshot_max_age_months` (default: 12)

## Settings Reference
All configuration lives under the top-level `settings:` key in `vctp.yml`.

General:
- `settings.log_level`: logging verbosity (e.g., `debug`, `info`, `warn`, `error`)
- `settings.log_output`: log format, `text` or `json`

Database:
- `settings.database_driver`: `sqlite` or `postgres`
- `settings.database_url`: SQLite file path/DSN or PostgreSQL DSN

HTTP/TLS:
- `settings.bind_ip`: IP address to bind the HTTP server
- `settings.bind_port`: TCP port to bind the HTTP server
- `settings.bind_disable_tls`: `true` to serve plain HTTP (no TLS)
- `settings.tls_cert_filename`: PEM certificate path (TLS mode)
- `settings.tls_key_filename`: PEM private key path (TLS mode)

vCenter:
- `settings.vcenter_username`: vCenter username
- `settings.vcenter_password`: vCenter password (encrypted at startup)
- `settings.vcenter_insecure`: `true` to skip TLS verification
- `settings.vcenter_event_polling_seconds`: event polling interval (0 disables)
- `settings.vcenter_inventory_polling_seconds`: inventory polling interval (0 disables)
- `settings.vcenter_inventory_snapshot_seconds`: hourly snapshot cadence (seconds)
- `settings.vcenter_inventory_aggregate_seconds`: daily aggregation cadence (seconds)
- `settings.vcenter_addresses`: list of vCenter SDK URLs to monitor

Snapshots:
- `settings.hourly_snapshot_concurrency`: max concurrent vCenter snapshots (0 = unlimited)
- `settings.hourly_snapshot_max_age_days`: retention for hourly tables
- `settings.daily_snapshot_max_age_months`: retention for daily tables
- `settings.snapshot_cleanup_cron`: cron expression for cleanup job
- `settings.reports_dir`: directory to store generated XLSX reports (default: `/var/lib/vctp/reports`)
- `settings.hourly_snapshot_retry_seconds`: interval for retrying failed hourly snapshots (default: 300 seconds)
- `settings.hourly_snapshot_max_retries`: maximum retry attempts per vCenter snapshot (default: 3)

Filters/chargeback:
- `settings.tenants_to_filter`: list of tenant name patterns to exclude
- `settings.node_charge_clusters`: list of cluster name patterns for node chargeback
- `settings.srm_activeactive_vms`: list of SRM Active/Active VM name patterns
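To tie the reference together, here is an unverified sketch of what a minimal `vctp.yml` could look like; only the key names come from the lists above, every value is a placeholder, and the nesting simply follows the earlier `settings:` examples:

```yaml
settings:
  log_level: info
  log_output: json
  database_driver: sqlite
  database_url: /var/lib/vctp/db.sqlite3
  bind_ip: 0.0.0.0
  bind_port: 8443
  bind_disable_tls: false
  tls_cert_filename: /etc/dtms/vctp.crt
  tls_key_filename: /etc/dtms/vctp.key
  vcenter_username: svc-vctp@vsphere.local    # placeholder
  vcenter_password: change-me                 # encrypted at startup
  vcenter_insecure: false
  vcenter_inventory_snapshot_seconds: 3600
  vcenter_inventory_aggregate_seconds: 86400
  vcenter_addresses:
    - https://vcenter01.example.com           # placeholder SDK URL
  hourly_snapshot_concurrency: 2
  hourly_snapshot_max_age_days: 60
  daily_snapshot_max_age_months: 12
  reports_dir: /var/lib/vctp/reports
```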
# Developer setup

## Pre-requisite tools

```shell
go install github.com/a-h/templ/cmd/templ@latest
go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
go install github.com/swaggo/swag/cmd/swag@latest
```

## Database
This project now uses [goose](https://github.com/pressly/goose) for DB migrations.

Install via `brew install goose` on a mac, or install via golang with command `go install github.com/pressly/goose/v3/cmd/goose@latest`
@@ -114,116 +139,20 @@ Create a new up/down migration file with this command
goose -dir db/migrations sqlite3 ./db.sqlite3 create init sql
```

### Dist

This is where your assets live. Any Javascript, images, or styling needs to go in the
`dist/assets` directory. The directory will be embedded into the application.

Note, the `dist/assets/css` will be ignored by `git` (configured in `.gitignore`) since the
files that are written to this directory are done by the Tailwind CSS CLI. Custom styles should
go in the `styles/input.css` file.

### E2E

To test the UI, the `e2e` directory contains the Go tests for performing End to end testing. To
run the tests, run the command

```shell
go test -v ./... -tags=e2e
sqlc generate
```

The end to end tests, will start up the app, on a random port, seeding the database using the
`seed.sql` file. Once the tests are complete, the app will be stopped.
## HTML templates
Run `templ generate -path ./components` to generate code based on template files

The E2E tests use Playwright (Go) for better integration into the Go tooling.
## Documentation
Run `swag init --exclude "pkg.mod,pkg.build,pkg.tools" -o server/router/docs`

### Log

This contains helper function to create a `slog.Logger`. Log level and output type can be set
with then environment variables `LOG_LEVEL` and `LOG_OUTPUT`. The logger will write to
`stdout`.

### Server

This contains everything related to the HTTP server. It comes with a graceful shutdown handler
that handles `SIGINT`.

#### Router

This package sets up the routing for the application, such as the `/assets/` path and `/` path.
It uses the standard libraries mux for routing. You can easily swap out for other HTTP
routers such as [gorilla/mux](https://github.com/gorilla/mux).

#### Middleware

This package contains any middleware to configured with routes.

#### Handler

This package contains the handler to handle the actual routes.

#### Styles

This contains the `input.css` that the Tailwind CSS CLI uses to generate your output CSS.
Update `input.css` with any custom CSS you need and it will be included in the output CSS.

#### Version

This package allows you to set a version at build time. If not set, the version defaults to
`dev`. To set the version run the following command,

```shell
go build -o ./app -ldflags="-X version.Value=1.0.0"
```

See the `Makefile` for building the application.

## Run

There are a couple builtin ways to run the application - using `air` or the `Makefile` helper
commands.

### Prerequisites

- Install [templ](https://templ.guide/quick-start/installation) - `go install github.com/a-h/templ/cmd/templ@latest`
- Install [sqlc](https://docs.sqlc.dev/en/stable/overview/install.html)
- Install [tailwindcss CLI](https://tailwindcss.com/docs/installation)

#### tailwindcss
```shell
# Example for macOS arm64
curl -sLO https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-macos-arm64
chmod +x tailwindcss-macos-arm64
sudo mv tailwindcss-macos-arm64 /usr/local/bin/tailwindcss
```

### Makefile

You can also run with the provided `Makefile`. There are commands to generate `templ` files and
tailwind output css.

```shell
# Generate and watch templ
make generate-templ-watch

# Genrate and watch tailwindcss
make generate-tailwind-watch

# Run application
make run
```

## Github Workflow

The repository comes with two Github workflows as well. One called `ci.yml` that lints and
tests your code. The other called `release.yml` that creates a tag, GitHub Release, and
attaches the Linux binary to the Release.

Note, the version of `github.com/a-h/templ/cmd/templ` matches the version in `go.mod`. If these
do not match, the build will fail. When upgrading your `templ` version, make sure to update
`ci.yml` and `release.yml`.

### GoReleaser

If you need to compile for more than Linux, see [GoReleaser](https://goreleaser.com/) for a
better release process.
## CI/CD (Drone)
- `.drone.yml` defines a Docker pipeline:
  - Restore/build caches for Go modules/tools.
  - Build step installs generators (`templ`, `sqlc`, `swag`), regenerates code/docs, runs project scripts, and produces the `vctp-linux-amd64` binary.
  - RPM step packages via `nfpm` using `vctp.yml`, emits RPMs into `./build/`.
  - Optional SFTP deploy step uploads build artifacts (e.g., `vctp*`) to a remote host.
  - Cache rebuild step preserves Go caches across runs.
1 components/core/.gitignore (vendored, new file)
@@ -0,0 +1 @@
*.go
@@ -3,7 +3,7 @@ package core
templ Footer() {
	<footer class="fixed p-1 bottom-0 bg-gray-100 w-full border-t">
		<div class="rounded-lg p-4 text-xs italic text-gray-700 text-center">
			© Go Fullstack
			© Nathan Coad (nathan.coad@dell.com)
		</div>
	</footer>
}
@@ -1,6 +1,6 @@
// Code generated by templ - DO NOT EDIT.

// templ: version: v0.2.778
// templ: version: v0.3.977
package core

//lint:file-ignore SA4006 This context is only used if a nested component is present.
@@ -29,11 +29,11 @@ func Footer() templ.Component {
		templ_7745c5c3_Var1 = templ.NopComponent
	}
	ctx = templ.ClearChildren(ctx)
	_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<footer class=\"fixed p-1 bottom-0 bg-gray-100 w-full border-t\"><div class=\"rounded-lg p-4 text-xs italic text-gray-700 text-center\">© Go Fullstack</div></footer>")
	templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<footer class=\"fixed p-1 bottom-0 bg-gray-100 w-full border-t\"><div class=\"rounded-lg p-4 text-xs italic text-gray-700 text-center\">© Nathan Coad (nathan.coad@dell.com)</div></footer>")
	if templ_7745c5c3_Err != nil {
		return templ_7745c5c3_Err
	}
	return templ_7745c5c3_Err
	return nil
})
}
@@ -6,9 +6,13 @@ templ Header() {
	<head>
		<meta charset="UTF-8"/>
		<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
		<meta name="description" content="Hello world"/>
		<title>Test Page</title>
		<meta name="description" content="vCTP API endpoint"/>
		<title>vCTP API</title>
		<link rel="icon" href="/favicon.ico"/>
		<link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png"/>
		<link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"/>
		<script src="/assets/js/htmx@v2.0.2.min.js"></script>
		<link href={ "/assets/css/output@" + version.Value + ".css" } rel="stylesheet"/>
		<link href="/assets/css/web3.css" rel="stylesheet"/>
	</head>
}
@@ -1,6 +1,6 @@
|
||||
// Code generated by templ - DO NOT EDIT.
|
||||
|
||||
// templ: version: v0.2.778
|
||||
// templ: version: v0.3.977
|
||||
package core
|
||||
|
||||
//lint:file-ignore SA4006 This context is only used if a nested component is present.
|
||||
@@ -31,24 +31,24 @@ func Header() templ.Component {
|
||||
templ_7745c5c3_Var1 = templ.NopComponent
|
||||
}
|
||||
ctx = templ.ClearChildren(ctx)
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<head><meta charset=\"UTF-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"><meta name=\"description\" content=\"Hello world\"><title>Test Page</title><script src=\"/assets/js/htmx@v2.0.2.min.js\"></script><link href=\"")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<head><meta charset=\"UTF-8\"><meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"><meta name=\"description\" content=\"vCTP API endpoint\"><title>vCTP API</title><link rel=\"icon\" href=\"/favicon.ico\"><link rel=\"icon\" type=\"image/png\" sizes=\"16x16\" href=\"/favicon-16x16.png\"><link rel=\"icon\" type=\"image/png\" sizes=\"32x32\" href=\"/favicon-32x32.png\"><script src=\"/assets/js/htmx@v2.0.2.min.js\"></script><link href=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var2 string
|
||||
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs("/assets/css/output@" + version.Value + ".css")
|
||||
var templ_7745c5c3_Var2 templ.SafeURL
|
||||
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinURLErrs("/assets/css/output@" + version.Value + ".css")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 12, Col: 61}
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `core/header.templ`, Line: 15, Col: 61}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("\" rel=\"stylesheet\"></head>")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "\" rel=\"stylesheet\"><link href=\"/assets/css/web3.css\" rel=\"stylesheet\"></head>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
return templ_7745c5c3_Err
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
1 components/views/.gitignore (vendored, new file)
@@ -0,0 +1 @@
*.go
@@ -14,16 +14,73 @@ templ Index(info BuildInfo) {
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
@core.Header()
|
||||
<body class="flex flex-col min-h-screen">
|
||||
<main class="flex-grow">
|
||||
<div>
|
||||
<h1 class="text-5xl font-bold">Build Information</h1>
|
||||
<p class="mt-4"><strong>Build Time:</strong> {info.BuildTime}</p>
|
||||
<p class="mt-4"><strong>SHA1 Version:</strong> {info.SHA1Ver}</p>
|
||||
<p class="mt-4"><strong>Go Runtime Version:</strong> {info.GoVersion}</p>
|
||||
</div>
|
||||
</main>
|
||||
</body>
|
||||
@core.Footer()
|
||||
<body class="flex flex-col min-h-screen web2-bg">
|
||||
<main class="flex-grow web2-shell space-y-8">
|
||||
<section class="web2-header">
|
||||
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
|
||||
<div>
|
||||
<div class="web2-pill">vCTP Console</div>
|
||||
<h1 class="mt-3 text-4xl font-bold">Chargeback Intelligence Dashboard</h1>
|
||||
<p class="mt-2 text-sm text-slate-600">Point in time snapshots of consumption.</p>
|
||||
</div>
|
||||
<div class="web2-button-group">
|
||||
<a class="web2-button" href="/snapshots/hourly">Hourly Snapshots</a>
|
||||
<a class="web2-button" href="/snapshots/daily">Daily Snapshots</a>
|
||||
<a class="web2-button" href="/snapshots/monthly">Monthly Snapshots</a>
|
||||
<a class="web2-button" href="/vm/trace">VM Trace</a>
|
||||
<a class="web2-button" href="/vcenters">vCenters</a>
|
||||
<a class="web2-button" href="/swagger/">Swagger UI</a>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="grid gap-6 md:grid-cols-3">
|
||||
<div class="web2-card">
|
||||
<p class="text-xs uppercase tracking-[0.2em] text-slate-400">Build Time</p>
|
||||
<p class="mt-3 text-xl font-semibold">{info.BuildTime}</p>
|
||||
</div>
|
||||
<div class="web2-card">
|
||||
<p class="text-xs uppercase tracking-[0.2em] text-slate-400">SHA1 Version</p>
|
||||
<p class="mt-3 text-xl font-semibold">{info.SHA1Ver}</p>
|
||||
</div>
|
||||
<div class="web2-card">
|
||||
<p class="text-xs uppercase tracking-[0.2em] text-slate-400">Go Runtime</p>
|
||||
<p class="mt-3 text-xl font-semibold">{info.GoVersion}</p>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="grid gap-6 lg:grid-cols-3">
|
||||
<div class="web2-card">
|
||||
<h2 class="text-lg font-semibold mb-2">Overview</h2>
|
||||
<p class="mt-2 text-sm text-slate-600">
|
||||
vCTP is a vSphere Chargeback Tracking Platform.
|
||||
</p>
|
||||
</div>
|
||||
<div class="web2-card">
|
||||
<h2 class="text-lg font-semibold mb-2">Snapshots and Reports</h2>
|
||||
<div class="mt-3 text-sm text-slate-600 web2-paragraphs">
|
||||
<p>Hourly snapshots capture inventory per vCenter (concurrency via <code class="web2-code">hourly_snapshot_concurrency</code>).</p>
|
||||
<p>Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month (or hourly snapshots if configured).</p>
|
||||
<p>Snapshots are registered in <code class="web2-code">snapshot_registry</code> so regeneration via <code class="web2-code">/api/snapshots/aggregate</code> can locate the correct tables (fallback scanning is also supported).</p>
|
||||
<p>Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.</p>
|
||||
<p>Hourly totals are interval-based: each row represents <code class="web2-code">[HH:00, HH+1:00)</code> and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence.</p>
|
||||
<p>Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (YYYY-MM-DD to YYYY-MM-DD) and prorated totals.</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="web2-card">
|
||||
<h2 class="text-lg font-semibold mb-2">Prorating and Aggregation</h2>
|
||||
<div class="mt-3 space-y-2 text-sm text-slate-600 web2-paragraphs">
|
||||
<p>SamplesPresent is the count of snapshots in which the VM appears; TotalSamples is the count of unique snapshot times for the vCenter.</p>
|
||||
<p>AvgIsPresent = SamplesPresent / TotalSamples (0 when TotalSamples is 0).</p>
|
||||
<p>Daily AvgVcpuCount/AvgRamGB/AvgProvisionedDisk = sum of per-sample values divided by TotalSamples (time-weighted).</p>
|
||||
<p>Daily pool percentages use pool hits divided by SamplesPresent, so they reflect only the time the VM existed.</p>
|
||||
<p>Monthly aggregation weights daily averages by daily total samples, then divides by monthly total samples.</p>
|
||||
<p>CreationTime is only set when vCenter provides it; otherwise it remains 0.</p>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
</body>
|
||||
@core.Footer()
|
||||
</html>
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by templ - DO NOT EDIT.
|
||||
|
||||
// templ: version: v0.2.778
|
||||
// templ: version: v0.3.977
|
||||
package views
|
||||
|
||||
//lint:file-ignore SA4006 This context is only used if a nested component is present.
|
||||
@@ -39,7 +39,7 @@ func Index(info BuildInfo) templ.Component {
|
||||
templ_7745c5c3_Var1 = templ.NopComponent
|
||||
}
|
||||
ctx = templ.ClearChildren(ctx)
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<!doctype html><html lang=\"en\">")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<!doctype html><html lang=\"en\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
@@ -47,46 +47,46 @@ func Index(info BuildInfo) templ.Component {
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("<body class=\"flex flex-col min-h-screen\"><main class=\"flex-grow\"><div><h1 class=\"text-5xl font-bold\">Build Information</h1><p class=\"mt-4\"><strong>Build Time:</strong> ")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">vCTP Console</div><h1 class=\"mt-3 text-4xl font-bold\">Chargeback Intelligence Dashboard</h1><p class=\"mt-2 text-sm text-slate-600\">Point in time snapshots of consumption.</p></div><div class=\"web2-button-group\"><a class=\"web2-button\" href=\"/snapshots/hourly\">Hourly Snapshots</a> <a class=\"web2-button\" href=\"/snapshots/daily\">Daily Snapshots</a> <a class=\"web2-button\" href=\"/snapshots/monthly\">Monthly Snapshots</a> <a class=\"web2-button\" href=\"/vm/trace\">VM Trace</a> <a class=\"web2-button\" href=\"/vcenters\">vCenters</a> <a class=\"web2-button\" href=\"/swagger/\">Swagger UI</a></div></div></section><section class=\"grid gap-6 md:grid-cols-3\"><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.2em] text-slate-400\">Build Time</p><p class=\"mt-3 text-xl font-semibold\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var2 string
|
||||
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(info.BuildTime)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 21, Col: 80}
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 40, Col: 59}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</p><p class=\"mt-4\"><strong>SHA1 Version:</strong> ")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</p></div><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.2em] text-slate-400\">SHA1 Version</p><p class=\"mt-3 text-xl font-semibold\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var3 string
|
||||
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(info.SHA1Ver)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 22, Col: 80}
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 44, Col: 57}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</p><p class=\"mt-4\"><strong>Go Runtime Version:</strong> ")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "</p></div><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.2em] text-slate-400\">Go Runtime</p><p class=\"mt-3 text-xl font-semibold\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var4 string
|
||||
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(info.GoVersion)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 23, Col: 88}
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 48, Col: 59}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</p></div></main></body>")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "</p></div></section><section class=\"grid gap-6 lg:grid-cols-3\"><div class=\"web2-card\"><h2 class=\"text-lg font-semibold mb-2\">Overview</h2><p class=\"mt-2 text-sm text-slate-600\">vCTP is a vSphere Chargeback Tracking Platform.</p></div><div class=\"web2-card\"><h2 class=\"text-lg font-semibold mb-2\">Snapshots and Reports</h2><div class=\"mt-3 text-sm text-slate-600 web2-paragraphs\"><p>Hourly snapshots capture inventory per vCenter (concurrency via <code class=\"web2-code\">hourly_snapshot_concurrency</code>).</p><p>Daily summaries aggregate the hourly snapshots for the day; monthly summaries aggregate daily summaries for the month (or hourly snapshots if configured).</p><p>Snapshots are registered in <code class=\"web2-code\">snapshot_registry</code> so regeneration via <code class=\"web2-code\">/api/snapshots/aggregate</code> can locate the correct tables (fallback scanning is also supported).</p><p>Reports (XLSX with totals/charts) are generated automatically after hourly, daily, and monthly jobs and written to a reports directory.</p><p>Hourly totals are interval-based: each row represents <code class=\"web2-code\">[HH:00, HH+1:00)</code> and uses the first snapshot at or after the hour end (including cross-day snapshots) to prorate VM presence.</p><p>Monthly aggregation reports include a Daily Totals sheet with full-day interval labels (YYYY-MM-DD to YYYY-MM-DD) and prorated totals.</p></div></div><div class=\"web2-card\"><h2 class=\"text-lg font-semibold mb-2\">Prorating and Aggregation</h2><div class=\"mt-3 space-y-2 text-sm text-slate-600 web2-paragraphs\"><p>SamplesPresent is the count of snapshots in which the VM appears; TotalSamples is the count of unique snapshot times for the vCenter.</p><p>AvgIsPresent = SamplesPresent / TotalSamples (0 when TotalSamples is 0).</p><p>Daily AvgVcpuCount/AvgRamGB/AvgProvisionedDisk = sum of per-sample values divided by TotalSamples (time-weighted).</p><p>Daily pool percentages use pool hits divided by SamplesPresent, so they reflect only the time the VM existed.</p><p>Monthly aggregation weights daily averages by daily total samples, then divides by monthly total samples.</p><p>CreationTime is only set when vCenter provides it; otherwise it remains 0.</p></div></div></section></main></body>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
@@ -94,11 +94,11 @@ func Index(info BuildInfo) templ.Component {
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("</html>")
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "</html>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
return templ_7745c5c3_Err
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
285 components/views/snapshots.templ (new file)
@@ -0,0 +1,285 @@
|
||||
package views
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"vctp/components/core"
|
||||
)
|
||||
|
||||
type SnapshotEntry struct {
|
||||
Label string
|
||||
Link string
|
||||
Count int64
|
||||
Group string
|
||||
}
|
||||
|
||||
type VcenterLink struct {
|
||||
Name string
|
||||
Link string
|
||||
}
|
||||
|
||||
type VcenterTotalsEntry struct {
|
||||
Snapshot string
|
||||
RawTime int64
|
||||
VmCount int64
|
||||
VcpuTotal int64
|
||||
RamTotalGB int64
|
||||
}
|
||||
|
||||
type VcenterTotalsMeta struct {
|
||||
ViewType string
|
||||
TypeLabel string
|
||||
HourlyLink string
|
||||
DailyLink string
|
||||
MonthlyLink string
|
||||
HourlyClass string
|
||||
DailyClass string
|
||||
MonthlyClass string
|
||||
}
|
||||
|
||||
type VcenterChartData struct {
|
||||
PointsVm string
|
||||
PointsVcpu string
|
||||
PointsRam string
|
||||
Width int
|
||||
Height int
|
||||
GridX []float64
|
||||
GridY []float64
|
||||
YTicks []ChartTick
|
||||
XTicks []ChartTick
|
||||
}
|
||||
|
||||
type ChartTick struct {
|
||||
Pos float64
|
||||
Label string
|
||||
}
|
||||
|
||||
templ SnapshotHourlyList(entries []SnapshotEntry) {
|
||||
@SnapshotListPage("Hourly Inventory Snapshots", "inventory snapshots captured hourly", entries)
|
||||
}
|
||||
|
||||
templ SnapshotDailyList(entries []SnapshotEntry) {
|
||||
@SnapshotListPage("Daily Inventory Snapshots", "daily summaries of hourly inventory snapshots", entries)
|
||||
}
|
||||
|
||||
templ SnapshotMonthlyList(entries []SnapshotEntry) {
|
||||
@SnapshotListPage("Monthly Inventory Snapshots", "monthly summary aggregated from daily snapshots", entries)
|
||||
}
|
||||
|
||||
templ SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) {
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
@core.Header()
|
||||
<body class="flex flex-col min-h-screen web2-bg">
|
||||
<main class="flex-grow web2-shell space-y-8">
|
||||
<section class="web2-header">
|
||||
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
|
||||
<div>
|
||||
<div class="web2-pill">Snapshot Library</div>
|
||||
<h1 class="mt-3 text-4xl font-bold">{title}</h1>
|
||||
<p class="mt-2 text-sm text-slate-600">{subtitle}</p>
|
||||
</div>
|
||||
<a class="web2-button" href="/">Back to Dashboard</a>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="web2-card">
|
||||
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
|
||||
<h2 class="text-lg font-semibold">Available Exports</h2>
|
||||
<span class="web2-badge">{len(entries)} files</span>
|
||||
</div>
|
||||
<div class="overflow-hidden border border-slate-200 rounded">
|
||||
<table class="web2-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Snapshot</th>
|
||||
<th>Records</th>
|
||||
<th class="text-right">Download</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
for i, entry := range entries {
|
||||
if entry.Group != "" && (i == 0 || entries[i-1].Group != entry.Group) {
|
||||
<tr class="web2-group-row">
|
||||
<td colspan="3" class="font-semibold text-slate-700">{entry.Group}</td>
|
||||
</tr>
|
||||
}
|
||||
<tr>
|
||||
<td>
|
||||
<div class="flex flex-col">
|
||||
<span class="text-sm font-semibold text-slate-700">{entry.Label}</span>
|
||||
</div>
|
||||
</td>
|
||||
<td>
|
||||
<span class="web2-badge">{entry.Count} records</span>
|
||||
</td>
|
||||
<td class="text-right">
|
||||
<a class="web2-link" href={entry.Link}>Download XLSX</a>
|
||||
</td>
|
||||
</tr>
|
||||
}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
</body>
|
||||
@core.Footer()
|
||||
</html>
|
||||
}
|
||||
|
||||
templ VcenterList(links []VcenterLink) {
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
@core.Header()
|
||||
<body class="flex flex-col min-h-screen web2-bg">
|
||||
<main class="flex-grow web2-shell space-y-8">
|
||||
<section class="web2-header">
|
||||
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
|
||||
<div>
|
||||
<div class="web2-pill">vCenter Inventory</div>
|
||||
<h1 class="mt-3 text-4xl font-bold">Monitored vCenters</h1>
|
||||
<p class="mt-2 text-sm text-slate-600">Select a vCenter to view snapshot totals over time.</p>
|
||||
</div>
|
||||
<a class="web2-button" href="/">Back to Dashboard</a>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="web2-card">
|
||||
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
|
||||
<h2 class="text-lg font-semibold">vCenters</h2>
|
||||
<span class="web2-badge">{len(links)} total</span>
|
||||
</div>
|
||||
<div class="overflow-hidden border border-slate-200 rounded">
|
||||
<table class="web2-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>vCenter</th>
|
||||
<th class="text-right">Totals</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
for _, link := range links {
|
||||
<tr>
|
||||
<td class="font-semibold text-slate-700">{link.Name}</td>
|
||||
<td class="text-right">
|
||||
<a class="web2-link" href={link.Link}>View Totals</a>
|
||||
</td>
|
||||
</tr>
|
||||
}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
</body>
|
||||
@core.Footer()
|
||||
</html>
|
||||
}
|
||||
|
||||
templ VcenterTotalsPage(vcenter string, entries []VcenterTotalsEntry, chart VcenterChartData, meta VcenterTotalsMeta) {
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
@core.Header()
|
||||
<body class="flex flex-col min-h-screen web2-bg">
|
||||
<main class="flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto" style="max-width: 1400px;">
|
||||
<section class="web2-header">
|
||||
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
|
||||
<div>
|
||||
<div class="web2-pill">vCenter Totals</div>
|
||||
<h1 class="mt-3 text-4xl font-bold">Totals for {vcenter}</h1>
|
||||
<p class="mt-2 text-sm text-slate-600">{meta.TypeLabel} snapshots of VM count, vCPU, and RAM over time.</p>
|
||||
</div>
|
||||
<div class="flex gap-3">
|
||||
<a class="web2-button secondary" href="/vcenters">All vCenters</a>
|
||||
<a class="web2-button" href="/">Dashboard</a>
|
||||
</div>
|
||||
</div>
|
||||
<div class="web3-button-group mt-8 mb-3">
|
||||
<a class={meta.HourlyClass} href={meta.HourlyLink}>Hourly</a>
|
||||
<a class={meta.DailyClass} href={meta.DailyLink}>Daily</a>
|
||||
<a class={meta.MonthlyClass} href={meta.MonthlyLink}>Monthly</a>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="web2-card">
|
||||
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
|
||||
<h2 class="text-lg font-semibold">{meta.TypeLabel} Snapshots</h2>
|
||||
<span class="web2-badge">{len(entries)} records</span>
|
||||
</div>
|
||||
if chart.PointsVm != "" {
|
||||
<div class="mb-6 overflow-auto">
|
||||
<svg width="100%" height={fmt.Sprintf("%d", chart.Height+80)} viewBox={"0 0 " + fmt.Sprintf("%d", chart.Width) + " " + fmt.Sprintf("%d", chart.Height+70)} role="img" aria-label="Totals over time">
|
||||
<defs>
|
||||
<linearGradient id="grid" x1="0" y1="0" x2="0" y2="1">
|
||||
<stop offset="0%" stop-color="#e2e8f0" stop-opacity="0.6"></stop>
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<rect x="40" y="10" width={fmt.Sprintf("%d", chart.Width-60)} height={fmt.Sprintf("%d", chart.Height)} fill="white" stroke="#e2e8f0"></rect>
|
||||
<!-- grid lines -->
|
||||
<g stroke="#e2e8f0" stroke-width="1" stroke-dasharray="2,4">
|
||||
for _, y := range chart.GridY {
|
||||
<line x1="40" y1={fmt.Sprintf("%.1f", y)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%.1f", y)} />
|
||||
}
|
||||
for _, x := range chart.GridX {
|
||||
<line x1={fmt.Sprintf("%.1f", x)} y1="10" x2={fmt.Sprintf("%.1f", x)} y2={fmt.Sprintf("%d", chart.Height+10)} />
|
||||
}
|
||||
</g>
|
||||
<!-- axes -->
|
||||
<line x1="40" y1={fmt.Sprintf("%d", chart.Height+10)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
|
||||
<line x1="40" y1="10" x2="40" y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
|
||||
<!-- data -->
|
||||
<polyline points={chart.PointsVm} fill="none" stroke="#2563eb" stroke-width="2.5"></polyline>
|
||||
<polyline points={chart.PointsVcpu} fill="none" stroke="#16a34a" stroke-width="2.5"></polyline>
|
||||
<polyline points={chart.PointsRam} fill="none" stroke="#ea580c" stroke-width="2.5"></polyline>
|
||||
<!-- tick labels -->
|
||||
<g font-size="10" fill="#475569" text-anchor="end">
|
||||
for _, tick := range chart.YTicks {
|
||||
<text x="36" y={fmt.Sprintf("%.1f", tick.Pos+3)}>{tick.Label}</text>
|
||||
}
|
||||
</g>
|
||||
<g font-size="10" fill="#475569" text-anchor="middle">
|
||||
for _, tick := range chart.XTicks {
|
||||
<text x={fmt.Sprintf("%.1f", tick.Pos)} y={fmt.Sprintf("%d", chart.Height+24)}>{tick.Label}</text>
|
||||
}
|
||||
</g>
|
||||
<!-- legend -->
|
||||
<g font-size="12" fill="#475569" transform={"translate(40 " + fmt.Sprintf("%d", chart.Height+54) + ")"}>
|
||||
<rect x="0" y="0" width="14" height="8" fill="#2563eb"></rect><text x="22" y="12">VMs</text>
|
||||
<rect x="90" y="0" width="14" height="8" fill="#16a34a"></rect><text x="112" y="12">vCPU</text>
|
||||
<rect x="180" y="0" width="14" height="8" fill="#ea580c"></rect><text x="202" y="12">RAM (GB)</text>
|
||||
</g>
|
||||
<!-- axis labels -->
|
||||
<text x="15" y="20" transform={"rotate(-90 15 20)"} font-size="12" fill="#475569">Totals</text>
|
||||
<text x={fmt.Sprintf("%d", chart.Width/2)} y={fmt.Sprintf("%d", chart.Height+70)} font-size="12" fill="#475569">Snapshot sequence (newest right)</text>
|
||||
</svg>
|
||||
</div>
|
||||
}
|
||||
|
||||
<div class="overflow-hidden border border-slate-200 rounded">
|
||||
<table class="web2-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Snapshot Time</th>
|
||||
<th class="text-right">VMs</th>
|
||||
<th class="text-right">vCPUs</th>
|
||||
<th class="text-right">RAM (GB)</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
for _, entry := range entries {
|
||||
<tr>
|
||||
<td>{entry.Snapshot}</td>
|
||||
<td class="text-right">{entry.VmCount}</td>
|
||||
<td class="text-right">{entry.VcpuTotal}</td>
|
||||
<td class="text-right">{entry.RamTotalGB}</td>
|
||||
</tr>
|
||||
}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
</body>
|
||||
@core.Footer()
|
||||
</html>
|
||||
}
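A minimal sketch of how the Y-axis grid lines and tick labels consumed by the SVG above could be computed. The buildYAxis helper and the linear scaling against a maximum value are assumptions; only the ChartTick fields used above (Pos, Label) and the 10..Height+10 plot area come from the template.

package views

import "fmt"

// buildYAxis maps evenly spaced values in [0, maxValue] onto SVG y positions
// and returns both the grid-line positions and the labelled ticks.
func buildYAxis(maxValue float64, height, steps int) (grid []float64, ticks []ChartTick) {
	if maxValue <= 0 || steps <= 0 {
		return nil, nil
	}
	for i := 0; i <= steps; i++ {
		value := maxValue * float64(i) / float64(steps)
		// y = Height+10 at zero, y = 10 at maxValue (SVG y grows downward).
		y := float64(height+10) - (value/maxValue)*float64(height)
		grid = append(grid, y)
		ticks = append(ticks, ChartTick{Pos: y, Label: fmt.Sprintf("%.0f", value)})
	}
	return grid, ticks
}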
|
||||
1045 components/views/snapshots_templ.go (Normal file)
File diff suppressed because it is too large
173 components/views/vm_trace.templ (Normal file)
@@ -0,0 +1,173 @@
|
||||
package views
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"vctp/components/core"
|
||||
)
|
||||
|
||||
type VmTraceEntry struct {
|
||||
Snapshot string
|
||||
RawTime int64
|
||||
Name string
|
||||
VmId string
|
||||
VmUuid string
|
||||
Vcenter string
|
||||
ResourcePool string
|
||||
VcpuCount int64
|
||||
RamGB int64
|
||||
ProvisionedDisk float64
|
||||
CreationTime string
|
||||
DeletionTime string
|
||||
}
|
||||
|
||||
type VmTraceChart struct {
|
||||
PointsVcpu string
|
||||
PointsRam string
|
||||
PointsTin string
|
||||
PointsBronze string
|
||||
PointsSilver string
|
||||
PointsGold string
|
||||
Width int
|
||||
Height int
|
||||
GridX []float64
|
||||
GridY []float64
|
||||
XTicks []ChartTick
|
||||
YTicks []ChartTick
|
||||
}
|
||||
|
||||
templ VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, creationApprox bool, entries []VmTraceEntry, chart VmTraceChart) {
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
@core.Header()
|
||||
<body class="flex flex-col min-h-screen web2-bg">
|
||||
<main class="flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto" style="max-width: 1400px;">
|
||||
<section class="web2-header">
|
||||
<div class="flex flex-col gap-4 md:flex-row md:items-center md:justify-between">
|
||||
<div>
|
||||
<div class="web2-pill">VM Trace</div>
|
||||
<h1 class="mt-3 text-4xl font-bold">Snapshot history{display_query}</h1>
|
||||
<p class="mt-2 text-sm text-slate-600">Timeline of vCPU, RAM, and resource pool changes across snapshots.</p>
|
||||
</div>
|
||||
<div class="flex gap-3 flex-wrap">
|
||||
<a class="web2-button" href="/">Dashboard</a>
|
||||
</div>
|
||||
</div>
|
||||
<form method="get" action="/vm/trace" class="mt-4 grid gap-3 md:grid-cols-3">
|
||||
<div class="flex flex-col gap-1">
|
||||
<label class="text-sm text-slate-600" for="vm_id">VM ID</label>
|
||||
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="vm_id" name="vm_id" value={vm_id} placeholder="vm-12345"/>
|
||||
</div>
|
||||
<div class="flex flex-col gap-1">
|
||||
<label class="text-sm text-slate-600" for="vm_uuid">VM UUID</label>
|
||||
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="vm_uuid" name="vm_uuid" value={vm_uuid} placeholder="uuid..."/>
|
||||
</div>
|
||||
<div class="flex flex-col gap-1">
|
||||
<label class="text-sm text-slate-600" for="name">Name</label>
|
||||
<input class="web2-card border border-slate-200 px-3 py-2 rounded" type="text" id="name" name="name" value={vm_name} placeholder="VM name"/>
|
||||
</div>
|
||||
<div class="md:col-span-3 flex gap-2">
|
||||
<button class="web3-button active" type="submit">Load VM Trace</button>
|
||||
<a class="web3-button" href="/vm/trace">Clear</a>
|
||||
</div>
|
||||
</form>
|
||||
</section>
|
||||
|
||||
<section class="web2-card">
|
||||
<div class="flex items-center justify-between gap-3 mb-4 flex-wrap">
|
||||
<h2 class="text-lg font-semibold">Snapshot Timeline</h2>
|
||||
<span class="web2-badge">{len(entries)} samples</span>
|
||||
</div>
|
||||
if chart.PointsVcpu != "" {
|
||||
<div class="mb-6 overflow-auto">
|
||||
<svg width="100%" height="360" viewBox={"0 0 " + fmt.Sprintf("%d", chart.Width) + " 320"} role="img" aria-label="VM timeline">
|
||||
<rect x="40" y="10" width={fmt.Sprintf("%d", chart.Width-60)} height={fmt.Sprintf("%d", chart.Height)} fill="white" stroke="#e2e8f0"></rect>
|
||||
<g stroke="#e2e8f0" stroke-width="1" stroke-dasharray="2,4">
|
||||
for _, y := range chart.GridY {
|
||||
<line x1="40" y1={fmt.Sprintf("%.1f", y)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%.1f", y)} />
|
||||
}
|
||||
for _, x := range chart.GridX {
|
||||
<line x1={fmt.Sprintf("%.1f", x)} y1="10" x2={fmt.Sprintf("%.1f", x)} y2={fmt.Sprintf("%d", chart.Height+10)} />
|
||||
}
|
||||
</g>
|
||||
<line x1="40" y1={fmt.Sprintf("%d", chart.Height+10)} x2={fmt.Sprintf("%d", chart.Width-20)} y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
|
||||
<line x1="40" y1="10" x2="40" y2={fmt.Sprintf("%d", chart.Height+10)} stroke="#94a3b8" stroke-width="1.5"></line>
|
||||
<polyline points={chart.PointsVcpu} fill="none" stroke="#2563eb" stroke-width="2.5"></polyline>
|
||||
<polyline points={chart.PointsRam} fill="none" stroke="#16a34a" stroke-width="2.5"></polyline>
|
||||
<polyline points={chart.PointsTin} fill="none" stroke="#0ea5e9" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
|
||||
<polyline points={chart.PointsBronze} fill="none" stroke="#a855f7" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
|
||||
<polyline points={chart.PointsSilver} fill="none" stroke="#94a3b8" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
|
||||
<polyline points={chart.PointsGold} fill="none" stroke="#f59e0b" stroke-width="1.5" stroke-dasharray="4,4"></polyline>
|
||||
<g font-size="10" fill="#475569" text-anchor="end">
|
||||
for _, tick := range chart.YTicks {
|
||||
<text x="36" y={fmt.Sprintf("%.1f", tick.Pos+3)}>{tick.Label}</text>
|
||||
}
|
||||
</g>
|
||||
<g font-size="10" fill="#475569" text-anchor="middle">
|
||||
for _, tick := range chart.XTicks {
|
||||
<text x={fmt.Sprintf("%.1f", tick.Pos)} y={fmt.Sprintf("%d", chart.Height+24)}>{tick.Label}</text>
|
||||
}
|
||||
</g>
|
||||
<g font-size="12" fill="#475569" transform={"translate(40 " + fmt.Sprintf("%d", chart.Height+50) + ")"}>
|
||||
<rect x="0" y="0" width="14" height="8" fill="#2563eb"></rect><text x="22" y="12">vCPU</text>
|
||||
<rect x="90" y="0" width="14" height="8" fill="#16a34a"></rect><text x="112" y="12">RAM (GB)</text>
|
||||
<rect x="200" y="0" width="14" height="8" fill="#0ea5e9"></rect><text x="222" y="12">Tin</text>
|
||||
<rect x="260" y="0" width="14" height="8" fill="#a855f7"></rect><text x="282" y="12">Bronze</text>
|
||||
<rect x="340" y="0" width="14" height="8" fill="#94a3b8"></rect><text x="362" y="12">Silver</text>
|
||||
<rect x="420" y="0" width="14" height="8" fill="#f59e0b"></rect><text x="442" y="12">Gold</text>
|
||||
</g>
|
||||
<text x="15" y="20" transform={"rotate(-90 15 20)"} font-size="12" fill="#475569">Resources / Pool</text>
|
||||
<text x={fmt.Sprintf("%d", chart.Width/2)} y={fmt.Sprintf("%d", chart.Height+70)} font-size="12" fill="#475569">Snapshots (oldest left, newest right)</text>
|
||||
</svg>
|
||||
</div>
|
||||
}
|
||||
<div class="grid gap-3 md:grid-cols-2 mb-4">
|
||||
<div class="web2-card">
|
||||
<p class="text-xs uppercase tracking-[0.15em] text-slate-500">Creation time</p>
|
||||
<p class="mt-2 text-base font-semibold text-slate-800">{creationLabel}</p>
|
||||
if creationApprox {
|
||||
<p class="text-xs text-slate-500 mt-1">Approximate (earliest snapshot)</p>
|
||||
}
|
||||
</div>
|
||||
<div class="web2-card">
|
||||
<p class="text-xs uppercase tracking-[0.15em] text-slate-500">Deletion time</p>
|
||||
<p class="mt-2 text-base font-semibold text-slate-800">{deletionLabel}</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="overflow-hidden border border-slate-200 rounded">
|
||||
<table class="web2-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Snapshot</th>
|
||||
<th>VM Name</th>
|
||||
<th>VmId</th>
|
||||
<th>VmUuid</th>
|
||||
<th>Vcenter</th>
|
||||
<th>Resource Pool</th>
|
||||
<th class="text-right">vCPUs</th>
|
||||
<th class="text-right">RAM (GB)</th>
|
||||
<th class="text-right">Disk</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
for _, e := range entries {
|
||||
<tr>
|
||||
<td>{e.Snapshot}</td>
|
||||
<td>{e.Name}</td>
|
||||
<td>{e.VmId}</td>
|
||||
<td>{e.VmUuid}</td>
|
||||
<td>{e.Vcenter}</td>
|
||||
<td>{e.ResourcePool}</td>
|
||||
<td class="text-right">{e.VcpuCount}</td>
|
||||
<td class="text-right">{e.RamGB}</td>
|
||||
<td class="text-right">{fmt.Sprintf("%.1f", e.ProvisionedDisk)}</td>
|
||||
</tr>
|
||||
}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
</body>
|
||||
@core.Footer()
|
||||
</html>
|
||||
}
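A minimal sketch of how an "x,y x,y ..." polyline string such as chart.PointsVcpu could be assembled for the SVG above. The buildPoints helper, the even horizontal spacing, and the maxValue scaling are assumptions; the 40..Width-20 horizontal and 10..Height+10 vertical plot area match the template.

package views

import (
	"fmt"
	"strings"
)

// buildPoints converts a series of values into the space-separated point list
// expected by an SVG <polyline points="..."> attribute.
func buildPoints(values []float64, width, height int, maxValue float64) string {
	if len(values) == 0 || maxValue <= 0 {
		return ""
	}
	span := float64(width - 60) // plot area runs from x=40 to x=Width-20
	step := 0.0
	if len(values) > 1 {
		step = span / float64(len(values)-1)
	}
	parts := make([]string, 0, len(values))
	for i, v := range values {
		x := 40 + step*float64(i)
		y := float64(height+10) - (v/maxValue)*float64(height)
		parts = append(parts, fmt.Sprintf("%.1f,%.1f", x, y))
	}
	return strings.Join(parts, " ")
}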
|
||||
729 components/views/vm_trace_templ.go (Normal file)
@@ -0,0 +1,729 @@
|
||||
// Code generated by templ - DO NOT EDIT.
|
||||
|
||||
// templ: version: v0.3.977
|
||||
package views
|
||||
|
||||
//lint:file-ignore SA4006 This context is only used if a nested component is present.
|
||||
|
||||
import "github.com/a-h/templ"
|
||||
import templruntime "github.com/a-h/templ/runtime"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"vctp/components/core"
|
||||
)
|
||||
|
||||
type VmTraceEntry struct {
|
||||
Snapshot string
|
||||
RawTime int64
|
||||
Name string
|
||||
VmId string
|
||||
VmUuid string
|
||||
Vcenter string
|
||||
ResourcePool string
|
||||
VcpuCount int64
|
||||
RamGB int64
|
||||
ProvisionedDisk float64
|
||||
CreationTime string
|
||||
DeletionTime string
|
||||
}
|
||||
|
||||
type VmTraceChart struct {
|
||||
PointsVcpu string
|
||||
PointsRam string
|
||||
PointsTin string
|
||||
PointsBronze string
|
||||
PointsSilver string
|
||||
PointsGold string
|
||||
Width int
|
||||
Height int
|
||||
GridX []float64
|
||||
GridY []float64
|
||||
XTicks []ChartTick
|
||||
YTicks []ChartTick
|
||||
}
|
||||
|
||||
func VmTracePage(query string, display_query string, vm_id string, vm_uuid string, vm_name string, creationLabel string, deletionLabel string, creationApprox bool, entries []VmTraceEntry, chart VmTraceChart) templ.Component {
|
||||
return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) {
|
||||
templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context
|
||||
if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil {
|
||||
return templ_7745c5c3_CtxErr
|
||||
}
|
||||
templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W)
|
||||
if !templ_7745c5c3_IsBuffer {
|
||||
defer func() {
|
||||
templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer)
|
||||
if templ_7745c5c3_Err == nil {
|
||||
templ_7745c5c3_Err = templ_7745c5c3_BufErr
|
||||
}
|
||||
}()
|
||||
}
|
||||
ctx = templ.InitializeContext(ctx)
|
||||
templ_7745c5c3_Var1 := templ.GetChildren(ctx)
|
||||
if templ_7745c5c3_Var1 == nil {
|
||||
templ_7745c5c3_Var1 = templ.NopComponent
|
||||
}
|
||||
ctx = templ.ClearChildren(ctx)
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "<!doctype html><html lang=\"en\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = core.Header().Render(ctx, templ_7745c5c3_Buffer)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "<body class=\"flex flex-col min-h-screen web2-bg\"><main class=\"flex-grow web2-shell space-y-8 max-w-screen-2xl mx-auto\" style=\"max-width: 1400px;\"><section class=\"web2-header\"><div class=\"flex flex-col gap-4 md:flex-row md:items-center md:justify-between\"><div><div class=\"web2-pill\">VM Trace</div><h1 class=\"mt-3 text-4xl font-bold\">Snapshot history")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var2 string
|
||||
templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(display_query)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 48, Col: 74}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "</h1><p class=\"mt-2 text-sm text-slate-600\">Timeline of vCPU, RAM, and resource pool changes across snapshots.</p></div><div class=\"flex gap-3 flex-wrap\"><a class=\"web2-button\" href=\"/\">Dashboard</a></div></div><form method=\"get\" action=\"/vm/trace\" class=\"mt-4 grid gap-3 md:grid-cols-3\"><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"vm_id\">VM ID</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"vm_id\" name=\"vm_id\" value=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var3 string
|
||||
templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(vm_id)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 58, Col: 123}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "\" placeholder=\"vm-12345\"></div><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"vm_uuid\">VM UUID</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"vm_uuid\" name=\"vm_uuid\" value=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var4 string
|
||||
templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(vm_uuid)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 62, Col: 129}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "\" placeholder=\"uuid...\"></div><div class=\"flex flex-col gap-1\"><label class=\"text-sm text-slate-600\" for=\"name\">Name</label> <input class=\"web2-card border border-slate-200 px-3 py-2 rounded\" type=\"text\" id=\"name\" name=\"name\" value=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var5 string
|
||||
templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(vm_name)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 66, Col: 123}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "\" placeholder=\"VM name\"></div><div class=\"md:col-span-3 flex gap-2\"><button class=\"web3-button active\" type=\"submit\">Load VM Trace</button> <a class=\"web3-button\" href=\"/vm/trace\">Clear</a></div></form></section><section class=\"web2-card\"><div class=\"flex items-center justify-between gap-3 mb-4 flex-wrap\"><h2 class=\"text-lg font-semibold\">Snapshot Timeline</h2><span class=\"web2-badge\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var6 string
|
||||
templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(len(entries))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 78, Col: 44}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 7, " samples</span></div>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
if chart.PointsVcpu != "" {
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 8, "<div class=\"mb-6 overflow-auto\"><svg width=\"100%\" height=\"360\" viewBox=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var7 string
|
||||
templ_7745c5c3_Var7, templ_7745c5c3_Err = templ.JoinStringErrs("0 0 " + fmt.Sprintf("%d", chart.Width) + " 320")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 82, Col: 95}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var7))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "\" role=\"img\" aria-label=\"VM timeline\"><rect x=\"40\" y=\"10\" width=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var8 string
|
||||
templ_7745c5c3_Var8, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-60))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 83, Col: 68}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var8))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 10, "\" height=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var9 string
|
||||
templ_7745c5c3_Var9, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 83, Col: 109}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var9))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 11, "\" fill=\"white\" stroke=\"#e2e8f0\"></rect> <g stroke=\"#e2e8f0\" stroke-width=\"1\" stroke-dasharray=\"2,4\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
for _, y := range chart.GridY {
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 12, "<line x1=\"40\" y1=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var10 string
|
||||
templ_7745c5c3_Var10, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 50}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var10))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 13, "\" x2=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var11 string
|
||||
templ_7745c5c3_Var11, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 89}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var11))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 14, "\" y2=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var12 string
|
||||
templ_7745c5c3_Var12, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", y))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 86, Col: 117}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var12))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 15, "\"></line> ")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
}
|
||||
for _, x := range chart.GridX {
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 16, "<line x1=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var13 string
|
||||
templ_7745c5c3_Var13, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 42}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var13))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 17, "\" y1=\"10\" x2=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var14 string
|
||||
templ_7745c5c3_Var14, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", x))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 78}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var14))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 18, "\" y2=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var15 string
|
||||
templ_7745c5c3_Var15, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 89, Col: 118}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var15))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 19, "\"></line>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 20, "</g> <line x1=\"40\" y1=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var16 string
|
||||
templ_7745c5c3_Var16, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 60}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var16))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 21, "\" x2=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var17 string
|
||||
templ_7745c5c3_Var17, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width-20))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 99}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var17))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 22, "\" y2=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var18 string
|
||||
templ_7745c5c3_Var18, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 92, Col: 139}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var18))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 23, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <line x1=\"40\" y1=\"10\" x2=\"40\" y2=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var19 string
|
||||
templ_7745c5c3_Var19, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+10))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 93, Col: 76}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var19))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 24, "\" stroke=\"#94a3b8\" stroke-width=\"1.5\"></line> <polyline points=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var20 string
|
||||
templ_7745c5c3_Var20, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsVcpu)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 94, Col: 42}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var20))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 25, "\" fill=\"none\" stroke=\"#2563eb\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var21 string
|
||||
templ_7745c5c3_Var21, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsRam)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 95, Col: 41}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var21))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 26, "\" fill=\"none\" stroke=\"#16a34a\" stroke-width=\"2.5\"></polyline> <polyline points=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var22 string
|
||||
templ_7745c5c3_Var22, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsTin)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 96, Col: 41}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var22))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 27, "\" fill=\"none\" stroke=\"#0ea5e9\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var23 string
|
||||
templ_7745c5c3_Var23, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsBronze)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 97, Col: 44}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var23))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 28, "\" fill=\"none\" stroke=\"#a855f7\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var24 string
|
||||
templ_7745c5c3_Var24, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsSilver)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 98, Col: 44}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var24))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 29, "\" fill=\"none\" stroke=\"#94a3b8\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <polyline points=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var25 string
|
||||
templ_7745c5c3_Var25, templ_7745c5c3_Err = templ.JoinStringErrs(chart.PointsGold)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 99, Col: 42}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var25))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 30, "\" fill=\"none\" stroke=\"#f59e0b\" stroke-width=\"1.5\" stroke-dasharray=\"4,4\"></polyline> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"end\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
for _, tick := range chart.YTicks {
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 31, "<text x=\"36\" y=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var26 string
|
||||
templ_7745c5c3_Var26, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos+3))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 102, Col: 57}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var26))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 32, "\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var27 string
|
||||
templ_7745c5c3_Var27, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 102, Col: 70}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var27))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 33, "</text>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 34, "</g> <g font-size=\"10\" fill=\"#475569\" text-anchor=\"middle\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
for _, tick := range chart.XTicks {
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 35, "<text x=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var28 string
|
||||
templ_7745c5c3_Var28, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", tick.Pos))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 48}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var28))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 36, "\" y=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var29 string
|
||||
templ_7745c5c3_Var29, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+24))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 87}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var29))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 37, "\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var30 string
|
||||
templ_7745c5c3_Var30, templ_7745c5c3_Err = templ.JoinStringErrs(tick.Label)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 107, Col: 100}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var30))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 38, "</text>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 39, "</g> <g font-size=\"12\" fill=\"#475569\" transform=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var31 string
|
||||
templ_7745c5c3_Var31, templ_7745c5c3_Err = templ.JoinStringErrs("translate(40 " + fmt.Sprintf("%d", chart.Height+50) + ")")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 110, Col: 110}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var31))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 40, "\"><rect x=\"0\" y=\"0\" width=\"14\" height=\"8\" fill=\"#2563eb\"></rect><text x=\"22\" y=\"12\">vCPU</text> <rect x=\"90\" y=\"0\" width=\"14\" height=\"8\" fill=\"#16a34a\"></rect><text x=\"112\" y=\"12\">RAM (GB)</text> <rect x=\"200\" y=\"0\" width=\"14\" height=\"8\" fill=\"#0ea5e9\"></rect><text x=\"222\" y=\"12\">Tin</text> <rect x=\"260\" y=\"0\" width=\"14\" height=\"8\" fill=\"#a855f7\"></rect><text x=\"282\" y=\"12\">Bronze</text> <rect x=\"340\" y=\"0\" width=\"14\" height=\"8\" fill=\"#94a3b8\"></rect><text x=\"362\" y=\"12\">Silver</text> <rect x=\"420\" y=\"0\" width=\"14\" height=\"8\" fill=\"#f59e0b\"></rect><text x=\"442\" y=\"12\">Gold</text></g> <text x=\"15\" y=\"20\" transform=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var32 string
|
||||
templ_7745c5c3_Var32, templ_7745c5c3_Err = templ.JoinStringErrs("rotate(-90 15 20)")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 118, Col: 58}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var32))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 41, "\" font-size=\"12\" fill=\"#475569\">Resources / Pool</text> <text x=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var33 string
|
||||
templ_7745c5c3_Var33, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Width/2))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 119, Col: 49}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var33))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 42, "\" y=\"")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var34 string
|
||||
templ_7745c5c3_Var34, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%d", chart.Height+70))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 119, Col: 88}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var34))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 43, "\" font-size=\"12\" fill=\"#475569\">Snapshots (oldest left, newest right)</text></svg></div>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 44, "<div class=\"grid gap-3 md:grid-cols-2 mb-4\"><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.15em] text-slate-500\">Creation time</p><p class=\"mt-2 text-base font-semibold text-slate-800\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var35 string
|
||||
templ_7745c5c3_Var35, templ_7745c5c3_Err = templ.JoinStringErrs(creationLabel)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 126, Col: 76}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var35))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 45, "</p>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
if creationApprox {
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 46, "<p class=\"text-xs text-slate-500 mt-1\">Approximate (earliest snapshot)</p>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 47, "</div><div class=\"web2-card\"><p class=\"text-xs uppercase tracking-[0.15em] text-slate-500\">Deletion time</p><p class=\"mt-2 text-base font-semibold text-slate-800\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var36 string
|
||||
templ_7745c5c3_Var36, templ_7745c5c3_Err = templ.JoinStringErrs(deletionLabel)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 133, Col: 76}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var36))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 48, "</p></div></div><div class=\"overflow-hidden border border-slate-200 rounded\"><table class=\"web2-table\"><thead><tr><th>Snapshot</th><th>VM Name</th><th>VmId</th><th>VmUuid</th><th>Vcenter</th><th>Resource Pool</th><th class=\"text-right\">vCPUs</th><th class=\"text-right\">RAM (GB)</th><th class=\"text-right\">Disk</th></tr></thead> <tbody>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
for _, e := range entries {
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 49, "<tr><td>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var37 string
|
||||
templ_7745c5c3_Var37, templ_7745c5c3_Err = templ.JoinStringErrs(e.Snapshot)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 154, Col: 25}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var37))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 50, "</td><td>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var38 string
|
||||
templ_7745c5c3_Var38, templ_7745c5c3_Err = templ.JoinStringErrs(e.Name)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 155, Col: 21}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var38))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 51, "</td><td>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var39 string
|
||||
templ_7745c5c3_Var39, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmId)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 156, Col: 21}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var39))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 52, "</td><td>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var40 string
|
||||
templ_7745c5c3_Var40, templ_7745c5c3_Err = templ.JoinStringErrs(e.VmUuid)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 157, Col: 23}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var40))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 53, "</td><td>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var41 string
|
||||
templ_7745c5c3_Var41, templ_7745c5c3_Err = templ.JoinStringErrs(e.Vcenter)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 158, Col: 24}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var41))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 54, "</td><td>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var42 string
|
||||
templ_7745c5c3_Var42, templ_7745c5c3_Err = templ.JoinStringErrs(e.ResourcePool)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 159, Col: 29}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var42))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 55, "</td><td class=\"text-right\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var43 string
|
||||
templ_7745c5c3_Var43, templ_7745c5c3_Err = templ.JoinStringErrs(e.VcpuCount)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 160, Col: 45}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var43))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 56, "</td><td class=\"text-right\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var44 string
|
||||
templ_7745c5c3_Var44, templ_7745c5c3_Err = templ.JoinStringErrs(e.RamGB)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 161, Col: 41}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var44))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 57, "</td><td class=\"text-right\">")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
var templ_7745c5c3_Var45 string
|
||||
templ_7745c5c3_Var45, templ_7745c5c3_Err = templ.JoinStringErrs(fmt.Sprintf("%.1f", e.ProvisionedDisk))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/vm_trace.templ`, Line: 162, Col: 72}
|
||||
}
|
||||
_, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var45))
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 58, "</td></tr>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 59, "</tbody></table></div></section></main></body>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = core.Footer().Render(ctx, templ_7745c5c3_Buffer)
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 60, "</html>")
|
||||
if templ_7745c5c3_Err != nil {
|
||||
return templ_7745c5c3_Err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
var _ = templruntime.GeneratedTemplate
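A minimal sketch of serving the generated component from a net/http handler. The handler name, routing, data loading, and the "Unknown" / "Still present" placeholder labels are assumptions; Component.Render(ctx, w) is the standard templ runtime call used by the generated code above.

package web

import (
	"net/http"

	"vctp/components/views"
)

func vmTraceHandler(w http.ResponseWriter, r *http.Request) {
	vmID := r.URL.Query().Get("vm_id")
	vmUUID := r.URL.Query().Get("vm_uuid")
	vmName := r.URL.Query().Get("name")

	// Entries and chart data would normally come from the database layer.
	var entries []views.VmTraceEntry
	var chart views.VmTraceChart

	page := views.VmTracePage("", "", vmID, vmUUID, vmName, "Unknown", "Still present", false, entries, chart)
	if err := page.Render(r.Context(), w); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}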
|
||||
114 db/db.go
@@ -3,46 +3,79 @@ package db
|
||||
import (
|
||||
"database/sql"
|
||||
"embed"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"vctp/db/queries"
|
||||
"strings"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/pressly/goose/v3"
|
||||
)
|
||||
|
||||
//go:embed migrations/*.sql
|
||||
//go:embed migrations migrations_postgres
|
||||
var migrations embed.FS
|
||||
|
||||
type Database interface {
|
||||
DB() *sqlx.DB
|
||||
Queries() *queries.Queries
|
||||
Queries() Querier
|
||||
Logger() *slog.Logger
|
||||
Close() error
|
||||
}
|
||||
|
||||
func New(logger *slog.Logger, url string) (Database, error) {
|
||||
db, err := newLocalDB(logger, url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = db.db.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
type Config struct {
|
||||
Driver string
|
||||
DSN string
|
||||
}
|
||||
|
||||
// Migrate runs the migrations on the database. Assumes the database is SQLite.
|
||||
func Migrate(db Database) error {
|
||||
func New(logger *slog.Logger, cfg Config) (Database, error) {
|
||||
driver := normalizeDriver(cfg.Driver)
|
||||
switch driver {
|
||||
case "sqlite":
|
||||
db, err := newLocalDB(logger, cfg.DSN)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = db.db.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
case "postgres":
|
||||
db, err := newPostgresDB(logger, cfg.DSN)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = db.db.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported database driver: %s", cfg.Driver)
|
||||
}
|
||||
}
|
||||
|
||||
// Migrate runs the migrations on the database.
|
||||
func Migrate(db Database, driver string) error {
|
||||
driver = normalizeDriver(driver)
|
||||
|
||||
goose.SetBaseFS(migrations)
|
||||
|
||||
if err := goose.SetDialect("sqlite3"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := goose.Up(db.DB().DB, "migrations"); err != nil {
|
||||
panic(err)
|
||||
switch driver {
|
||||
case "sqlite":
|
||||
if err := goose.SetDialect("sqlite3"); err != nil {
|
||||
return fmt.Errorf("failed to set sqlite dialect: %w", err)
|
||||
}
|
||||
if err := goose.Up(db.DB().DB, "migrations"); err != nil {
|
||||
return fmt.Errorf("failed to run sqlite migrations: %w", err)
|
||||
}
|
||||
case "postgres":
|
||||
if err := goose.SetDialect("postgres"); err != nil {
|
||||
return fmt.Errorf("failed to set postgres dialect: %w", err)
|
||||
}
|
||||
if err := goose.Up(db.DB().DB, "migrations_postgres"); err != nil {
|
||||
return fmt.Errorf("failed to run postgres migrations: %w", err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported database driver: %s", driver)
|
||||
}
|
||||
|
||||
// TODO - replace with goose
|
||||
@@ -69,6 +102,18 @@ func Migrate(db Database) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func normalizeDriver(driver string) string {
|
||||
normalized := strings.ToLower(strings.TrimSpace(driver))
|
||||
switch normalized {
|
||||
case "", "sqlite3":
|
||||
return "sqlite"
|
||||
case "postgresql":
|
||||
return "postgres"
|
||||
default:
|
||||
return normalized
|
||||
}
|
||||
}
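A minimal sketch of how the driver-aware constructor and Migrate might be wired together at startup. The environment-variable names and the main function are assumptions; db.Config, db.New, and db.Migrate match the signatures introduced in this diff.

package main

import (
	"log/slog"
	"os"

	"vctp/db"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	cfg := db.Config{
		Driver: os.Getenv("DB_DRIVER"), // e.g. "sqlite" or "postgres"
		DSN:    os.Getenv("DB_DSN"),    // e.g. "file:db.sqlite3" or a postgres URL
	}

	database, err := db.New(logger, cfg)
	if err != nil {
		logger.Error("failed to open database", "error", err)
		os.Exit(1)
	}
	defer database.Close()

	if err := db.Migrate(database, cfg.Driver); err != nil {
		logger.Error("failed to run migrations", "error", err)
		os.Exit(1)
	}
}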
|
||||
|
||||
// ConvertToSQLParams is a utility function that generically converts a struct to a corresponding sqlc-generated struct
|
||||
func ConvertToSQLParams(input interface{}, output interface{}) {
|
||||
inputVal := reflect.ValueOf(input).Elem()
|
||||
@@ -82,19 +127,48 @@ func ConvertToSQLParams(input interface{}, output interface{}) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle fields of type sql.NullString, sql.NullInt64, and normal string/int64 fields
|
||||
switch outputField.Type() {
|
||||
case reflect.TypeOf(sql.NullString{}):
|
||||
// Handle sql.NullString
|
||||
if inputField.Kind() == reflect.Ptr && inputField.IsNil() {
|
||||
outputField.Set(reflect.ValueOf(sql.NullString{Valid: false}))
|
||||
} else {
|
||||
outputField.Set(reflect.ValueOf(sql.NullString{String: inputField.String(), Valid: true}))
|
||||
}
|
||||
|
||||
case reflect.TypeOf(sql.NullInt64{}):
|
||||
// Handle sql.NullInt64
|
||||
if inputField.Int() == 0 {
|
||||
outputField.Set(reflect.ValueOf(sql.NullInt64{Valid: false}))
|
||||
} else {
|
||||
outputField.Set(reflect.ValueOf(sql.NullInt64{Int64: inputField.Int(), Valid: true}))
|
||||
}
|
||||
|
||||
case reflect.TypeOf(sql.NullFloat64{}):
|
||||
// Handle sql.NullFloat64
|
||||
if inputField.Float() == 0 {
|
||||
outputField.Set(reflect.ValueOf(sql.NullFloat64{Valid: false}))
|
||||
} else {
|
||||
outputField.Set(reflect.ValueOf(sql.NullFloat64{Float64: inputField.Float(), Valid: true}))
|
||||
}
|
||||
|
||||
case reflect.TypeOf(""):
|
||||
// Handle normal string fields
|
||||
if inputField.Kind() == reflect.Ptr && inputField.IsNil() {
|
||||
outputField.SetString("") // Set to empty string if input is nil
|
||||
} else {
|
||||
outputField.SetString(inputField.String())
|
||||
}
|
||||
|
||||
case reflect.TypeOf(int64(0)):
|
||||
// Handle normal int64 fields
|
||||
outputField.SetInt(inputField.Int())
|
||||
|
||||
case reflect.TypeOf(float64(0)):
|
||||
// Handle normal float64 fields
|
||||
outputField.SetFloat(inputField.Float())
|
||||
|
||||
}
|
||||
}
|
||||
}
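A hypothetical example of using ConvertToSQLParams to map an application struct onto a sqlc-style params struct. Both struct definitions here are illustrative, not the real types from this repository; the real pairs would be the application structs and the generated *Params structs in vctp/db/queries.

package example

import (
	"database/sql"

	"vctp/db"
)

// Update and CreateUpdateParams are illustrative stand-ins with matching field names.
type Update struct {
	VmName   string
	NewVcpus int64
}

type CreateUpdateParams struct {
	VmName   sql.NullString
	NewVcpus sql.NullInt64
}

func toParams(u *Update) CreateUpdateParams {
	var params CreateUpdateParams
	// Both arguments must be pointers: ConvertToSQLParams reflects over the
	// input fields and fills same-named output fields, wrapping values in
	// sql.Null* types where the output expects them.
	db.ConvertToSQLParams(u, &params)
	return params
}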
|
||||
|
||||
2140 db/helpers.go (Normal file)
File diff suppressed because it is too large
28 db/local.go
@@ -2,11 +2,10 @@ package db
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"vctp/db/queries"
|
||||
|
||||
//_ "github.com/tursodatabase/libsql-client-go/libsql"
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "modernc.org/sqlite"
|
||||
)
|
||||
@@ -28,7 +27,7 @@ func (d *LocalDB) DB() *sqlx.DB {
|
||||
return d.db
|
||||
}
|
||||
|
||||
func (d *LocalDB) Queries() *queries.Queries {
|
||||
func (d *LocalDB) Queries() Querier {
|
||||
return d.queries
|
||||
}
|
||||
|
||||
@@ -37,12 +36,12 @@ func (d *LocalDB) Logger() *slog.Logger {
|
||||
}
|
||||
|
||||
func (d *LocalDB) Close() error {
|
||||
fmt.Println("Shutting database")
|
||||
d.logger.Debug("test")
|
||||
//fmt.Println("Shutting database")
|
||||
d.logger.Debug("Shutting database")
|
||||
return d.db.Close()
|
||||
}
|
||||
|
||||
func newLocalDB(logger *slog.Logger, path string) (*LocalDB, error) {
|
||||
func newLocalDB(logger *slog.Logger, dsn string) (*LocalDB, error) {
|
||||
|
||||
// TODO - work out if https://kerkour.com/sqlite-for-servers is possible without using sqlx
|
||||
/*
|
||||
@@ -62,8 +61,9 @@ func newLocalDB(logger *slog.Logger, path string) (*LocalDB, error) {
|
||||
readDB.SetMaxOpenConns(max(4, runtime.NumCPU()))
|
||||
*/
|
||||
|
||||
//db, err := sql.Open("libsql", "file:"+path)
|
||||
db, err := sqlx.Open("sqlite", "file:"+path)
|
||||
normalizedDSN := normalizeSqliteDSN(dsn)
|
||||
//db, err := sql.Open("libsql", normalizedDSN)
|
||||
db, err := sqlx.Open("sqlite", normalizedDSN)
|
||||
if err != nil {
|
||||
logger.Error("can't open database connection", "error", err)
|
||||
return nil, err
|
||||
@@ -92,3 +92,15 @@ func newLocalDB(logger *slog.Logger, path string) (*LocalDB, error) {
|
||||
|
||||
return &LocalDB{logger: logger, db: db, queries: queries.New(db)}, nil
|
||||
}
|
||||
|
||||
func normalizeSqliteDSN(dsn string) string {
|
||||
trimmed := strings.TrimSpace(dsn)
|
||||
if trimmed == "" {
|
||||
return "file:db.sqlite3"
|
||||
}
|
||||
lower := strings.ToLower(trimmed)
|
||||
if strings.HasPrefix(lower, "file:") || strings.HasPrefix(lower, "file::memory:") || trimmed == ":memory:" {
|
||||
return trimmed
|
||||
}
|
||||
return "file:" + trimmed
|
||||
}
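Illustrative behaviour of normalizeSqliteDSN, written as a test-style sketch. The test name and cases are assumptions derived from the branches above; since the function is unexported, this would live in package db.

package db

import "testing"

func TestNormalizeSqliteDSN(t *testing.T) {
	cases := map[string]string{
		"":                 "file:db.sqlite3", // empty DSN falls back to the default database file
		"db.sqlite3":       "file:db.sqlite3", // bare paths gain the file: prefix
		"file:data/app.db": "file:data/app.db",
		":memory:":         ":memory:",
	}
	for in, want := range cases {
		if got := normalizeSqliteDSN(in); got != want {
			t.Errorf("normalizeSqliteDSN(%q) = %q, want %q", in, got, want)
		}
	}
}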
|
||||
|
||||
@@ -10,7 +10,6 @@ CREATE TABLE IF NOT EXISTS "Inventory" (
"CreationTime" INTEGER,
"DeletionTime" INTEGER,
"ResourcePool" TEXT,
"VmType" TEXT,
"Datacenter" TEXT,
"Cluster" TEXT,
"Folder" TEXT,

@@ -10,8 +10,8 @@ ALTER TABLE "Events" ADD COLUMN VmName TEXT;
-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Events" DROP COLUMN VmName;
ALTER TABLE "Updates" DROP COLUMN ComputeResourceId;
ALTER TABLE "Updates" DROP COLUMN DatacenterId;
ALTER TABLE "Events" DROP COLUMN ComputeResourceId;
ALTER TABLE "Events" DROP COLUMN DatacenterId;
ALTER TABLE "Events" RENAME COLUMN ComputeResourceName to ComputeResource;
ALTER TABLE "Events" RENAME COLUMN DatacenterName to Datacenter;
-- +goose StatementEnd

db/migrations/20240915015710_events_type.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Events" ADD COLUMN EventType TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN EventType;
-- +goose StatementEnd

db/migrations/20240915232747_extend_inventory.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN IsTemplate INTEGER;
ALTER TABLE "Inventory" ADD COLUMN PowerState INTEGER;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" DROP COLUMN PowerState;
ALTER TABLE "Inventory" DROP COLUMN IsTemplate;
-- +goose StatementEnd

db/migrations/20240916041639_rename_eventid.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN EventId to CloudId;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN CloudId to EventId;
-- +goose StatementEnd

db/migrations/20240916045259_updates_disk.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN NewProvisionedDisk REAL;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN NewProvisionedDisk;
-- +goose StatementEnd

db/migrations/20240926022714_updates_user.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN UserName TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN UserName;
-- +goose StatementEnd

db/migrations/20240927002029_change_inventory.sql (new file, 56 lines)
@@ -0,0 +1,56 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN IsTemplate TO IsTemplate_old;
ALTER TABLE "Inventory" RENAME COLUMN PowerState TO PowerState_old;
ALTER TABLE "Inventory" RENAME COLUMN SrmPlaceholder TO SrmPlaceholder_old;
ALTER TABLE "Inventory" ADD COLUMN IsTemplate TEXT NOT NULL DEFAULT "FALSE";
ALTER TABLE "Inventory" ADD COLUMN PoweredOn TEXT NOT NULL DEFAULT "FALSE";
ALTER TABLE "Inventory" ADD COLUMN SrmPlaceholder TEXT NOT NULL DEFAULT "FALSE";
UPDATE "Inventory"
SET IsTemplate = CASE
WHEN IsTemplate_old = 1 THEN 'TRUE'
ELSE 'FALSE'
END;
UPDATE "Inventory"
SET PoweredOn = CASE
WHEN PowerState_old = 1 THEN 'TRUE'
ELSE 'FALSE'
END;
UPDATE "Inventory"
SET SrmPlaceholder = CASE
WHEN SrmPlaceholder_old = 1 THEN 'TRUE'
ELSE 'FALSE'
END;
ALTER TABLE "Inventory" DROP COLUMN IsTemplate_old;
ALTER TABLE "Inventory" DROP COLUMN PowerState_old;

ALTER TABLE "Inventory" DROP COLUMN SrmPlaceholder_old;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN IsTemplate TO IsTemplate_old;
ALTER TABLE "Inventory" RENAME COLUMN PoweredOn TO PoweredOn_old;
ALTER TABLE "Inventory" RENAME COLUMN SrmPlaceholder TO SrmPlaceholder_old;
ALTER TABLE "Inventory" ADD COLUMN IsTemplate INTEGER;
ALTER TABLE "Inventory" ADD COLUMN PowerState INTEGER;
ALTER TABLE "Inventory" ADD COLUMN SrmPlaceholder INTEGER;
UPDATE "Inventory"
SET IsTemplate = CASE
WHEN IsTemplate_old = 'TRUE' THEN 1
ELSE 0
END;
UPDATE "Inventory"
SET PowerState = CASE
WHEN PoweredOn_old = 'TRUE' THEN 1
ELSE 0
END;
UPDATE "Inventory"
SET SrmPlaceholder = CASE
WHEN SrmPlaceholder_old = 'TRUE' THEN 1
ELSE 0
END;
ALTER TABLE "Inventory" DROP COLUMN IsTemplate_old;
ALTER TABLE "Inventory" DROP COLUMN PoweredOn_old;
ALTER TABLE "Inventory" DROP COLUMN SrmPlaceholder_old;
-- +goose StatementEnd
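
Because this migration re-encodes the IsTemplate, PoweredOn and SrmPlaceholder flags as TEXT columns holding 'TRUE'/'FALSE', Go code reading them has to map the strings back to booleans (sqlc types these columns as interface{} in the generated models later in this diff). The helper below is a hypothetical sketch of that conversion, not part of the change set, and assumes "strings" is imported.

// parseFlag is illustrative only: it turns the TEXT-encoded 'TRUE'/'FALSE'
// values written by this migration into a Go bool. SQLite drivers may hand
// the value back as string or []byte, so both are handled; anything else is false.
func parseFlag(v interface{}) bool {
	switch s := v.(type) {
	case string:
		return strings.EqualFold(s, "TRUE")
	case []byte:
		return strings.EqualFold(string(s), "TRUE")
	default:
		return false
	}
}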

db/migrations/20240930012450_add_uuid.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN VmUuid TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" DROP COLUMN VmUuid;
-- +goose StatementEnd

db/migrations/20240930031506_add_table.sql (new file, 18 lines)
@@ -0,0 +1,18 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS "InventoryHistory" (
"Hid" INTEGER PRIMARY KEY AUTOINCREMENT,
"InventoryId" INTEGER,
"ReportDate" INTEGER,
"UpdateTime" INTEGER,
"PreviousVcpus" INTEGER,
"PreviousRam" INTEGER,
"PreviousResourcePool" TEXT,
"PreviousProvisionedDisk" REAL
)
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE "InventoryHistory";
-- +goose StatementEnd

db/migrations/20241001222729_add_placeholder.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN PlaceholderChange TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN PlaceholderChange;
-- +goose StatementEnd

db/migrations/20241002032439_add_name.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN Name TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN Name;
-- +goose StatementEnd

db/migrations/20241002033323_add_change.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN RawChangeString BLOB;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN RawChangeString;
-- +goose StatementEnd

db/migrations/20250115094500_snapshot_registry.sql (new file, 14 lines)
@@ -0,0 +1,14 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS snapshot_registry (
id INTEGER PRIMARY KEY AUTOINCREMENT,
snapshot_type TEXT NOT NULL,
table_name TEXT NOT NULL UNIQUE,
snapshot_time BIGINT NOT NULL
);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE snapshot_registry;
-- +goose StatementEnd

db/migrations/20250116090000_drop_vmtype.sql (new file, 48 lines)
@@ -0,0 +1,48 @@
-- +goose Up
-- +goose StatementBegin
PRAGMA foreign_keys=OFF;

ALTER TABLE "Inventory" RENAME TO "Inventory_old";

CREATE TABLE IF NOT EXISTS "Inventory" (
"Iid" INTEGER PRIMARY KEY AUTOINCREMENT,
"Name" TEXT NOT NULL,
"Vcenter" TEXT NOT NULL,
"VmId" TEXT,
"EventKey" TEXT,
"CloudId" TEXT,
"CreationTime" INTEGER,
"DeletionTime" INTEGER,
"ResourcePool" TEXT,
"Datacenter" TEXT,
"Cluster" TEXT,
"Folder" TEXT,
"ProvisionedDisk" REAL,
"InitialVcpus" INTEGER,
"InitialRam" INTEGER,
"IsTemplate" TEXT NOT NULL DEFAULT "FALSE",
"PoweredOn" TEXT NOT NULL DEFAULT "FALSE",
"SrmPlaceholder" TEXT NOT NULL DEFAULT "FALSE",
"VmUuid" TEXT
);

INSERT INTO "Inventory" (
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
)
SELECT
"Iid", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus",
"InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"
FROM "Inventory_old";

DROP TABLE "Inventory_old";

PRAGMA foreign_keys=ON;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN "VmType" TEXT;
-- +goose StatementEnd

db/migrations/20250116101000_snapshot_count.sql (new file, 5 lines)
@@ -0,0 +1,5 @@
-- +goose Up
ALTER TABLE snapshot_registry ADD COLUMN snapshot_count BIGINT NOT NULL DEFAULT 0;

-- +goose Down
ALTER TABLE snapshot_registry DROP COLUMN snapshot_count;

@@ -0,0 +1,5 @@
-- +goose Up
CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time);

-- +goose Down
DROP INDEX IF EXISTS idx_snapshot_registry_type_time;

db/migrations_postgres/20240912012927_init.sql (new file, 37 lines)
@@ -0,0 +1,37 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS "Inventory" (
"Iid" BIGSERIAL PRIMARY KEY,
"Name" TEXT NOT NULL,
"Vcenter" TEXT NOT NULL,
"VmId" TEXT,
"EventKey" TEXT,
"EventId" TEXT,
"CreationTime" BIGINT,
"DeletionTime" BIGINT,
"ResourcePool" TEXT,
"Datacenter" TEXT,
"Cluster" TEXT,
"Folder" TEXT,
"ProvisionedDisk" REAL,
"InitialVcpus" INTEGER,
"InitialRam" INTEGER,
"SrmPlaceholder" INTEGER
);

CREATE TABLE IF NOT EXISTS "Updates" (
"Uid" BIGSERIAL PRIMARY KEY,
"InventoryId" INTEGER,
"UpdateTime" BIGINT,
"UpdateType" TEXT NOT NULL,
"NewVcpus" INTEGER,
"NewRam" INTEGER,
"NewResourcePool" TEXT
);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE "Inventory";
DROP TABLE "Updates";
-- +goose StatementEnd

db/migrations_postgres/20240912231739_extend_updates.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN "EventKey" TEXT;
ALTER TABLE "Updates" ADD COLUMN "EventId" TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN "EventKey";
ALTER TABLE "Updates" DROP COLUMN "EventId";
-- +goose StatementEnd

db/migrations_postgres/20240913021038_events.sql (new file, 21 lines)
@@ -0,0 +1,21 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS "Events" (
"Eid" BIGSERIAL PRIMARY KEY,
"CloudId" TEXT NOT NULL,
"Source" TEXT NOT NULL,
"EventTime" BIGINT,
"ChainId" TEXT NOT NULL,
"VmId" TEXT,
"EventKey" TEXT,
"Datacenter" TEXT,
"ComputeResource" TEXT,
"UserName" TEXT,
"Processed" INTEGER NOT NULL DEFAULT 0
);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE "Events";
-- +goose StatementEnd

db/migrations_postgres/20240913043145_extend_events.sql (new file, 17 lines)
@@ -0,0 +1,17 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Events" RENAME COLUMN "Datacenter" TO "DatacenterName";
ALTER TABLE "Events" RENAME COLUMN "ComputeResource" TO "ComputeResourceName";
ALTER TABLE "Events" ADD COLUMN "DatacenterId" TEXT;
ALTER TABLE "Events" ADD COLUMN "ComputeResourceId" TEXT;
ALTER TABLE "Events" ADD COLUMN "VmName" TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Events" DROP COLUMN "VmName";
ALTER TABLE "Events" DROP COLUMN "ComputeResourceId";
ALTER TABLE "Events" DROP COLUMN "DatacenterId";
ALTER TABLE "Events" RENAME COLUMN "ComputeResourceName" TO "ComputeResource";
ALTER TABLE "Events" RENAME COLUMN "DatacenterName" TO "Datacenter";
-- +goose StatementEnd

db/migrations_postgres/20240915015710_events_type.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Events" ADD COLUMN "EventType" TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN "EventType";
-- +goose StatementEnd

db/migrations_postgres/20240915232747_extend_inventory.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN "IsTemplate" INTEGER;
ALTER TABLE "Inventory" ADD COLUMN "PowerState" INTEGER;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" DROP COLUMN "PowerState";
ALTER TABLE "Inventory" DROP COLUMN "IsTemplate";
-- +goose StatementEnd

db/migrations_postgres/20240916041639_rename_eventid.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN "EventId" TO "CloudId";
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN "CloudId" TO "EventId";
-- +goose StatementEnd

db/migrations_postgres/20240916045259_updates_disk.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN "NewProvisionedDisk" REAL;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN "NewProvisionedDisk";
-- +goose StatementEnd

db/migrations_postgres/20240926022714_updates_user.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN "UserName" TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN "UserName";
-- +goose StatementEnd

db/migrations_postgres/20240927002029_change_inventory.sql (new file, 55 lines)
@@ -0,0 +1,55 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN "IsTemplate" TO "IsTemplate_old";
ALTER TABLE "Inventory" RENAME COLUMN "PowerState" TO "PowerState_old";
ALTER TABLE "Inventory" RENAME COLUMN "SrmPlaceholder" TO "SrmPlaceholder_old";
ALTER TABLE "Inventory" ADD COLUMN "IsTemplate" TEXT NOT NULL DEFAULT 'FALSE';
ALTER TABLE "Inventory" ADD COLUMN "PoweredOn" TEXT NOT NULL DEFAULT 'FALSE';
ALTER TABLE "Inventory" ADD COLUMN "SrmPlaceholder" TEXT NOT NULL DEFAULT 'FALSE';
UPDATE "Inventory"
SET "IsTemplate" = CASE
WHEN "IsTemplate_old" = 1 THEN 'TRUE'
ELSE 'FALSE'
END;
UPDATE "Inventory"
SET "PoweredOn" = CASE
WHEN "PowerState_old" = 1 THEN 'TRUE'
ELSE 'FALSE'
END;
UPDATE "Inventory"
SET "SrmPlaceholder" = CASE
WHEN "SrmPlaceholder_old" = 1 THEN 'TRUE'
ELSE 'FALSE'
END;
ALTER TABLE "Inventory" DROP COLUMN "IsTemplate_old";
ALTER TABLE "Inventory" DROP COLUMN "PowerState_old";
ALTER TABLE "Inventory" DROP COLUMN "SrmPlaceholder_old";
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" RENAME COLUMN "IsTemplate" TO "IsTemplate_old";
ALTER TABLE "Inventory" RENAME COLUMN "PoweredOn" TO "PoweredOn_old";
ALTER TABLE "Inventory" RENAME COLUMN "SrmPlaceholder" TO "SrmPlaceholder_old";
ALTER TABLE "Inventory" ADD COLUMN "IsTemplate" INTEGER;
ALTER TABLE "Inventory" ADD COLUMN "PowerState" INTEGER;
ALTER TABLE "Inventory" ADD COLUMN "SrmPlaceholder" INTEGER;
UPDATE "Inventory"
SET "IsTemplate" = CASE
WHEN "IsTemplate_old" = 'TRUE' THEN 1
ELSE 0
END;
UPDATE "Inventory"
SET "PowerState" = CASE
WHEN "PoweredOn_old" = 'TRUE' THEN 1
ELSE 0
END;
UPDATE "Inventory"
SET "SrmPlaceholder" = CASE
WHEN "SrmPlaceholder_old" = 'TRUE' THEN 1
ELSE 0
END;
ALTER TABLE "Inventory" DROP COLUMN "IsTemplate_old";
ALTER TABLE "Inventory" DROP COLUMN "PoweredOn_old";
ALTER TABLE "Inventory" DROP COLUMN "SrmPlaceholder_old";
-- +goose StatementEnd

db/migrations_postgres/20240930012450_add_uuid.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN "VmUuid" TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" DROP COLUMN "VmUuid";
-- +goose StatementEnd

db/migrations_postgres/20240930031506_add_table.sql (new file, 18 lines)
@@ -0,0 +1,18 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS "InventoryHistory" (
"Hid" BIGSERIAL PRIMARY KEY,
"InventoryId" INTEGER,
"ReportDate" BIGINT,
"UpdateTime" BIGINT,
"PreviousVcpus" INTEGER,
"PreviousRam" INTEGER,
"PreviousResourcePool" TEXT,
"PreviousProvisionedDisk" REAL
);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE "InventoryHistory";
-- +goose StatementEnd

@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN "PlaceholderChange" TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN "PlaceholderChange";
-- +goose StatementEnd

db/migrations_postgres/20241002032439_add_name.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN "Name" TEXT;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN "Name";
-- +goose StatementEnd

db/migrations_postgres/20241002033323_add_change.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Updates" ADD COLUMN "RawChangeString" BYTEA;
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Updates" DROP COLUMN "RawChangeString";
-- +goose StatementEnd

db/migrations_postgres/20250115094500_snapshot_registry.sql (new file, 14 lines)
@@ -0,0 +1,14 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS snapshot_registry (
id BIGSERIAL PRIMARY KEY,
snapshot_type TEXT NOT NULL,
table_name TEXT NOT NULL UNIQUE,
snapshot_time BIGINT NOT NULL
);
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE snapshot_registry;
-- +goose StatementEnd

db/migrations_postgres/20250116090000_drop_vmtype.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE "Inventory" DROP COLUMN IF EXISTS "VmType";
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
ALTER TABLE "Inventory" ADD COLUMN "VmType" TEXT;
-- +goose StatementEnd

db/migrations_postgres/20250116101000_snapshot_count.sql (new file, 5 lines)
@@ -0,0 +1,5 @@
-- +goose Up
ALTER TABLE snapshot_registry ADD COLUMN IF NOT EXISTS snapshot_count BIGINT NOT NULL DEFAULT 0;

-- +goose Down
ALTER TABLE snapshot_registry DROP COLUMN IF EXISTS snapshot_count;

@@ -0,0 +1,5 @@
-- +goose Up
CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time);

-- +goose Down
DROP INDEX IF EXISTS idx_snapshot_registry_type_time;

db/postgres.go (new file, 79 lines)
@@ -0,0 +1,79 @@
package db

import (
"context"
"database/sql"
"fmt"
"log/slog"
"regexp"
"strings"
"vctp/db/queries"

_ "github.com/jackc/pgx/v5/stdlib"
"github.com/jmoiron/sqlx"
)

type PostgresDB struct {
logger *slog.Logger
db *sqlx.DB
queries *queries.Queries
}

var _ Database = (*PostgresDB)(nil)

func (d *PostgresDB) DB() *sqlx.DB {
return d.db
}

func (d *PostgresDB) Queries() Querier {
return d.queries
}

func (d *PostgresDB) Logger() *slog.Logger {
return d.logger
}

func (d *PostgresDB) Close() error {
return d.db.Close()
}

func newPostgresDB(logger *slog.Logger, dsn string) (*PostgresDB, error) {
if strings.TrimSpace(dsn) == "" {
return nil, fmt.Errorf("postgres DSN is required")
}
db, err := sqlx.Open("pgx", dsn)
if err != nil {
return nil, err
}
db.SetMaxOpenConns(10)

rebindDB := rebindDBTX{db: db}
return &PostgresDB{logger: logger, db: db, queries: queries.New(rebindDB)}, nil
}

type rebindDBTX struct {
db *sqlx.DB
}

func (r rebindDBTX) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
return r.db.ExecContext(ctx, rebindQuery(query), args...)
}

func (r rebindDBTX) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
return r.db.PrepareContext(ctx, rebindQuery(query))
}

func (r rebindDBTX) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
return r.db.QueryContext(ctx, rebindQuery(query), args...)
}

func (r rebindDBTX) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
return r.db.QueryRowContext(ctx, rebindQuery(query), args...)
}

var numberedPlaceholderRe = regexp.MustCompile(`\?\d+`)

func rebindQuery(query string) string {
unindexed := numberedPlaceholderRe.ReplaceAllString(query, "?")
return sqlx.Rebind(sqlx.DOLLAR, unindexed)
}
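
db/postgres.go wraps the sqlx handle so that the SQLite-flavoured placeholders emitted by sqlc ('?' and numbered '?1', '?2', ...) are rewritten to Postgres '$1', '$2', ... before a query reaches the pgx driver. A small illustration of what rebindQuery produces, for reference only; it assumes it sits in package db with "fmt" imported, and the query text is copied from this diff:

// Illustrative only, not part of the change set.
func exampleRebindQuery() {
	q := `DELETE FROM updates WHERE "UpdateType" = ?1 AND "UpdateTime" <= ?2`
	fmt.Println(rebindQuery(q))
	// Prints: DELETE FROM updates WHERE "UpdateType" = $1 AND "UpdateTime" <= $2
}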

db/querier.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package db

import (
"context"
"database/sql"
"vctp/db/queries"
)

// Querier abstracts sqlc-generated queries so multiple database backends can share call sites.
type Querier interface {
CleanupUpdates(ctx context.Context, arg queries.CleanupUpdatesParams) error
CleanupUpdatesNullVm(ctx context.Context) error
CreateEvent(ctx context.Context, arg queries.CreateEventParams) (queries.Event, error)
CreateInventory(ctx context.Context, arg queries.CreateInventoryParams) (queries.Inventory, error)
CreateInventoryHistory(ctx context.Context, arg queries.CreateInventoryHistoryParams) (queries.InventoryHistory, error)
CreateUpdate(ctx context.Context, arg queries.CreateUpdateParams) (queries.Update, error)
GetInventoryByName(ctx context.Context, name string) ([]queries.Inventory, error)
GetInventoryByVcenter(ctx context.Context, vcenter string) ([]queries.Inventory, error)
GetInventoryEventId(ctx context.Context, cloudid sql.NullString) (queries.Inventory, error)
GetInventoryVcUrl(ctx context.Context, vc string) ([]queries.Inventory, error)
GetInventoryVmId(ctx context.Context, arg queries.GetInventoryVmIdParams) (queries.Inventory, error)
GetInventoryVmUuid(ctx context.Context, arg queries.GetInventoryVmUuidParams) (queries.Inventory, error)
GetReportInventory(ctx context.Context) ([]queries.Inventory, error)
GetReportUpdates(ctx context.Context) ([]queries.Update, error)
GetVmUpdates(ctx context.Context, arg queries.GetVmUpdatesParams) ([]queries.Update, error)
InventoryCleanup(ctx context.Context, arg queries.InventoryCleanupParams) error
InventoryCleanupTemplates(ctx context.Context) error
InventoryCleanupVcenter(ctx context.Context, vc string) error
InventoryMarkDeleted(ctx context.Context, arg queries.InventoryMarkDeletedParams) error
InventoryUpdate(ctx context.Context, arg queries.InventoryUpdateParams) error
ListEvents(ctx context.Context) ([]queries.Event, error)
ListInventory(ctx context.Context) ([]queries.Inventory, error)
ListUnprocessedEvents(ctx context.Context, eventtime sql.NullInt64) ([]queries.Event, error)
UpdateEventsProcessed(ctx context.Context, eid int64) error
}
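
With the Querier interface in place, call sites can be written against the interface rather than the concrete *queries.Queries type, so the same code path serves both the SQLite and the Postgres backend. A minimal, hypothetical example (the helper name is invented, and it assumes the Database interface exposes Queries() Querier, as both backends in this diff do):

// markEventProcessed is illustrative only: it works with any backend
// whose Queries() returns a Querier, regardless of the underlying database.
func markEventProcessed(ctx context.Context, d Database, eid int64) error {
	return d.Queries().UpdateEventsProcessed(ctx, eid)
}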
@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// sqlc v1.29.0

package queries

@@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.27.0
// sqlc v1.29.0

package queries

@@ -8,51 +8,95 @@ import (
|
||||
"database/sql"
|
||||
)
|
||||
|
||||
type Events struct {
|
||||
Eid int64
|
||||
CloudId string
|
||||
Source string
|
||||
EventTime sql.NullInt64
|
||||
ChainId string
|
||||
VmId sql.NullString
|
||||
EventKey sql.NullString
|
||||
DatacenterName sql.NullString
|
||||
ComputeResourceName sql.NullString
|
||||
UserName sql.NullString
|
||||
Processed int64
|
||||
DatacenterId sql.NullString
|
||||
ComputeResourceId sql.NullString
|
||||
VmName sql.NullString
|
||||
type Event struct {
|
||||
Eid int64 `db:"Eid" json:"Eid"`
|
||||
CloudId string `db:"CloudId" json:"CloudId"`
|
||||
Source string `db:"Source" json:"Source"`
|
||||
EventTime sql.NullInt64 `db:"EventTime" json:"EventTime"`
|
||||
ChainId string `db:"ChainId" json:"ChainId"`
|
||||
VmId sql.NullString `db:"VmId" json:"VmId"`
|
||||
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
|
||||
DatacenterName sql.NullString `db:"DatacenterName" json:"DatacenterName"`
|
||||
ComputeResourceName sql.NullString `db:"ComputeResourceName" json:"ComputeResourceName"`
|
||||
UserName sql.NullString `db:"UserName" json:"UserName"`
|
||||
Processed int64 `db:"Processed" json:"Processed"`
|
||||
DatacenterId sql.NullString `db:"DatacenterId" json:"DatacenterId"`
|
||||
ComputeResourceId sql.NullString `db:"ComputeResourceId" json:"ComputeResourceId"`
|
||||
VmName sql.NullString `db:"VmName" json:"VmName"`
|
||||
EventType sql.NullString `db:"EventType" json:"EventType"`
|
||||
}
|
||||
|
||||
type Inventory struct {
|
||||
Iid int64
|
||||
Name string
|
||||
Vcenter string
|
||||
VmId sql.NullString
|
||||
EventKey sql.NullString
|
||||
EventId sql.NullString
|
||||
CreationTime sql.NullInt64
|
||||
DeletionTime sql.NullInt64
|
||||
ResourcePool sql.NullString
|
||||
VmType sql.NullString
|
||||
Datacenter sql.NullString
|
||||
Cluster sql.NullString
|
||||
Folder sql.NullString
|
||||
ProvisionedDisk sql.NullFloat64
|
||||
InitialVcpus sql.NullInt64
|
||||
InitialRam sql.NullInt64
|
||||
SrmPlaceholder sql.NullInt64
|
||||
Iid int64 `db:"Iid" json:"Iid"`
|
||||
Name string `db:"Name" json:"Name"`
|
||||
Vcenter string `db:"Vcenter" json:"Vcenter"`
|
||||
VmId sql.NullString `db:"VmId" json:"VmId"`
|
||||
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
|
||||
CloudId sql.NullString `db:"CloudId" json:"CloudId"`
|
||||
CreationTime sql.NullInt64 `db:"CreationTime" json:"CreationTime"`
|
||||
DeletionTime sql.NullInt64 `db:"DeletionTime" json:"DeletionTime"`
|
||||
ResourcePool sql.NullString `db:"ResourcePool" json:"ResourcePool"`
|
||||
Datacenter sql.NullString `db:"Datacenter" json:"Datacenter"`
|
||||
Cluster sql.NullString `db:"Cluster" json:"Cluster"`
|
||||
Folder sql.NullString `db:"Folder" json:"Folder"`
|
||||
ProvisionedDisk sql.NullFloat64 `db:"ProvisionedDisk" json:"ProvisionedDisk"`
|
||||
InitialVcpus sql.NullInt64 `db:"InitialVcpus" json:"InitialVcpus"`
|
||||
InitialRam sql.NullInt64 `db:"InitialRam" json:"InitialRam"`
|
||||
IsTemplate interface{} `db:"IsTemplate" json:"IsTemplate"`
|
||||
PoweredOn interface{} `db:"PoweredOn" json:"PoweredOn"`
|
||||
SrmPlaceholder interface{} `db:"SrmPlaceholder" json:"SrmPlaceholder"`
|
||||
VmUuid sql.NullString `db:"VmUuid" json:"VmUuid"`
|
||||
}
|
||||
|
||||
type Updates struct {
|
||||
Uid int64
|
||||
InventoryId sql.NullInt64
|
||||
UpdateTime sql.NullInt64
|
||||
UpdateType string
|
||||
NewVcpus sql.NullInt64
|
||||
NewRam sql.NullInt64
|
||||
NewResourcePool sql.NullString
|
||||
EventKey sql.NullString
|
||||
EventId sql.NullString
|
||||
type InventoryHistory struct {
|
||||
Hid int64 `db:"Hid" json:"Hid"`
|
||||
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
|
||||
ReportDate sql.NullInt64 `db:"ReportDate" json:"ReportDate"`
|
||||
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
|
||||
PreviousVcpus sql.NullInt64 `db:"PreviousVcpus" json:"PreviousVcpus"`
|
||||
PreviousRam sql.NullInt64 `db:"PreviousRam" json:"PreviousRam"`
|
||||
PreviousResourcePool sql.NullString `db:"PreviousResourcePool" json:"PreviousResourcePool"`
|
||||
PreviousProvisionedDisk sql.NullFloat64 `db:"PreviousProvisionedDisk" json:"PreviousProvisionedDisk"`
|
||||
}
|
||||
|
||||
type PragmaTableInfo struct {
|
||||
Cid sql.NullInt64 `db:"cid" json:"cid"`
|
||||
Name sql.NullString `db:"name" json:"name"`
|
||||
Type sql.NullString `db:"type" json:"type"`
|
||||
Notnull sql.NullInt64 `db:"notnull" json:"notnull"`
|
||||
DfltValue sql.NullString `db:"dflt_value" json:"dflt_value"`
|
||||
Pk sql.NullInt64 `db:"pk" json:"pk"`
|
||||
}
|
||||
|
||||
type SnapshotRegistry struct {
|
||||
ID int64 `db:"id" json:"id"`
|
||||
SnapshotType string `db:"snapshot_type" json:"snapshot_type"`
|
||||
TableName string `db:"table_name" json:"table_name"`
|
||||
SnapshotTime int64 `db:"snapshot_time" json:"snapshot_time"`
|
||||
SnapshotCount int64 `db:"snapshot_count" json:"snapshot_count"`
|
||||
}
|
||||
|
||||
type SqliteMaster struct {
|
||||
Type sql.NullString `db:"type" json:"type"`
|
||||
Name sql.NullString `db:"name" json:"name"`
|
||||
TblName sql.NullString `db:"tbl_name" json:"tbl_name"`
|
||||
Rootpage sql.NullInt64 `db:"rootpage" json:"rootpage"`
|
||||
Sql sql.NullString `db:"sql" json:"sql"`
|
||||
}
|
||||
|
||||
type Update struct {
|
||||
Uid int64 `db:"Uid" json:"Uid"`
|
||||
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
|
||||
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
|
||||
UpdateType string `db:"UpdateType" json:"UpdateType"`
|
||||
NewVcpus sql.NullInt64 `db:"NewVcpus" json:"NewVcpus"`
|
||||
NewRam sql.NullInt64 `db:"NewRam" json:"NewRam"`
|
||||
NewResourcePool sql.NullString `db:"NewResourcePool" json:"NewResourcePool"`
|
||||
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
|
||||
EventId sql.NullString `db:"EventId" json:"EventId"`
|
||||
NewProvisionedDisk sql.NullFloat64 `db:"NewProvisionedDisk" json:"NewProvisionedDisk"`
|
||||
UserName sql.NullString `db:"UserName" json:"UserName"`
|
||||
PlaceholderChange sql.NullString `db:"PlaceholderChange" json:"PlaceholderChange"`
|
||||
Name sql.NullString `db:"Name" json:"Name"`
|
||||
RawChangeString []byte `db:"RawChangeString" json:"RawChangeString"`
|
||||
}
|
||||
|
||||
@@ -1,53 +1,131 @@
|
||||
-- name: ListInventory :many
|
||||
SELECT * FROM "Inventory"
|
||||
SELECT * FROM inventory
|
||||
ORDER BY "Name";
|
||||
|
||||
-- name: GetReportInventory :many
|
||||
SELECT * FROM inventory
|
||||
ORDER BY "CreationTime";
|
||||
|
||||
-- name: GetInventoryByName :many
|
||||
SELECT * FROM "Inventory"
|
||||
SELECT * FROM inventory
|
||||
WHERE "Name" = ?;
|
||||
|
||||
-- name: GetInventoryByVcenter :many
|
||||
SELECT * FROM inventory
|
||||
WHERE "Vcenter" = ?;
|
||||
|
||||
-- name: GetInventoryVmId :one
|
||||
SELECT * FROM "Inventory"
|
||||
WHERE "VmId" = ? LIMIT 1;
|
||||
SELECT * FROM inventory
|
||||
WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName');
|
||||
|
||||
-- name: GetInventoryVmUuid :one
|
||||
SELECT * FROM inventory
|
||||
WHERE "VmUuid" = sqlc.arg('vmUuid') AND "Datacenter" = sqlc.arg('datacenterName');
|
||||
|
||||
-- name: GetInventoryVcUrl :many
|
||||
SELECT * FROM inventory
|
||||
WHERE "Vcenter" = sqlc.arg('vc');
|
||||
|
||||
-- name: GetInventoryEventId :one
|
||||
SELECT * FROM "Inventory"
|
||||
WHERE "EventId" = ? LIMIT 1;
|
||||
SELECT * FROM inventory
|
||||
WHERE "CloudId" = ? LIMIT 1;
|
||||
|
||||
-- name: CreateInventory :one
|
||||
INSERT INTO "Inventory" (
|
||||
"Name", "Vcenter", "VmId", "EventKey", "EventId", "CreationTime", "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder"
|
||||
INSERT INTO inventory (
|
||||
"Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING *;
|
||||
|
||||
-- name: InventoryUpdate :exec
|
||||
UPDATE inventory
|
||||
SET "VmUuid" = sqlc.arg('uuid'), "SrmPlaceholder" = sqlc.arg('srmPlaceholder')
|
||||
WHERE "Iid" = sqlc.arg('iid');
|
||||
|
||||
-- name: InventoryMarkDeleted :exec
|
||||
UPDATE inventory
|
||||
SET "DeletionTime" = sqlc.arg('deletionTime')
|
||||
WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName');
|
||||
|
||||
-- name: InventoryCleanup :exec
|
||||
DELETE FROM inventory
|
||||
WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName')
|
||||
RETURNING *;
|
||||
|
||||
-- name: InventoryCleanupVcenter :exec
|
||||
DELETE FROM inventory
|
||||
WHERE "Vcenter" = sqlc.arg('vc')
|
||||
RETURNING *;
|
||||
|
||||
-- name: InventoryCleanupTemplates :exec
|
||||
DELETE FROM inventory
|
||||
WHERE "IsTemplate" = 'TRUE'
|
||||
RETURNING *;
|
||||
|
||||
-- name: CreateUpdate :one
|
||||
INSERT INTO "Updates" (
|
||||
"InventoryId", "EventKey", "EventId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool"
|
||||
INSERT INTO updates (
|
||||
"InventoryId", "Name", "EventKey", "EventId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool", "NewProvisionedDisk", "UserName", "PlaceholderChange", "RawChangeString"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?, ?
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING *;
|
||||
|
||||
-- name: GetReportUpdates :many
|
||||
SELECT * FROM updates
|
||||
ORDER BY "UpdateTime";
|
||||
|
||||
-- name: GetVmUpdates :many
|
||||
SELECT * FROM updates
|
||||
WHERE "UpdateType" = sqlc.arg('updateType') AND "InventoryId" = sqlc.arg('InventoryId');
|
||||
|
||||
-- name: CleanupUpdates :exec
|
||||
DELETE FROM updates
|
||||
WHERE "UpdateType" = sqlc.arg('updateType') AND "UpdateTime" <= sqlc.arg('updateTime')
|
||||
RETURNING *;
|
||||
|
||||
-- name: CleanupUpdatesNullVm :exec
|
||||
DELETE FROM updates
|
||||
WHERE "InventoryId" IS NULL
|
||||
RETURNING *;
|
||||
|
||||
-- name: CreateEvent :one
|
||||
INSERT INTO "Events" (
|
||||
"CloudId", "Source", "EventTime", "ChainId", "VmId", "VmName", "EventKey", "DatacenterId", "DatacenterName", "ComputeResourceId", "ComputeResourceName", "UserName"
|
||||
INSERT INTO events (
|
||||
"CloudId", "Source", "EventTime", "ChainId", "VmId", "VmName", "EventType", "EventKey", "DatacenterId", "DatacenterName", "ComputeResourceId", "ComputeResourceName", "UserName"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING *;
|
||||
|
||||
-- name: ListEvents :many
|
||||
SELECT * FROM "Events"
|
||||
SELECT * FROM events
|
||||
ORDER BY "EventTime";
|
||||
|
||||
-- name: ListUnprocessedEvents :many
|
||||
SELECT * FROM "Events"
|
||||
SELECT * FROM events
|
||||
WHERE "Processed" = 0
|
||||
AND "EventTime" > sqlc.arg('eventTime')
|
||||
ORDER BY "EventTime";
|
||||
|
||||
-- name: UpdateEventsProcessed :exec
|
||||
UPDATE "Events"
|
||||
UPDATE events
|
||||
SET "Processed" = 1
|
||||
WHERE "Eid" = sqlc.arg('eid');
|
||||
|
||||
-- name: CreateInventoryHistory :one
|
||||
INSERT INTO inventory_history (
|
||||
"InventoryId", "ReportDate", "UpdateTime", "PreviousVcpus", "PreviousRam", "PreviousResourcePool", "PreviousProvisionedDisk"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING *;
|
||||
|
||||
-- name: SqliteTableExists :one
|
||||
SELECT COUNT(1) AS count
|
||||
FROM sqlite_master
|
||||
WHERE type = 'table' AND name = sqlc.arg('table_name');
|
||||
|
||||
-- name: SqliteColumnExists :one
|
||||
SELECT COUNT(1) AS count
|
||||
FROM pragma_table_info
|
||||
WHERE name = sqlc.arg('column_name');
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Code generated by sqlc. DO NOT EDIT.
|
||||
// versions:
|
||||
// sqlc v1.27.0
|
||||
// sqlc v1.29.0
|
||||
// source: query.sql
|
||||
|
||||
package queries
|
||||
@@ -10,31 +10,59 @@ import (
|
||||
"database/sql"
|
||||
)
|
||||
|
||||
const cleanupUpdates = `-- name: CleanupUpdates :exec
|
||||
DELETE FROM updates
|
||||
WHERE "UpdateType" = ?1 AND "UpdateTime" <= ?2
|
||||
RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString
|
||||
`
|
||||
|
||||
type CleanupUpdatesParams struct {
|
||||
UpdateType string `db:"updateType" json:"updateType"`
|
||||
UpdateTime sql.NullInt64 `db:"updateTime" json:"updateTime"`
|
||||
}
|
||||
|
||||
func (q *Queries) CleanupUpdates(ctx context.Context, arg CleanupUpdatesParams) error {
|
||||
_, err := q.db.ExecContext(ctx, cleanupUpdates, arg.UpdateType, arg.UpdateTime)
|
||||
return err
|
||||
}
|
||||
|
||||
const cleanupUpdatesNullVm = `-- name: CleanupUpdatesNullVm :exec
|
||||
DELETE FROM updates
|
||||
WHERE "InventoryId" IS NULL
|
||||
RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString
|
||||
`
|
||||
|
||||
func (q *Queries) CleanupUpdatesNullVm(ctx context.Context) error {
|
||||
_, err := q.db.ExecContext(ctx, cleanupUpdatesNullVm)
|
||||
return err
|
||||
}
|
||||
|
||||
const createEvent = `-- name: CreateEvent :one
|
||||
INSERT INTO "Events" (
|
||||
"CloudId", "Source", "EventTime", "ChainId", "VmId", "VmName", "EventKey", "DatacenterId", "DatacenterName", "ComputeResourceId", "ComputeResourceName", "UserName"
|
||||
INSERT INTO events (
|
||||
"CloudId", "Source", "EventTime", "ChainId", "VmId", "VmName", "EventType", "EventKey", "DatacenterId", "DatacenterName", "ComputeResourceId", "ComputeResourceName", "UserName"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName
|
||||
RETURNING Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType
|
||||
`
|
||||
|
||||
type CreateEventParams struct {
|
||||
CloudId string
|
||||
Source string
|
||||
EventTime sql.NullInt64
|
||||
ChainId string
|
||||
VmId sql.NullString
|
||||
VmName sql.NullString
|
||||
EventKey sql.NullString
|
||||
DatacenterId sql.NullString
|
||||
DatacenterName sql.NullString
|
||||
ComputeResourceId sql.NullString
|
||||
ComputeResourceName sql.NullString
|
||||
UserName sql.NullString
|
||||
CloudId string `db:"CloudId" json:"CloudId"`
|
||||
Source string `db:"Source" json:"Source"`
|
||||
EventTime sql.NullInt64 `db:"EventTime" json:"EventTime"`
|
||||
ChainId string `db:"ChainId" json:"ChainId"`
|
||||
VmId sql.NullString `db:"VmId" json:"VmId"`
|
||||
VmName sql.NullString `db:"VmName" json:"VmName"`
|
||||
EventType sql.NullString `db:"EventType" json:"EventType"`
|
||||
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
|
||||
DatacenterId sql.NullString `db:"DatacenterId" json:"DatacenterId"`
|
||||
DatacenterName sql.NullString `db:"DatacenterName" json:"DatacenterName"`
|
||||
ComputeResourceId sql.NullString `db:"ComputeResourceId" json:"ComputeResourceId"`
|
||||
ComputeResourceName sql.NullString `db:"ComputeResourceName" json:"ComputeResourceName"`
|
||||
UserName sql.NullString `db:"UserName" json:"UserName"`
|
||||
}
|
||||
|
||||
func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Events, error) {
|
||||
func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event, error) {
|
||||
row := q.db.QueryRowContext(ctx, createEvent,
|
||||
arg.CloudId,
|
||||
arg.Source,
|
||||
@@ -42,6 +70,7 @@ func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event
|
||||
arg.ChainId,
|
||||
arg.VmId,
|
||||
arg.VmName,
|
||||
arg.EventType,
|
||||
arg.EventKey,
|
||||
arg.DatacenterId,
|
||||
arg.DatacenterName,
|
||||
@@ -49,7 +78,7 @@ func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event
|
||||
arg.ComputeResourceName,
|
||||
arg.UserName,
|
||||
)
|
||||
var i Events
|
||||
var i Event
|
||||
err := row.Scan(
|
||||
&i.Eid,
|
||||
&i.CloudId,
|
||||
@@ -65,35 +94,38 @@ func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event
|
||||
&i.DatacenterId,
|
||||
&i.ComputeResourceId,
|
||||
&i.VmName,
|
||||
&i.EventType,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const createInventory = `-- name: CreateInventory :one
|
||||
INSERT INTO "Inventory" (
|
||||
"Name", "Vcenter", "VmId", "EventKey", "EventId", "CreationTime", "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder"
|
||||
INSERT INTO inventory (
|
||||
"Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING Iid, Name, Vcenter, VmId, EventKey, EventId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, SrmPlaceholder
|
||||
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
|
||||
`
|
||||
|
||||
type CreateInventoryParams struct {
|
||||
Name string
|
||||
Vcenter string
|
||||
VmId sql.NullString
|
||||
EventKey sql.NullString
|
||||
EventId sql.NullString
|
||||
CreationTime sql.NullInt64
|
||||
ResourcePool sql.NullString
|
||||
VmType sql.NullString
|
||||
Datacenter sql.NullString
|
||||
Cluster sql.NullString
|
||||
Folder sql.NullString
|
||||
ProvisionedDisk sql.NullFloat64
|
||||
InitialVcpus sql.NullInt64
|
||||
InitialRam sql.NullInt64
|
||||
SrmPlaceholder sql.NullInt64
|
||||
Name string `db:"Name" json:"Name"`
|
||||
Vcenter string `db:"Vcenter" json:"Vcenter"`
|
||||
VmId sql.NullString `db:"VmId" json:"VmId"`
|
||||
VmUuid sql.NullString `db:"VmUuid" json:"VmUuid"`
|
||||
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
|
||||
CloudId sql.NullString `db:"CloudId" json:"CloudId"`
|
||||
CreationTime sql.NullInt64 `db:"CreationTime" json:"CreationTime"`
|
||||
ResourcePool sql.NullString `db:"ResourcePool" json:"ResourcePool"`
|
||||
IsTemplate interface{} `db:"IsTemplate" json:"IsTemplate"`
|
||||
Datacenter sql.NullString `db:"Datacenter" json:"Datacenter"`
|
||||
Cluster sql.NullString `db:"Cluster" json:"Cluster"`
|
||||
Folder sql.NullString `db:"Folder" json:"Folder"`
|
||||
ProvisionedDisk sql.NullFloat64 `db:"ProvisionedDisk" json:"ProvisionedDisk"`
|
||||
InitialVcpus sql.NullInt64 `db:"InitialVcpus" json:"InitialVcpus"`
|
||||
InitialRam sql.NullInt64 `db:"InitialRam" json:"InitialRam"`
|
||||
SrmPlaceholder interface{} `db:"SrmPlaceholder" json:"SrmPlaceholder"`
|
||||
PoweredOn interface{} `db:"PoweredOn" json:"PoweredOn"`
|
||||
}
|
||||
|
||||
func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams) (Inventory, error) {
|
||||
@@ -101,11 +133,12 @@ func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams
|
||||
arg.Name,
|
||||
arg.Vcenter,
|
||||
arg.VmId,
|
||||
arg.VmUuid,
|
||||
arg.EventKey,
|
||||
arg.EventId,
|
||||
arg.CloudId,
|
||||
arg.CreationTime,
|
||||
arg.ResourcePool,
|
||||
arg.VmType,
|
||||
arg.IsTemplate,
|
||||
arg.Datacenter,
|
||||
arg.Cluster,
|
||||
arg.Folder,
|
||||
@@ -113,6 +146,7 @@ func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams
|
||||
arg.InitialVcpus,
|
||||
arg.InitialRam,
|
||||
arg.SrmPlaceholder,
|
||||
arg.PoweredOn,
|
||||
)
|
||||
var i Inventory
|
||||
err := row.Scan(
|
||||
@@ -121,45 +155,96 @@ func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.VmType,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const createInventoryHistory = `-- name: CreateInventoryHistory :one
|
||||
INSERT INTO inventory_history (
|
||||
"InventoryId", "ReportDate", "UpdateTime", "PreviousVcpus", "PreviousRam", "PreviousResourcePool", "PreviousProvisionedDisk"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING Hid, InventoryId, ReportDate, UpdateTime, PreviousVcpus, PreviousRam, PreviousResourcePool, PreviousProvisionedDisk
|
||||
`
|
||||
|
||||
type CreateInventoryHistoryParams struct {
|
||||
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
|
||||
ReportDate sql.NullInt64 `db:"ReportDate" json:"ReportDate"`
|
||||
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
|
||||
PreviousVcpus sql.NullInt64 `db:"PreviousVcpus" json:"PreviousVcpus"`
|
||||
PreviousRam sql.NullInt64 `db:"PreviousRam" json:"PreviousRam"`
|
||||
PreviousResourcePool sql.NullString `db:"PreviousResourcePool" json:"PreviousResourcePool"`
|
||||
PreviousProvisionedDisk sql.NullFloat64 `db:"PreviousProvisionedDisk" json:"PreviousProvisionedDisk"`
|
||||
}
|
||||
|
||||
func (q *Queries) CreateInventoryHistory(ctx context.Context, arg CreateInventoryHistoryParams) (InventoryHistory, error) {
|
||||
row := q.db.QueryRowContext(ctx, createInventoryHistory,
|
||||
arg.InventoryId,
|
||||
arg.ReportDate,
|
||||
arg.UpdateTime,
|
||||
arg.PreviousVcpus,
|
||||
arg.PreviousRam,
|
||||
arg.PreviousResourcePool,
|
||||
arg.PreviousProvisionedDisk,
|
||||
)
|
||||
var i InventoryHistory
|
||||
err := row.Scan(
|
||||
&i.Hid,
|
||||
&i.InventoryId,
|
||||
&i.ReportDate,
|
||||
&i.UpdateTime,
|
||||
&i.PreviousVcpus,
|
||||
&i.PreviousRam,
|
||||
&i.PreviousResourcePool,
|
||||
&i.PreviousProvisionedDisk,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const createUpdate = `-- name: CreateUpdate :one
|
||||
INSERT INTO "Updates" (
|
||||
"InventoryId", "EventKey", "EventId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool"
|
||||
INSERT INTO updates (
|
||||
"InventoryId", "Name", "EventKey", "EventId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool", "NewProvisionedDisk", "UserName", "PlaceholderChange", "RawChangeString"
|
||||
) VALUES(
|
||||
?, ?, ?, ?, ?, ?, ?, ?
|
||||
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
|
||||
)
|
||||
RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId
|
||||
RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString
|
||||
`
|
||||
|
||||
type CreateUpdateParams struct {
|
||||
InventoryId sql.NullInt64
|
||||
EventKey sql.NullString
|
||||
EventId sql.NullString
|
||||
UpdateTime sql.NullInt64
|
||||
UpdateType string
|
||||
NewVcpus sql.NullInt64
|
||||
NewRam sql.NullInt64
|
||||
NewResourcePool sql.NullString
|
||||
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
|
||||
Name sql.NullString `db:"Name" json:"Name"`
|
||||
EventKey sql.NullString `db:"EventKey" json:"EventKey"`
|
||||
EventId sql.NullString `db:"EventId" json:"EventId"`
|
||||
UpdateTime sql.NullInt64 `db:"UpdateTime" json:"UpdateTime"`
|
||||
UpdateType string `db:"UpdateType" json:"UpdateType"`
|
||||
NewVcpus sql.NullInt64 `db:"NewVcpus" json:"NewVcpus"`
|
||||
NewRam sql.NullInt64 `db:"NewRam" json:"NewRam"`
|
||||
NewResourcePool sql.NullString `db:"NewResourcePool" json:"NewResourcePool"`
|
||||
NewProvisionedDisk sql.NullFloat64 `db:"NewProvisionedDisk" json:"NewProvisionedDisk"`
|
||||
UserName sql.NullString `db:"UserName" json:"UserName"`
|
||||
PlaceholderChange sql.NullString `db:"PlaceholderChange" json:"PlaceholderChange"`
|
||||
RawChangeString []byte `db:"RawChangeString" json:"RawChangeString"`
|
||||
}
|
||||
|
||||
func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Updates, error) {
|
||||
func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Update, error) {
|
||||
row := q.db.QueryRowContext(ctx, createUpdate,
|
||||
arg.InventoryId,
|
||||
arg.Name,
|
||||
arg.EventKey,
|
||||
arg.EventId,
|
||||
arg.UpdateTime,
|
||||
@@ -167,8 +252,12 @@ func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Upd
|
||||
arg.NewVcpus,
|
||||
arg.NewRam,
|
||||
arg.NewResourcePool,
|
||||
arg.NewProvisionedDisk,
|
||||
arg.UserName,
|
||||
arg.PlaceholderChange,
|
||||
arg.RawChangeString,
|
||||
)
|
||||
var i Updates
|
||||
var i Update
|
||||
err := row.Scan(
|
||||
&i.Uid,
|
||||
&i.InventoryId,
|
||||
@@ -179,12 +268,17 @@ func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Upd
|
||||
&i.NewResourcePool,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.NewProvisionedDisk,
|
||||
&i.UserName,
|
||||
&i.PlaceholderChange,
|
||||
&i.Name,
|
||||
&i.RawChangeString,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getInventoryByName = `-- name: GetInventoryByName :many
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, EventId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, SrmPlaceholder FROM "Inventory"
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
WHERE "Name" = ?
|
||||
`
|
||||
|
||||
@@ -203,18 +297,68 @@ func (q *Queries) GetInventoryByName(ctx context.Context, name string) ([]Invent
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.VmType,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getInventoryByVcenter = `-- name: GetInventoryByVcenter :many
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
WHERE "Vcenter" = ?
|
||||
`
|
||||
|
||||
func (q *Queries) GetInventoryByVcenter(ctx context.Context, vcenter string) ([]Inventory, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getInventoryByVcenter, vcenter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Inventory
|
||||
for rows.Next() {
|
||||
var i Inventory
|
||||
if err := rows.Scan(
|
||||
&i.Iid,
|
||||
&i.Name,
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -230,12 +374,12 @@ func (q *Queries) GetInventoryByName(ctx context.Context, name string) ([]Invent
|
||||
}
|
||||
|
||||
const getInventoryEventId = `-- name: GetInventoryEventId :one
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, EventId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, SrmPlaceholder FROM "Inventory"
|
||||
WHERE "EventId" = ? LIMIT 1
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
WHERE "CloudId" = ? LIMIT 1
|
||||
`
|
||||
|
||||
func (q *Queries) GetInventoryEventId(ctx context.Context, eventid sql.NullString) (Inventory, error) {
|
||||
row := q.db.QueryRowContext(ctx, getInventoryEventId, eventid)
|
||||
func (q *Queries) GetInventoryEventId(ctx context.Context, cloudid sql.NullString) (Inventory, error) {
|
||||
row := q.db.QueryRowContext(ctx, getInventoryEventId, cloudid)
|
||||
var i Inventory
|
||||
err := row.Scan(
|
||||
&i.Iid,
|
||||
@@ -243,29 +387,84 @@ func (q *Queries) GetInventoryEventId(ctx context.Context, eventid sql.NullStrin
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.VmType,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getInventoryVcUrl = `-- name: GetInventoryVcUrl :many
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
WHERE "Vcenter" = ?1
|
||||
`
|
||||
|
||||
func (q *Queries) GetInventoryVcUrl(ctx context.Context, vc string) ([]Inventory, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getInventoryVcUrl, vc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Inventory
|
||||
for rows.Next() {
|
||||
var i Inventory
|
||||
if err := rows.Scan(
|
||||
&i.Iid,
|
||||
&i.Name,
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getInventoryVmId = `-- name: GetInventoryVmId :one
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, EventId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, SrmPlaceholder FROM "Inventory"
|
||||
WHERE "VmId" = ? LIMIT 1
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
WHERE "VmId" = ?1 AND "Datacenter" = ?2
|
||||
`
|
||||
|
||||
func (q *Queries) GetInventoryVmId(ctx context.Context, vmid sql.NullString) (Inventory, error) {
|
||||
row := q.db.QueryRowContext(ctx, getInventoryVmId, vmid)
|
||||
type GetInventoryVmIdParams struct {
|
||||
VmId sql.NullString `db:"vmId" json:"vmId"`
|
||||
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
|
||||
}
|
||||
|
||||
func (q *Queries) GetInventoryVmId(ctx context.Context, arg GetInventoryVmIdParams) (Inventory, error) {
|
||||
row := q.db.QueryRowContext(ctx, getInventoryVmId, arg.VmId, arg.DatacenterName)
|
||||
var i Inventory
|
||||
err := row.Scan(
|
||||
&i.Iid,
|
||||
@@ -273,36 +472,286 @@ func (q *Queries) GetInventoryVmId(ctx context.Context, vmid sql.NullString) (In
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.VmType,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
)
|
||||
return i, err
|
||||
}
|
||||
|
||||
const getInventoryVmUuid = `-- name: GetInventoryVmUuid :one
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
WHERE "VmUuid" = ?1 AND "Datacenter" = ?2
|
||||
`
|
||||
|
||||
type GetInventoryVmUuidParams struct {
|
||||
VmUuid sql.NullString `db:"vmUuid" json:"vmUuid"`
|
||||
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
|
||||
}
|
||||
|
||||
func (q *Queries) GetInventoryVmUuid(ctx context.Context, arg GetInventoryVmUuidParams) (Inventory, error) {
|
||||
row := q.db.QueryRowContext(ctx, getInventoryVmUuid, arg.VmUuid, arg.DatacenterName)
|
||||
var i Inventory
|
||||
err := row.Scan(
|
||||
&i.Iid,
|
||||
&i.Name,
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
)
|
||||
return i, err
|
||||
}
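
For readers new to sqlc, here is a minimal, hypothetical sketch of calling the generated GetInventoryVmUuid query. The import path, DSN, and the db.New constructor location are assumptions; only the method and its params struct are taken from the code above.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "modernc.org/sqlite"

	db "vctp/internal/database" // assumed location of the generated package
)

func main() {
	conn, err := sql.Open("sqlite", "./vctp.sqlite3") // illustrative DSN
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	q := db.New(conn)
	inv, err := q.GetInventoryVmUuid(context.Background(), db.GetInventoryVmUuidParams{
		VmUuid:         sql.NullString{String: "4237a0c1-0000-0000-0000-000000000000", Valid: true},
		DatacenterName: sql.NullString{String: "DC01", Valid: true},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found %s (Iid=%d)", inv.Name, inv.Iid)
}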
|
||||
|
||||
const getReportInventory = `-- name: GetReportInventory :many
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
ORDER BY "CreationTime"
|
||||
`
|
||||
|
||||
func (q *Queries) GetReportInventory(ctx context.Context) ([]Inventory, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getReportInventory)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Inventory
|
||||
for rows.Next() {
|
||||
var i Inventory
|
||||
if err := rows.Scan(
|
||||
&i.Iid,
|
||||
&i.Name,
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getReportUpdates = `-- name: GetReportUpdates :many
|
||||
SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM updates
|
||||
ORDER BY "UpdateTime"
|
||||
`
|
||||
|
||||
func (q *Queries) GetReportUpdates(ctx context.Context) ([]Update, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getReportUpdates)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Update
|
||||
for rows.Next() {
|
||||
var i Update
|
||||
if err := rows.Scan(
|
||||
&i.Uid,
|
||||
&i.InventoryId,
|
||||
&i.UpdateTime,
|
||||
&i.UpdateType,
|
||||
&i.NewVcpus,
|
||||
&i.NewRam,
|
||||
&i.NewResourcePool,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.NewProvisionedDisk,
|
||||
&i.UserName,
|
||||
&i.PlaceholderChange,
|
||||
&i.Name,
|
||||
&i.RawChangeString,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const getVmUpdates = `-- name: GetVmUpdates :many
|
||||
SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM updates
|
||||
WHERE "UpdateType" = ?1 AND "InventoryId" = ?2
|
||||
`
|
||||
|
||||
type GetVmUpdatesParams struct {
|
||||
UpdateType string `db:"updateType" json:"updateType"`
|
||||
InventoryId sql.NullInt64 `db:"InventoryId" json:"InventoryId"`
|
||||
}
|
||||
|
||||
func (q *Queries) GetVmUpdates(ctx context.Context, arg GetVmUpdatesParams) ([]Update, error) {
|
||||
rows, err := q.db.QueryContext(ctx, getVmUpdates, arg.UpdateType, arg.InventoryId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Update
|
||||
for rows.Next() {
|
||||
var i Update
|
||||
if err := rows.Scan(
|
||||
&i.Uid,
|
||||
&i.InventoryId,
|
||||
&i.UpdateTime,
|
||||
&i.UpdateType,
|
||||
&i.NewVcpus,
|
||||
&i.NewRam,
|
||||
&i.NewResourcePool,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.NewProvisionedDisk,
|
||||
&i.UserName,
|
||||
&i.PlaceholderChange,
|
||||
&i.Name,
|
||||
&i.RawChangeString,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
items = append(items, i)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
const inventoryCleanup = `-- name: InventoryCleanup :exec
|
||||
DELETE FROM inventory
|
||||
WHERE "VmId" = ?1 AND "Datacenter" = ?2
|
||||
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
|
||||
`
|
||||
|
||||
type InventoryCleanupParams struct {
|
||||
VmId sql.NullString `db:"vmId" json:"vmId"`
|
||||
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
|
||||
}
|
||||
|
||||
func (q *Queries) InventoryCleanup(ctx context.Context, arg InventoryCleanupParams) error {
|
||||
_, err := q.db.ExecContext(ctx, inventoryCleanup, arg.VmId, arg.DatacenterName)
|
||||
return err
|
||||
}
|
||||
|
||||
const inventoryCleanupTemplates = `-- name: InventoryCleanupTemplates :exec
|
||||
DELETE FROM inventory
|
||||
WHERE "IsTemplate" = 'TRUE'
|
||||
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
|
||||
`
|
||||
|
||||
func (q *Queries) InventoryCleanupTemplates(ctx context.Context) error {
|
||||
_, err := q.db.ExecContext(ctx, inventoryCleanupTemplates)
|
||||
return err
|
||||
}
|
||||
|
||||
const inventoryCleanupVcenter = `-- name: InventoryCleanupVcenter :exec
|
||||
DELETE FROM inventory
|
||||
WHERE "Vcenter" = ?1
|
||||
RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid
|
||||
`
|
||||
|
||||
func (q *Queries) InventoryCleanupVcenter(ctx context.Context, vc string) error {
|
||||
_, err := q.db.ExecContext(ctx, inventoryCleanupVcenter, vc)
|
||||
return err
|
||||
}
|
||||
|
||||
const inventoryMarkDeleted = `-- name: InventoryMarkDeleted :exec
|
||||
UPDATE inventory
|
||||
SET "DeletionTime" = ?1
|
||||
WHERE "VmId" = ?2 AND "Datacenter" = ?3
|
||||
`
|
||||
|
||||
type InventoryMarkDeletedParams struct {
|
||||
DeletionTime sql.NullInt64 `db:"deletionTime" json:"deletionTime"`
|
||||
VmId sql.NullString `db:"vmId" json:"vmId"`
|
||||
DatacenterName sql.NullString `db:"datacenterName" json:"datacenterName"`
|
||||
}
|
||||
|
||||
func (q *Queries) InventoryMarkDeleted(ctx context.Context, arg InventoryMarkDeletedParams) error {
|
||||
_, err := q.db.ExecContext(ctx, inventoryMarkDeleted, arg.DeletionTime, arg.VmId, arg.DatacenterName)
|
||||
return err
|
||||
}
|
||||
|
||||
const inventoryUpdate = `-- name: InventoryUpdate :exec
|
||||
UPDATE inventory
|
||||
SET "VmUuid" = ?1, "SrmPlaceholder" = ?2
|
||||
WHERE "Iid" = ?3
|
||||
`
|
||||
|
||||
type InventoryUpdateParams struct {
|
||||
Uuid sql.NullString `db:"uuid" json:"uuid"`
|
||||
SrmPlaceholder interface{} `db:"srmPlaceholder" json:"srmPlaceholder"`
|
||||
Iid int64 `db:"iid" json:"iid"`
|
||||
}
|
||||
|
||||
func (q *Queries) InventoryUpdate(ctx context.Context, arg InventoryUpdateParams) error {
|
||||
_, err := q.db.ExecContext(ctx, inventoryUpdate, arg.Uuid, arg.SrmPlaceholder, arg.Iid)
|
||||
return err
|
||||
}
|
||||
|
||||
const listEvents = `-- name: ListEvents :many
|
||||
SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName FROM "Events"
|
||||
SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM events
|
||||
ORDER BY "EventTime"
|
||||
`
|
||||
|
||||
func (q *Queries) ListEvents(ctx context.Context) ([]Events, error) {
|
||||
func (q *Queries) ListEvents(ctx context.Context) ([]Event, error) {
|
||||
rows, err := q.db.QueryContext(ctx, listEvents)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Events
|
||||
var items []Event
|
||||
for rows.Next() {
|
||||
var i Events
|
||||
var i Event
|
||||
if err := rows.Scan(
|
||||
&i.Eid,
|
||||
&i.CloudId,
|
||||
@@ -318,6 +767,7 @@ func (q *Queries) ListEvents(ctx context.Context) ([]Events, error) {
|
||||
&i.DatacenterId,
|
||||
&i.ComputeResourceId,
|
||||
&i.VmName,
|
||||
&i.EventType,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -333,7 +783,7 @@ func (q *Queries) ListEvents(ctx context.Context) ([]Events, error) {
|
||||
}
|
||||
|
||||
const listInventory = `-- name: ListInventory :many
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, EventId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, SrmPlaceholder FROM "Inventory"
|
||||
SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory
|
||||
ORDER BY "Name"
|
||||
`
|
||||
|
||||
@@ -352,18 +802,20 @@ func (q *Queries) ListInventory(ctx context.Context) ([]Inventory, error) {
|
||||
&i.Vcenter,
|
||||
&i.VmId,
|
||||
&i.EventKey,
|
||||
&i.EventId,
|
||||
&i.CloudId,
|
||||
&i.CreationTime,
|
||||
&i.DeletionTime,
|
||||
&i.ResourcePool,
|
||||
&i.VmType,
|
||||
&i.Datacenter,
|
||||
&i.Cluster,
|
||||
&i.Folder,
|
||||
&i.ProvisionedDisk,
|
||||
&i.InitialVcpus,
|
||||
&i.InitialRam,
|
||||
&i.IsTemplate,
|
||||
&i.PoweredOn,
|
||||
&i.SrmPlaceholder,
|
||||
&i.VmUuid,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -379,20 +831,21 @@ func (q *Queries) ListInventory(ctx context.Context) ([]Inventory, error) {
|
||||
}
|
||||
|
||||
const listUnprocessedEvents = `-- name: ListUnprocessedEvents :many
|
||||
SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName FROM "Events"
|
||||
SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM events
|
||||
WHERE "Processed" = 0
|
||||
AND "EventTime" > ?1
|
||||
ORDER BY "EventTime"
|
||||
`
|
||||
|
||||
func (q *Queries) ListUnprocessedEvents(ctx context.Context) ([]Events, error) {
|
||||
rows, err := q.db.QueryContext(ctx, listUnprocessedEvents)
|
||||
func (q *Queries) ListUnprocessedEvents(ctx context.Context, eventtime sql.NullInt64) ([]Event, error) {
|
||||
rows, err := q.db.QueryContext(ctx, listUnprocessedEvents, eventtime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
var items []Events
|
||||
var items []Event
|
||||
for rows.Next() {
|
||||
var i Events
|
||||
var i Event
|
||||
if err := rows.Scan(
|
||||
&i.Eid,
|
||||
&i.CloudId,
|
||||
@@ -408,6 +861,7 @@ func (q *Queries) ListUnprocessedEvents(ctx context.Context) ([]Events, error) {
|
||||
&i.DatacenterId,
|
||||
&i.ComputeResourceId,
|
||||
&i.VmName,
|
||||
&i.EventType,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -422,8 +876,34 @@ func (q *Queries) ListUnprocessedEvents(ctx context.Context) ([]Events, error) {
|
||||
return items, nil
|
||||
}
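
ListUnprocessedEvents now takes an EventTime cutoff, so callers pass the oldest timestamp they still care about. A small, assumed sketch of a poll step follows; the one-hour window and the logging are illustrative, only the generated method and its sql.NullInt64 parameter come from the code above, and the usual context/database/sql/log/time imports are assumed.

// logRecentUnprocessed is illustrative only: fetch events newer than an hour ago
// that have not been marked as processed, and report how many were found.
func logRecentUnprocessed(ctx context.Context, q *Queries) error {
	cutoff := sql.NullInt64{Int64: time.Now().Add(-time.Hour).Unix(), Valid: true}
	events, err := q.ListUnprocessedEvents(ctx, cutoff)
	if err != nil {
		return err
	}
	log.Printf("found %d unprocessed events since %d", len(events), cutoff.Int64)
	return nil
}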
|
||||
|
||||
const sqliteColumnExists = `-- name: SqliteColumnExists :one
SELECT COUNT(1) AS count
FROM pragma_table_info
WHERE name = ?1
`

func (q *Queries) SqliteColumnExists(ctx context.Context, columnName sql.NullString) (int64, error) {
	row := q.db.QueryRowContext(ctx, sqliteColumnExists, columnName)
	var count int64
	err := row.Scan(&count)
	return count, err
}

const sqliteTableExists = `-- name: SqliteTableExists :one
SELECT COUNT(1) AS count
FROM sqlite_master
WHERE type = 'table' AND name = ?1
`

func (q *Queries) SqliteTableExists(ctx context.Context, tableName sql.NullString) (int64, error) {
	row := q.db.QueryRowContext(ctx, sqliteTableExists, tableName)
	var count int64
	err := row.Scan(&count)
	return count, err
}
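
A hedged sketch of how these existence checks might be used by a caller elsewhere in the codebase: guarding a hand-rolled, idempotent column addition. The helper name, the column, and the ALTER TABLE statement are assumptions for illustration; only SqliteColumnExists comes from the generated code above, and the usual context/database/sql imports are assumed.

// ensureVmUuidColumn is illustrative only; the column and statement are example values.
func ensureVmUuidColumn(ctx context.Context, q *Queries, conn *sql.DB) error {
	n, err := q.SqliteColumnExists(ctx, sql.NullString{String: "VmUuid", Valid: true})
	if err != nil {
		return err
	}
	if n > 0 {
		return nil // column already present, nothing to do
	}
	_, err = conn.ExecContext(ctx, `ALTER TABLE inventory ADD COLUMN "VmUuid" TEXT`)
	return err
}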
|
||||
|
||||
const updateEventsProcessed = `-- name: UpdateEventsProcessed :exec
UPDATE "Events"
UPDATE events
SET "Processed" = 1
WHERE "Eid" = ?1
`

@@ -1,25 +0,0 @@
-- name: GetAuthor :one
SELECT * FROM authors
WHERE id = ? LIMIT 1;

-- name: ListAuthors :many
SELECT * FROM authors
ORDER BY name;

-- name: CreateAuthor :one
INSERT INTO authors (
  name, bio
) VALUES (
  ?, ?
)
RETURNING *;

-- name: UpdateAuthor :exec
UPDATE authors
SET name = ?,
  bio = ?
WHERE id = ?;

-- name: DeleteAuthor :exec
DELETE FROM authors
WHERE id = ?;
95
db/schema.sql
Normal file
@@ -0,0 +1,95 @@
CREATE TABLE IF NOT EXISTS inventory (
    "Iid" INTEGER PRIMARY KEY AUTOINCREMENT,
    "Name" TEXT NOT NULL,
    "Vcenter" TEXT NOT NULL,
    "VmId" TEXT,
    "EventKey" TEXT,
    "CloudId" TEXT,
    "CreationTime" INTEGER,
    "DeletionTime" INTEGER,
    "ResourcePool" TEXT,
    "Datacenter" TEXT,
    "Cluster" TEXT,
    "Folder" TEXT,
    "ProvisionedDisk" REAL,
    "InitialVcpus" INTEGER,
    "InitialRam" INTEGER,
    "IsTemplate" TEXT NOT NULL DEFAULT "FALSE",
    "PoweredOn" TEXT NOT NULL DEFAULT "FALSE",
    "SrmPlaceholder" TEXT NOT NULL DEFAULT "FALSE",
    "VmUuid" TEXT
);

CREATE TABLE IF NOT EXISTS updates (
    "Uid" INTEGER PRIMARY KEY AUTOINCREMENT,
    "InventoryId" INTEGER,
    "UpdateTime" INTEGER,
    "UpdateType" TEXT NOT NULL,
    "NewVcpus" INTEGER,
    "NewRam" INTEGER,
    "NewResourcePool" TEXT,
    "EventKey" TEXT,
    "EventId" TEXT,
    "NewProvisionedDisk" REAL,
    "UserName" TEXT,
    "PlaceholderChange" TEXT,
    "Name" TEXT,
    "RawChangeString" BLOB
);

CREATE TABLE IF NOT EXISTS events (
    "Eid" INTEGER PRIMARY KEY AUTOINCREMENT,
    "CloudId" TEXT NOT NULL,
    "Source" TEXT NOT NULL,
    "EventTime" INTEGER,
    "ChainId" TEXT NOT NULL,
    "VmId" TEXT,
    "EventKey" TEXT,
    "DatacenterName" TEXT,
    "ComputeResourceName" TEXT,
    "UserName" TEXT,
    "Processed" INTEGER NOT NULL DEFAULT 0,
    "DatacenterId" TEXT,
    "ComputeResourceId" TEXT,
    "VmName" TEXT,
    "EventType" TEXT
);

CREATE TABLE IF NOT EXISTS inventory_history (
    "Hid" INTEGER PRIMARY KEY AUTOINCREMENT,
    "InventoryId" INTEGER,
    "ReportDate" INTEGER,
    "UpdateTime" INTEGER,
    "PreviousVcpus" INTEGER,
    "PreviousRam" INTEGER,
    "PreviousResourcePool" TEXT,
    "PreviousProvisionedDisk" REAL
);

CREATE TABLE IF NOT EXISTS snapshot_registry (
    "id" INTEGER PRIMARY KEY AUTOINCREMENT,
    "snapshot_type" TEXT NOT NULL,
    "table_name" TEXT NOT NULL UNIQUE,
    "snapshot_time" INTEGER NOT NULL,
    "snapshot_count" BIGINT NOT NULL DEFAULT 0
);
CREATE INDEX IF NOT EXISTS idx_snapshot_registry_type_time ON snapshot_registry (snapshot_type, snapshot_time);

-- The following tables are declared for sqlc type-checking only.
-- Do not apply this file as a migration.
CREATE TABLE sqlite_master (
    "type" TEXT,
    "name" TEXT,
    "tbl_name" TEXT,
    "rootpage" INTEGER,
    "sql" TEXT
);

CREATE TABLE pragma_table_info (
    "cid" INTEGER,
    "name" TEXT,
    "type" TEXT,
    "notnull" INTEGER,
    "dflt_value" TEXT,
    "pk" INTEGER
);
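
The snapshot_registry table keys one row per snapshot table (table_name is UNIQUE) and records its type, capture time, and row count. A minimal, assumed sketch of recording an entry from Go follows; the helper name, the upsert wording, and the database/sql wiring are illustrative, only the column names come from the schema above, and the usual context/database/sql/time imports are assumed.

// recordSnapshot upserts one snapshot_registry row; illustrative, not part of this change.
func recordSnapshot(ctx context.Context, conn *sql.DB, snapType, table string, taken time.Time, rows int64) error {
	_, err := conn.ExecContext(ctx, `
INSERT INTO snapshot_registry (snapshot_type, table_name, snapshot_time, snapshot_count)
VALUES (?1, ?2, ?3, ?4)
ON CONFLICT(table_name) DO UPDATE SET
    snapshot_type  = excluded.snapshot_type,
    snapshot_time  = excluded.snapshot_time,
    snapshot_count = excluded.snapshot_count`,
		snapType, table, taken.Unix(), rows)
	return err
}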
178
dist/assets/css/web3.css
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
:root {
|
||||
--web2-blue: #1d9bf0;
|
||||
--web2-slate: #0f172a;
|
||||
--web2-muted: #64748b;
|
||||
--web2-card: #ffffff;
|
||||
--web2-border: #e5e7eb;
|
||||
}
|
||||
body {
|
||||
font-family: "Segoe UI", "Helvetica Neue", Arial, sans-serif;
|
||||
color: var(--web2-slate);
|
||||
}
|
||||
.web2-bg {
|
||||
background: #ffffff;
|
||||
}
|
||||
.web2-shell {
|
||||
max-width: 1100px;
|
||||
margin: 0 auto;
|
||||
padding: 2rem 1.5rem 4rem;
|
||||
}
|
||||
.web2-header {
|
||||
background: var(--web2-card);
|
||||
border: 1px solid var(--web2-border);
|
||||
border-radius: 4px;
|
||||
padding: 1.5rem 2rem;
|
||||
}
|
||||
.web2-card {
|
||||
background: var(--web2-card);
|
||||
border: 1px solid var(--web2-border);
|
||||
border-radius: 4px;
|
||||
padding: 1.5rem 1.75rem;
|
||||
}
|
||||
.web2-card h2 {
|
||||
position: relative;
|
||||
padding-left: 0.75rem;
|
||||
font-size: 1.05rem;
|
||||
font-weight: 700;
|
||||
letter-spacing: 0.02em;
|
||||
color: #0b1220;
|
||||
}
|
||||
.web2-card h2::before {
|
||||
content: "";
|
||||
position: absolute;
|
||||
left: 0;
|
||||
top: 50%;
|
||||
transform: translateY(-50%);
|
||||
width: 4px;
|
||||
height: 70%;
|
||||
background: var(--web2-blue);
|
||||
border-radius: 2px;
|
||||
box-shadow: 0 0 0 1px rgba(29, 155, 240, 0.18);
|
||||
}
|
||||
.web2-pill {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.4rem;
|
||||
background: #f8fafc;
|
||||
border: 1px solid var(--web2-border);
|
||||
color: var(--web2-muted);
|
||||
padding: 0.2rem 0.6rem;
|
||||
border-radius: 3px;
|
||||
font-size: 0.85rem;
|
||||
letter-spacing: 0.02em;
|
||||
}
|
||||
.web2-code {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
background: #f1f5f9;
|
||||
border: 1px solid var(--web2-border);
|
||||
border-radius: 3px;
|
||||
padding: 0.1rem 0.35rem;
|
||||
font-size: 0.85em;
|
||||
color: #0f172a;
|
||||
}
|
||||
.web2-paragraphs p + p {
|
||||
margin-top: 0.85rem;
|
||||
}
|
||||
.web2-link {
|
||||
color: var(--web2-blue);
|
||||
text-decoration: none;
|
||||
font-weight: 600;
|
||||
}
|
||||
.web2-link:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
.web2-button {
|
||||
background: var(--web2-blue);
|
||||
color: #fff;
|
||||
padding: 0.45rem 0.9rem;
|
||||
border-radius: 3px;
|
||||
border: 1px solid #1482d0;
|
||||
box-shadow: none;
|
||||
font-weight: 600;
|
||||
text-decoration: none;
|
||||
}
|
||||
.web2-button:hover {
|
||||
background: #1787d4;
|
||||
}
|
||||
.web2-button-group {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
.web2-button-group .web2-button {
|
||||
margin: 0 0.5rem 0.5rem 0;
|
||||
}
|
||||
.web3-button {
|
||||
background: #f3f4f6;
|
||||
color: #0f172a;
|
||||
padding: 0.5rem 1rem;
|
||||
border-radius: 6px;
|
||||
border: 1px solid #e5e7eb;
|
||||
text-decoration: none;
|
||||
font-weight: 600;
|
||||
transition: background 0.15s ease, border-color 0.15s ease, color 0.15s ease, box-shadow 0.15s ease;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.35rem;
|
||||
}
|
||||
.web3-button:hover {
|
||||
background: #e2e8f0;
|
||||
border-color: #cbd5e1;
|
||||
}
|
||||
.web3-button.active {
|
||||
background: #dbeafe;
|
||||
border-color: #93c5fd;
|
||||
color: #1d4ed8;
|
||||
box-shadow: 0 0 0 2px rgba(147, 197, 253, 0.35);
|
||||
}
|
||||
.web3-button-group {
|
||||
display: flex;
|
||||
gap: 0.75rem;
|
||||
flex-wrap: wrap;
|
||||
margin-top: 4px;
|
||||
}
|
||||
.web2-list li {
|
||||
background: #ffffff;
|
||||
border: 1px solid var(--web2-border);
|
||||
border-radius: 3px;
|
||||
padding: 0.75rem 1rem;
|
||||
box-shadow: none;
|
||||
}
|
||||
.web2-table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
font-size: 0.95rem;
|
||||
}
|
||||
.web2-table thead th {
|
||||
text-align: left;
|
||||
padding: 0.75rem 0.5rem;
|
||||
font-weight: 700;
|
||||
color: var(--web2-muted);
|
||||
border-bottom: 1px solid var(--web2-border);
|
||||
}
|
||||
.web2-table tbody td {
|
||||
padding: 0.9rem 0.5rem;
|
||||
border-bottom: 1px solid var(--web2-border);
|
||||
}
|
||||
.web2-table tbody tr:nth-child(odd) {
|
||||
background: #f8fafc;
|
||||
}
|
||||
.web2-table tbody tr:nth-child(even) {
|
||||
background: #ffffff;
|
||||
}
|
||||
.web2-group-row td {
|
||||
background: #e8eef5;
|
||||
color: #0f172a;
|
||||
border-bottom: 1px solid var(--web2-border);
|
||||
padding: 0.65rem 0.5rem;
|
||||
}
|
||||
.web2-badge {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 0.25rem;
|
||||
border: 1px solid var(--web2-border);
|
||||
padding: 0.15rem 0.45rem;
|
||||
border-radius: 3px;
|
||||
font-size: 0.8rem;
|
||||
color: var(--web2-muted);
|
||||
background: #f8fafc;
|
||||
}
|
||||
2
dist/dist.go
vendored
@@ -4,5 +4,5 @@ import (
	"embed"
)

//go:embed all:assets
//go:embed all:assets favicon.ico favicon-16x16.png favicon-32x32.png
var AssetsDir embed.FS

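The widened //go:embed directive bundles the favicons into the same embed.FS as the assets. A minimal sketch of serving that filesystem over HTTP follows; the route, listen address, and import path are assumptions, only dist.AssetsDir comes from the change above.

package main

import (
	"net/http"

	"vctp/dist" // assumed import path for the embedded assets package
)

func main() {
	mux := http.NewServeMux()
	// http.FS adapts the embed.FS so /assets/... and the favicons are served from the binary.
	mux.Handle("/", http.FileServer(http.FS(dist.AssetsDir)))
	_ = http.ListenAndServe("127.0.0.1:8080", mux) // illustrative address
}
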
BIN
dist/favicon-16x16.png
vendored
Normal file
Binary file not shown. (Size: 520 B)
BIN
dist/favicon-32x32.png
vendored
Normal file
Binary file not shown. (Size: 1.1 KiB)
BIN
dist/favicon.ico
vendored
Normal file
Binary file not shown. (Size: 15 KiB)
@@ -95,14 +95,33 @@ func beforeAll() {

func startApp() error {
	port := getPort()
	app = exec.Command("go", "run", "main.go")
	settingsPath := "./test-settings.yml"
	settingsBody := fmt.Sprintf(`settings:
  log_level: "debug"
  log_output: "text"
  database_driver: "sqlite"
  database_url: "./test-db.sqlite3"
  bind_ip: "127.0.0.1"
  bind_port: %d
  bind_disable_tls: true
  tls_cert_filename:
  tls_key_filename:
  vcenter_username: "test"
  vcenter_password: "test"
  vcenter_insecure: true
  vcenter_event_polling_seconds: 60
  vcenter_inventory_polling_seconds: 7200
  vcenter_inventory_snapshot_seconds: 3600
  vcenter_inventory_aggregate_seconds: 86400
  hourly_snapshot_max_age_days: 1
  daily_snapshot_max_age_months: 1
`, port)
	if err := os.WriteFile("../"+settingsPath, []byte(settingsBody), 0o600); err != nil {
		return err
	}
	app = exec.Command("go", "run", "main.go", "-settings", settingsPath)
	app.Dir = "../"
	app.Env = append(
		os.Environ(),
		"DB_URL=./test-db.sqlite3",
		fmt.Sprintf("PORT=%d", port),
		"LOG_LEVEL=DEBUG",
	)
	app.Env = os.Environ()

	var err error
	baseUrL, err = url.Parse(fmt.Sprintf("http://localhost:%d", port))
@@ -188,6 +207,9 @@ func afterAll() {
	if err := os.Remove("../test-db.sqlite3"); err != nil {
		log.Fatalf("could not remove test-db.sqlite3: %v", err)
	}
	if err := os.Remove("../test-settings.yml"); err != nil {
		log.Fatalf("could not remove test-settings.yml: %v", err)
	}
}

// beforeEach creates a new context and page for each test,

66
go.mod
@@ -1,36 +1,64 @@
|
||||
module vctp
|
||||
|
||||
go 1.23.1
|
||||
go 1.25.5
|
||||
|
||||
require (
|
||||
github.com/a-h/templ v0.2.778
|
||||
github.com/go-co-op/gocron/v2 v2.11.0
|
||||
github.com/a-h/templ v0.3.977
|
||||
github.com/go-co-op/gocron/v2 v2.19.0
|
||||
github.com/jackc/pgx/v5 v5.8.0
|
||||
github.com/jmoiron/sqlx v1.4.0
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/pressly/goose/v3 v3.22.0
|
||||
github.com/vmware/govmomi v0.43.0
|
||||
modernc.org/sqlite v1.33.0
|
||||
github.com/pressly/goose/v3 v3.26.0
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/swaggo/swag v1.16.6
|
||||
github.com/vmware/govmomi v0.52.0
|
||||
github.com/xuri/excelize/v2 v2.10.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
modernc.org/sqlite v1.44.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.6 // indirect
|
||||
github.com/go-openapi/spec v0.20.4 // indirect
|
||||
github.com/go-openapi/swag v0.19.15 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/jonboulle/clockwork v0.4.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.2 // indirect
|
||||
github.com/jonboulle/clockwork v0.5.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mfridman/interpolate v0.0.2 // indirect
|
||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/richardlehane/mscfb v1.0.6 // indirect
|
||||
github.com/richardlehane/msoleps v1.0.6 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||
github.com/sethvargo/go-retry v0.3.0 // indirect
|
||||
github.com/tiendc/go-deepcopy v1.7.2 // indirect
|
||||
github.com/xuri/efp v0.0.1 // indirect
|
||||
github.com/xuri/nfp v0.0.2-0.20250530014748-2ddeb826f9a9 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
|
||||
golang.org/x/sync v0.8.0 // indirect
|
||||
golang.org/x/sys v0.25.0 // indirect
|
||||
golang.org/x/tools v0.25.0 // indirect
|
||||
modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.8.0 // indirect
|
||||
modernc.org/strutil v1.2.0 // indirect
|
||||
modernc.org/token v1.1.0 // indirect
|
||||
golang.org/x/crypto v0.47.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
|
||||
golang.org/x/mod v0.32.0 // indirect
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/sync v0.19.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
golang.org/x/tools v0.41.0 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
modernc.org/libc v1.67.4 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
)
|
||||
|
||||
198
go.sum
@@ -1,81 +1,193 @@
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/a-h/templ v0.2.778 h1:VzhOuvWECrwOec4790lcLlZpP4Iptt5Q4K9aFxQmtaM=
|
||||
github.com/a-h/templ v0.2.778/go.mod h1:lq48JXoUvuQrU0VThrK31yFwdRjTCnIE5bcPCM9IP1w=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/a-h/templ v0.3.977 h1:kiKAPXTZE2Iaf8JbtM21r54A8bCNsncrfnokZZSrSDg=
|
||||
github.com/a-h/templ v0.3.977/go.mod h1:oCZcnKRf5jjsGpf2yELzQfodLphd2mwecwG4Crk5HBo=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/go-co-op/gocron/v2 v2.11.0 h1:IOowNA6SzwdRFnD4/Ol3Kj6G2xKfsoiiGq2Jhhm9bvE=
|
||||
github.com/go-co-op/gocron/v2 v2.11.0/go.mod h1:xY7bJxGazKam1cz04EebrlP4S9q4iWdiAylMGP3jY9w=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU=
|
||||
github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
|
||||
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
|
||||
github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
|
||||
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
|
||||
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
|
||||
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
|
||||
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
|
||||
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
|
||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
|
||||
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
|
||||
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pressly/goose/v3 v3.22.0 h1:wd/7kNiPTuNAztWun7iaB98DrhulbWPrzMAaw2DEZNw=
|
||||
github.com/pressly/goose/v3 v3.22.0/go.mod h1:yJM3qwSj2pp7aAaCvso096sguezamNb2OBgxCnh/EYg=
|
||||
github.com/pressly/goose/v3 v3.26.0 h1:KJakav68jdH0WDvoAcj8+n61WqOIaPGgH0bJWS6jpmM=
|
||||
github.com/pressly/goose/v3 v3.26.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
|
||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/richardlehane/mscfb v1.0.6 h1:eN3bvvZCp00bs7Zf52bxNwAx5lJDBK1tCuH19qq5aC8=
|
||||
github.com/richardlehane/mscfb v1.0.6/go.mod h1:pe0+IUIc0AHh0+teNzBlJCtSyZdFOGgV4ZK9bsoV+Jo=
|
||||
github.com/richardlehane/msoleps v1.0.6 h1:9BvkpjvD+iUBalUY4esMwv6uBkfOip/Lzvd93jvR9gg=
|
||||
github.com/richardlehane/msoleps v1.0.6/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
|
||||
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/vmware/govmomi v0.43.0 h1:7Kg3Bkdly+TrE67BYXzRq7ZrDnn7xqpKX95uEh2f9Go=
|
||||
github.com/vmware/govmomi v0.43.0/go.mod h1:IOv5nTXCPqH9qVJAlRuAGffogaLsNs8aF+e7vLgsHJU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
|
||||
github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
|
||||
github.com/tiendc/go-deepcopy v1.7.2 h1:Ut2yYR7W9tWjTQitganoIue4UGxZwCcJy3orjrrIj44=
|
||||
github.com/tiendc/go-deepcopy v1.7.2/go.mod h1:4bKjNC2r7boYOkD2IOuZpYjmlDdzjbpTRyCx+goBCJQ=
|
||||
github.com/vmware/govmomi v0.52.0 h1:JyxQ1IQdllrY7PJbv2am9mRsv3p9xWlIQ66bv+XnyLw=
|
||||
github.com/vmware/govmomi v0.52.0/go.mod h1:Yuc9xjznU3BH0rr6g7MNS1QGvxnJlE1vOvTJ7Lx7dqI=
|
||||
github.com/xuri/efp v0.0.1 h1:fws5Rv3myXyYni8uwj2qKjVaRP30PdjeYe2Y6FDsCL8=
|
||||
github.com/xuri/efp v0.0.1/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI=
|
||||
github.com/xuri/excelize/v2 v2.10.0 h1:8aKsP7JD39iKLc6dH5Tw3dgV3sPRh8uRVXu/fMstfW4=
|
||||
github.com/xuri/excelize/v2 v2.10.0/go.mod h1:SC5TzhQkaOsTWpANfm+7bJCldzcnU/jrhqkTi/iBHBU=
|
||||
github.com/xuri/nfp v0.0.2-0.20250530014748-2ddeb826f9a9 h1:+C0TIdyyYmzadGaL/HBLbf3WdLgC29pgyhTjAT/0nuE=
|
||||
github.com/xuri/nfp v0.0.2-0.20250530014748-2ddeb826f9a9/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
|
||||
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
|
||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
|
||||
golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ=
|
||||
golang.org/x/image v0.25.0/go.mod h1:tCAmOEGthTtkalusGp1g3xa2gke8J6c2N565dTyl9Rs=
|
||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
|
||||
golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
|
||||
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
|
||||
modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a h1:CfbpOLEo2IwNzJdMvE8aiRbPMxoTpgAJeyePh0SmO8M=
|
||||
modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
|
||||
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
|
||||
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
|
||||
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
|
||||
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
|
||||
modernc.org/sqlite v1.33.0 h1:WWkA/T2G17okiLGgKAj4/RMIvgyMT19yQ038160IeYk=
|
||||
modernc.org/sqlite v1.33.0/go.mod h1:9uQ9hF/pCZoYZK73D/ud5Z7cIRIILSZI8NdIemVMTX8=
|
||||
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
|
||||
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
|
||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.67.4 h1:zZGmCMUVPORtKv95c2ReQN5VDjvkoRm9GWPTEPuvlWg=
|
||||
modernc.org/libc v1.67.4/go.mod h1:QvvnnJ5P7aitu0ReNpVIEyesuhmDLQ8kaEoyMjIFZJA=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
|
||||
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.44.0 h1:YjCKJnzZde2mLVy0cMKTSL4PxCmbIguOq9lGp8ZvGOc=
|
||||
modernc.org/sqlite v1.44.0/go.mod h1:2Dq41ir5/qri7QJJJKNZcP4UF7TsX/KNeykYgPDtGhE=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
|
||||
125
internal/metrics/metrics.go
Normal file
@@ -0,0 +1,125 @@
package metrics

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	registry = prometheus.NewRegistry()

	HourlySnapshotTotal    = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_hourly_snapshots_total", Help: "Total number of hourly snapshot jobs completed."})
	HourlySnapshotFailures = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_hourly_snapshots_failed_total", Help: "Hourly snapshot jobs that failed."})
	HourlySnapshotLast     = prometheus.NewGauge(prometheus.GaugeOpts{Name: "vctp_hourly_snapshot_last_unix", Help: "Unix timestamp of the last hourly snapshot start time."})
	HourlySnapshotRows     = prometheus.NewGauge(prometheus.GaugeOpts{Name: "vctp_hourly_snapshot_last_rows", Help: "Row count of the last hourly snapshot table."})

	DailyAggregationsTotal   = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_daily_aggregations_total", Help: "Total number of daily aggregation jobs completed."})
	DailyAggregationFailures = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_daily_aggregations_failed_total", Help: "Daily aggregation jobs that failed."})
	DailyAggregationDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "vctp_daily_aggregation_duration_seconds",
		Help:    "Duration of daily aggregation jobs.",
		Buckets: prometheus.ExponentialBuckets(1, 2, 10),
	})

	MonthlyAggregationsTotal   = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_monthly_aggregations_total", Help: "Total number of monthly aggregation jobs completed."})
	MonthlyAggregationFailures = prometheus.NewCounter(prometheus.CounterOpts{Name: "vctp_monthly_aggregations_failed_total", Help: "Monthly aggregation jobs that failed."})
	MonthlyAggregationDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "vctp_monthly_aggregation_duration_seconds",
		Help:    "Duration of monthly aggregation jobs.",
		Buckets: prometheus.ExponentialBuckets(1, 2, 10),
	})

	ReportsAvailable = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "vctp_reports_available",
		Help: "Number of downloadable reports present on disk.",
	})

	VcenterConnectFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "vctp_vcenter_connect_failures_total",
		Help: "Failed connections to vCenter during snapshot runs.",
	}, []string{"vcenter"})

	VcenterSnapshotDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "vctp_vcenter_snapshot_duration_seconds",
		Help:    "Duration of per-vCenter hourly snapshot jobs.",
		Buckets: prometheus.ExponentialBuckets(0.5, 2, 10),
	}, []string{"vcenter"})

	VcenterInventorySize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "vctp_vcenter_inventory_size",
		Help: "Number of VMs seen in the last successful snapshot per vCenter.",
	}, []string{"vcenter"})
)

func init() {
	registry.MustRegister(
		HourlySnapshotTotal,
		HourlySnapshotFailures,
		HourlySnapshotLast,
		HourlySnapshotRows,
		DailyAggregationsTotal,
		DailyAggregationFailures,
		DailyAggregationDuration,
		MonthlyAggregationsTotal,
		MonthlyAggregationFailures,
		MonthlyAggregationDuration,
		ReportsAvailable,
		VcenterConnectFailures,
		VcenterSnapshotDuration,
		VcenterInventorySize,
	)
}

// Handler returns an http.Handler that serves Prometheus metrics.
func Handler() http.Handler {
	return promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
}

// RecordVcenterSnapshot logs per-vCenter snapshot metrics.
func RecordVcenterSnapshot(vcenter string, duration time.Duration, vmCount int64, err error) {
	VcenterSnapshotDuration.WithLabelValues(vcenter).Observe(duration.Seconds())
	if err != nil {
		VcenterConnectFailures.WithLabelValues(vcenter).Inc()
		return
	}
	VcenterInventorySize.WithLabelValues(vcenter).Set(float64(vmCount))
}

// RecordHourlySnapshot logs aggregate hourly snapshot results.
func RecordHourlySnapshot(start time.Time, rows int64, err error) {
	HourlySnapshotLast.Set(float64(start.Unix()))
	HourlySnapshotRows.Set(float64(rows))
	if err != nil {
		HourlySnapshotFailures.Inc()
		return
	}
	HourlySnapshotTotal.Inc()
}

// RecordDailyAggregation logs daily aggregation metrics.
func RecordDailyAggregation(duration time.Duration, err error) {
	DailyAggregationDuration.Observe(duration.Seconds())
	if err != nil {
		DailyAggregationFailures.Inc()
		return
	}
	DailyAggregationsTotal.Inc()
}

// RecordMonthlyAggregation logs monthly aggregation metrics.
func RecordMonthlyAggregation(duration time.Duration, err error) {
	MonthlyAggregationDuration.Observe(duration.Seconds())
	if err != nil {
		MonthlyAggregationFailures.Inc()
		return
	}
	MonthlyAggregationsTotal.Inc()
}

// SetReportsAvailable updates the gauge for report files found on disk.
func SetReportsAvailable(count int) {
	ReportsAvailable.Set(float64(count))
}
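The registry above is private to the package, so callers only interact with it through Handler() and the Record* helpers. A minimal sketch of how they might be wired together; the /metrics path, listen address, and the recorded values are illustrative assumptions, not taken from this diff.

```go
package main

import (
	"net/http"
	"time"

	"vctp/internal/metrics"
)

func main() {
	// Expose the package-level registry; path and address are illustrative.
	http.Handle("/metrics", metrics.Handler())

	// Record one hourly snapshot run (values are made up for the example).
	start := time.Now()
	var rows int64 = 1234
	metrics.RecordHourlySnapshot(start, rows, nil)

	// Per-vCenter observation: duration, VM count, and the run error (nil here).
	metrics.RecordVcenterSnapshot("vcenter01.example.com", 42*time.Second, 560, nil)

	_ = http.ListenAndServe(":9090", nil)
}
```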
299
internal/report/create.go
Normal file
@@ -0,0 +1,299 @@
|
||||
package report
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
"vctp/db"
|
||||
|
||||
"github.com/xuri/excelize/v2"
|
||||
)
|
||||
|
||||
func CreateInventoryReport(logger *slog.Logger, Database db.Database, ctx context.Context) ([]byte, error) {
|
||||
//var xlsx *excelize.File
|
||||
sheetName := "Inventory Report"
|
||||
var buffer bytes.Buffer
|
||||
var cell string
|
||||
|
||||
logger.Debug("Querying inventory table")
|
||||
results, err := Database.Queries().GetReportInventory(ctx)
|
||||
if err != nil {
|
||||
logger.Error("Unable to query inventory table", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
logger.Error("Empty inventory results")
|
||||
return nil, fmt.Errorf("Empty inventory results")
|
||||
}
|
||||
|
||||
// Create excel workbook
|
||||
xlsx := excelize.NewFile()
|
||||
err = xlsx.SetSheetName("Sheet1", sheetName)
|
||||
if err != nil {
|
||||
logger.Error("Error setting sheet name", "error", err, "sheet_name", sheetName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the document properties
|
||||
err = xlsx.SetDocProps(&excelize.DocProperties{
|
||||
Creator: "json2excel",
|
||||
Created: time.Now().Format(time.RFC3339),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Error setting document properties", "error", err, "sheet_name", sheetName)
|
||||
}
|
||||
|
||||
// Use reflection to determine column headings from the first item
|
||||
firstItem := results[0]
|
||||
v := reflect.ValueOf(firstItem)
|
||||
typeOfItem := v.Type()
|
||||
|
||||
// Create column headers dynamically
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
column := string(rune('A'+i)) + "1" // A1, B1, C1, etc.
|
||||
xlsx.SetCellValue(sheetName, column, typeOfItem.Field(i).Name)
|
||||
}
|
||||
|
||||
// Set autofilter on heading row
|
||||
cell, _ = excelize.CoordinatesToCellName(v.NumField(), 1)
|
||||
filterRange := "A1:" + cell
|
||||
logger.Debug("Setting autofilter", "range", filterRange)
|
||||
// As per docs any filters applied need to be manually processed by us (eg hiding rows with blanks)
|
||||
err = xlsx.AutoFilter(sheetName, filterRange, nil)
|
||||
if err != nil {
|
||||
logger.Error("Error setting autofilter", "error", err)
|
||||
}
|
||||
|
||||
// Bold top row
|
||||
headerStyle, err := xlsx.NewStyle(&excelize.Style{
|
||||
Font: &excelize.Font{
|
||||
Bold: true,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Error generating header style", "error", err)
|
||||
} else {
|
||||
err = xlsx.SetRowStyle(sheetName, 1, 1, headerStyle)
|
||||
if err != nil {
|
||||
logger.Error("Error setting header style", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Populate the Excel file with data from the Inventory table
|
||||
for i, item := range results {
|
||||
v = reflect.ValueOf(item)
|
||||
for j := 0; j < v.NumField(); j++ {
|
||||
column := string(rune('A'+j)) + strconv.Itoa(i+2) // Start from row 2
|
||||
value := getFieldValue(v.Field(j))
|
||||
xlsx.SetCellValue(sheetName, column, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Freeze top row
|
||||
err = xlsx.SetPanes(sheetName, &excelize.Panes{
|
||||
Freeze: true,
|
||||
Split: false,
|
||||
XSplit: 0,
|
||||
YSplit: 1,
|
||||
TopLeftCell: "A2",
|
||||
ActivePane: "bottomLeft",
|
||||
Selection: []excelize.Selection{
|
||||
{SQRef: "A2", ActiveCell: "A2", Pane: "bottomLeft"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Error freezing top row", "error", err)
|
||||
}
|
||||
|
||||
// Set column autowidth
|
||||
err = SetColAutoWidth(xlsx, sheetName)
|
||||
if err != nil {
|
||||
logger.Error("Error setting auto width", "error", err)
|
||||
}
|
||||
|
||||
// Save the Excel file into a byte buffer
|
||||
if err := xlsx.Write(&buffer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
func CreateUpdatesReport(logger *slog.Logger, Database db.Database, ctx context.Context) ([]byte, error) {
|
||||
//var xlsx *excelize.File
|
||||
sheetName := "Updates Report"
|
||||
var buffer bytes.Buffer
|
||||
var cell string
|
||||
|
||||
logger.Debug("Querying updates table")
|
||||
results, err := Database.Queries().GetReportUpdates(ctx)
|
||||
if err != nil {
|
||||
logger.Error("Unable to query updates table", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
logger.Error("Empty updates results")
|
||||
return nil, fmt.Errorf("Empty updates results")
|
||||
}
|
||||
|
||||
// Create excel workbook
|
||||
xlsx := excelize.NewFile()
|
||||
err = xlsx.SetSheetName("Sheet1", sheetName)
|
||||
if err != nil {
|
||||
logger.Error("Error setting sheet name", "error", err, "sheet_name", sheetName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set the document properties
|
||||
err = xlsx.SetDocProps(&excelize.DocProperties{
|
||||
Creator: "json2excel",
|
||||
Created: time.Now().Format(time.RFC3339),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Error setting document properties", "error", err, "sheet_name", sheetName)
|
||||
}
|
||||
|
||||
// Use reflection to determine column headings from the first item
|
||||
firstItem := results[0]
|
||||
v := reflect.ValueOf(firstItem)
|
||||
typeOfItem := v.Type()
|
||||
|
||||
// Create column headers dynamically
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
column := string(rune('A'+i)) + "1" // A1, B1, C1, etc.
|
||||
xlsx.SetCellValue(sheetName, column, typeOfItem.Field(i).Name)
|
||||
}
|
||||
|
||||
// Set autofilter on heading row
|
||||
cell, _ = excelize.CoordinatesToCellName(v.NumField(), 1)
|
||||
filterRange := "A1:" + cell
|
||||
logger.Debug("Setting autofilter", "range", filterRange)
|
||||
// As per docs any filters applied need to be manually processed by us (eg hiding rows with blanks)
|
||||
err = xlsx.AutoFilter(sheetName, filterRange, nil)
|
||||
if err != nil {
|
||||
logger.Error("Error setting autofilter", "error", err)
|
||||
}
|
||||
|
||||
// Bold top row
|
||||
headerStyle, err := xlsx.NewStyle(&excelize.Style{
|
||||
Font: &excelize.Font{
|
||||
Bold: true,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Error generating header style", "error", err)
|
||||
} else {
|
||||
err = xlsx.SetRowStyle(sheetName, 1, 1, headerStyle)
|
||||
if err != nil {
|
||||
logger.Error("Error setting header style", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Populate the Excel file with data from the Updates table
|
||||
for i, item := range results {
|
||||
v = reflect.ValueOf(item)
|
||||
for j := 0; j < v.NumField(); j++ {
|
||||
column := string(rune('A'+j)) + strconv.Itoa(i+2) // Start from row 2
|
||||
value := getFieldValue(v.Field(j))
|
||||
xlsx.SetCellValue(sheetName, column, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Freeze top row
|
||||
err = xlsx.SetPanes(sheetName, &excelize.Panes{
|
||||
Freeze: true,
|
||||
Split: false,
|
||||
XSplit: 0,
|
||||
YSplit: 1,
|
||||
TopLeftCell: "A2",
|
||||
ActivePane: "bottomLeft",
|
||||
Selection: []excelize.Selection{
|
||||
{SQRef: "A2", ActiveCell: "A2", Pane: "bottomLeft"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Error freezing top row", "error", err)
|
||||
}
|
||||
|
||||
// Set column autowidth
|
||||
err = SetColAutoWidth(xlsx, sheetName)
|
||||
if err != nil {
|
||||
logger.Error("Error setting auto width", "error", err)
|
||||
}
|
||||
|
||||
// Save the Excel file into a byte buffer
|
||||
if err := xlsx.Write(&buffer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
// Helper function to get the actual value of sql.Null types
|
||||
func getFieldValue(field reflect.Value) interface{} {
|
||||
switch field.Kind() {
|
||||
case reflect.Struct:
|
||||
// Handle sql.Null types based on their concrete type
|
||||
switch field.Interface().(type) {
|
||||
case sql.NullString:
|
||||
ns := field.Interface().(sql.NullString)
|
||||
if ns.Valid {
|
||||
return ns.String
|
||||
}
|
||||
return ""
|
||||
case sql.NullInt64:
|
||||
ni := field.Interface().(sql.NullInt64)
|
||||
if ni.Valid {
|
||||
return ni.Int64
|
||||
}
|
||||
return -1
|
||||
case sql.NullFloat64:
|
||||
nf := field.Interface().(sql.NullFloat64)
|
||||
if nf.Valid {
|
||||
return nf.Float64
|
||||
}
|
||||
return nil
|
||||
case sql.NullBool:
|
||||
nb := field.Interface().(sql.NullBool)
|
||||
if nb.Valid {
|
||||
return nb.Bool
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
return field.Interface() // Return the value as-is for non-sql.Null types
|
||||
}
|
||||
|
||||
// Taken from https://github.com/qax-os/excelize/issues/92#issuecomment-821578446
|
||||
func SetColAutoWidth(xlsx *excelize.File, sheetName string) error {
|
||||
// Autofit all columns according to their text content
|
||||
cols, err := xlsx.GetCols(sheetName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for idx, col := range cols {
|
||||
largestWidth := 0
|
||||
for _, rowCell := range col {
|
||||
cellWidth := utf8.RuneCountInString(rowCell) + 2 // + 2 for margin
|
||||
if cellWidth > largestWidth {
|
||||
largestWidth = cellWidth
|
||||
}
|
||||
}
|
||||
//fmt.Printf("SetColAutoWidth calculated largest width for column index '%d' is '%d'\n", idx, largestWidth)
|
||||
name, err := excelize.ColumnNumberToName(idx + 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
xlsx.SetColWidth(sheetName, name, name, float64(largestWidth))
|
||||
}
|
||||
// No errors at this point
|
||||
return nil
|
||||
}
|
||||
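The getFieldValue helper above substitutes a default when a sql.Null* field is not Valid (empty string, -1, nil, or false). A small self-contained illustration of that unwrapping pattern, independent of the report package; the row type and values here are made up for the example.

```go
package main

import (
	"database/sql"
	"fmt"
	"reflect"
)

// nullToValue mirrors the unwrapping getFieldValue performs: Valid values are
// returned as-is, invalid ones fall back to a sentinel default.
func nullToValue(field reflect.Value) interface{} {
	switch v := field.Interface().(type) {
	case sql.NullString:
		if v.Valid {
			return v.String
		}
		return ""
	case sql.NullInt64:
		if v.Valid {
			return v.Int64
		}
		return int64(-1)
	default:
		return field.Interface()
	}
}

func main() {
	type row struct {
		Name sql.NullString
		Disk sql.NullInt64
	}
	r := row{Name: sql.NullString{String: "vm-01", Valid: true}} // Disk left invalid
	v := reflect.ValueOf(r)
	for i := 0; i < v.NumField(); i++ {
		fmt.Printf("%s = %v\n", v.Type().Field(i).Name, nullToValue(v.Field(i)))
	}
	// Output:
	// Name = vm-01
	// Disk = -1
}
```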
1835
internal/report/snapshots.go
Normal file
File diff suppressed because it is too large
80
internal/secrets/secrets.go
Normal file
@@ -0,0 +1,80 @@
package secrets

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"io"
	"log/slog"
)

type Secrets struct {
	Logger        *slog.Logger
	EncryptionKey []byte
}

func New(logger *slog.Logger, key []byte) *Secrets {
	return &Secrets{
		Logger:        logger,
		EncryptionKey: key,
	}
}

// Encrypt encrypts data using AES256-GCM and returns base64 encoded ciphertext.
func (s *Secrets) Encrypt(plainText []byte) (string, error) {
	block, err := aes.NewCipher(s.EncryptionKey)
	if err != nil {
		return "", err
	}

	// Create a new GCM cipher
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}

	// Create a nonce
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return "", err
	}

	// Encrypt the plaintext using AES256-GCM; the nonce is prepended to the ciphertext
	cipherText := gcm.Seal(nonce, nonce, plainText, nil)

	// Return the base64 encoded ciphertext
	return base64.StdEncoding.EncodeToString(cipherText), nil
}

// Decrypt decrypts base64 encoded AES256-GCM ciphertext.
func (s *Secrets) Decrypt(base64CipherText string) ([]byte, error) {
	// Decode the base64 ciphertext
	cipherText, err := base64.StdEncoding.DecodeString(base64CipherText)
	if err != nil {
		return nil, err
	}

	block, err := aes.NewCipher(s.EncryptionKey)
	if err != nil {
		return nil, err
	}

	// Create a new GCM cipher
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	// Extract the nonce from the ciphertext, guarding against truncated input
	nonceSize := gcm.NonceSize()
	if len(cipherText) < nonceSize {
		return nil, errors.New("ciphertext shorter than GCM nonce")
	}
	nonce, cipherText := cipherText[:nonceSize], cipherText[nonceSize:]

	// Decrypt the ciphertext
	plainText, err := gcm.Open(nil, nonce, cipherText, nil)
	if err != nil {
		return nil, err
	}

	return plainText, nil
}
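A minimal round-trip sketch of the type above. The 32-byte key (AES-256) and the way it is generated here are assumptions for the example; the package accepts whatever aes.NewCipher accepts (16, 24, or 32 bytes), and in the real service the key would come from configuration rather than being generated ad hoc.

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log/slog"
	"os"

	"vctp/internal/secrets"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// 32-byte key => AES-256; generated here only for the sake of the example.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	s := secrets.New(logger, key)

	cipherText, err := s.Encrypt([]byte("vcenter password"))
	if err != nil {
		panic(err)
	}

	plainText, err := s.Decrypt(cipherText)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plainText)) // "vcenter password"
}
```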
148
internal/settings/settings.go
Normal file
@@ -0,0 +1,148 @@
|
||||
package settings
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"vctp/internal/utils"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type Settings struct {
|
||||
SettingsPath string
|
||||
Logger *slog.Logger
|
||||
Values *SettingsYML
|
||||
}
|
||||
|
||||
// SettingsYML struct holds various runtime data that is too cumbersome to specify via command line, eg replacement properties
|
||||
type SettingsYML struct {
|
||||
Settings struct {
|
||||
LogLevel string `yaml:"log_level"`
|
||||
LogOutput string `yaml:"log_output"`
|
||||
DatabaseDriver string `yaml:"database_driver"`
|
||||
DatabaseURL string `yaml:"database_url"`
|
||||
BindIP string `yaml:"bind_ip"`
|
||||
BindPort int `yaml:"bind_port"`
|
||||
BindDisableTLS bool `yaml:"bind_disable_tls"`
|
||||
TLSCertFilename string `yaml:"tls_cert_filename"`
|
||||
TLSKeyFilename string `yaml:"tls_key_filename"`
|
||||
VcenterUsername string `yaml:"vcenter_username"`
|
||||
VcenterPassword string `yaml:"vcenter_password"`
|
||||
VcenterInsecure bool `yaml:"vcenter_insecure"`
|
||||
VcenterEventPollingSeconds int `yaml:"vcenter_event_polling_seconds"`
|
||||
VcenterInventoryPollingSeconds int `yaml:"vcenter_inventory_polling_seconds"`
|
||||
VcenterInventorySnapshotSeconds int `yaml:"vcenter_inventory_snapshot_seconds"`
|
||||
VcenterInventoryAggregateSeconds int `yaml:"vcenter_inventory_aggregate_seconds"`
|
||||
HourlySnapshotConcurrency int `yaml:"hourly_snapshot_concurrency"`
|
||||
HourlySnapshotMaxAgeDays int `yaml:"hourly_snapshot_max_age_days"`
|
||||
DailySnapshotMaxAgeMonths int `yaml:"daily_snapshot_max_age_months"`
|
||||
SnapshotCleanupCron string `yaml:"snapshot_cleanup_cron"`
|
||||
ReportsDir string `yaml:"reports_dir"`
|
||||
HourlyJobTimeoutSeconds int `yaml:"hourly_job_timeout_seconds"`
|
||||
HourlySnapshotTimeoutSeconds int `yaml:"hourly_snapshot_timeout_seconds"`
|
||||
HourlySnapshotRetrySeconds int `yaml:"hourly_snapshot_retry_seconds"`
|
||||
HourlySnapshotMaxRetries int `yaml:"hourly_snapshot_max_retries"`
|
||||
DailyJobTimeoutSeconds int `yaml:"daily_job_timeout_seconds"`
|
||||
MonthlyJobTimeoutSeconds int `yaml:"monthly_job_timeout_seconds"`
|
||||
MonthlyAggregationGranularity string `yaml:"monthly_aggregation_granularity"`
|
||||
MonthlyAggregationCron string `yaml:"monthly_aggregation_cron"`
|
||||
CleanupJobTimeoutSeconds int `yaml:"cleanup_job_timeout_seconds"`
|
||||
TenantsToFilter []string `yaml:"tenants_to_filter"`
|
||||
NodeChargeClusters []string `yaml:"node_charge_clusters"`
|
||||
SrmActiveActiveVms []string `yaml:"srm_activeactive_vms"`
|
||||
VcenterAddresses []string `yaml:"vcenter_addresses"`
|
||||
PostgresWorkMemMB int `yaml:"postgres_work_mem_mb"`
|
||||
} `yaml:"settings"`
|
||||
}
|
||||
|
||||
func New(logger *slog.Logger, settingsPath string) *Settings {
|
||||
return &Settings{
|
||||
SettingsPath: utils.GetFilePath(settingsPath),
|
||||
Logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Settings) ReadYMLSettings() error {
|
||||
// Create config structure
|
||||
var settings SettingsYML
|
||||
|
||||
// Check for empty filename
|
||||
if len(s.SettingsPath) == 0 {
|
||||
return errors.New("settings file path not specified")
|
||||
}
|
||||
|
||||
//path := utils.GetFilePath(settingsPath)
|
||||
|
||||
// Open config file
|
||||
file, err := os.Open(s.SettingsPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to open settings file : '%s'", err)
|
||||
}
|
||||
s.Logger.Debug("Opened settings yaml file", "file_path", s.SettingsPath)
|
||||
defer file.Close()
|
||||
|
||||
// Init new YAML decode
|
||||
d := yaml.NewDecoder(file)
|
||||
|
||||
// Start YAML decoding from file
|
||||
if err := d.Decode(&settings); err != nil {
|
||||
return fmt.Errorf("unable to decode settings file : '%s'", err)
|
||||
}
|
||||
|
||||
// Avoid logging sensitive fields (e.g., credentials).
|
||||
redacted := settings
|
||||
redacted.Settings.VcenterPassword = "REDACTED"
|
||||
s.Logger.Debug("Updating settings", "settings", redacted)
|
||||
s.Values = &settings
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Settings) WriteYMLSettings() error {
|
||||
if s.Values == nil {
|
||||
return errors.New("settings are not loaded")
|
||||
}
|
||||
if len(s.SettingsPath) == 0 {
|
||||
return errors.New("settings file path not specified")
|
||||
}
|
||||
|
||||
data, err := yaml.Marshal(s.Values)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to encode settings file: %w", err)
|
||||
}
|
||||
|
||||
mode := os.FileMode(0o644)
|
||||
if info, err := os.Stat(s.SettingsPath); err == nil {
|
||||
mode = info.Mode().Perm()
|
||||
}
|
||||
|
||||
dir := filepath.Dir(s.SettingsPath)
|
||||
tmp, err := os.CreateTemp(dir, "vctp-settings-*.yml")
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create temp settings file: %w", err)
|
||||
}
|
||||
tmpName := tmp.Name()
|
||||
defer func() {
|
||||
_ = os.Remove(tmpName)
|
||||
}()
|
||||
|
||||
if _, err := tmp.Write(data); err != nil {
|
||||
_ = tmp.Close()
|
||||
return fmt.Errorf("unable to write temp settings file: %w", err)
|
||||
}
|
||||
if err := tmp.Chmod(mode); err != nil {
|
||||
_ = tmp.Close()
|
||||
return fmt.Errorf("unable to set temp settings permissions: %w", err)
|
||||
}
|
||||
if err := tmp.Close(); err != nil {
|
||||
return fmt.Errorf("unable to close temp settings file: %w", err)
|
||||
}
|
||||
if err := os.Rename(tmpName, s.SettingsPath); err != nil {
|
||||
return fmt.Errorf("unable to replace settings file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
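A short sketch of loading and persisting settings with the type above. The file name and the field being changed are illustrative; WriteYMLSettings performs the atomic temp-file-plus-rename write shown in the diff.

```go
package main

import (
	"fmt"
	"log/slog"
	"os"

	"vctp/internal/settings"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	s := settings.New(logger, "settings.yml") // path is illustrative
	if err := s.ReadYMLSettings(); err != nil {
		logger.Error("loading settings", "error", err)
		os.Exit(1)
	}
	fmt.Println("bind port:", s.Values.Settings.BindPort)

	// Mutate a value and write it back atomically (temp file + rename).
	s.Values.Settings.BindPort = 8443
	if err := s.WriteYMLSettings(); err != nil {
		logger.Error("saving settings", "error", err)
	}
}
```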
32
internal/tasks/aggregateCommon.go
Normal file
@@ -0,0 +1,32 @@
package tasks

import (
	"context"
	"time"
	"vctp/db"
)

// runAggregateJob wraps aggregation cron jobs with timeout, migration check, and circuit breaker semantics.
func (c *CronTask) runAggregateJob(ctx context.Context, jobName string, timeout time.Duration, fn func(context.Context) error) (err error) {
	jobCtx := ctx
	if timeout > 0 {
		var cancel context.CancelFunc
		jobCtx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	tracker := NewCronTracker(c.Database)
	done, skip, err := tracker.Start(jobCtx, jobName)
	if err != nil {
		return err
	}
	if skip {
		return nil
	}
	defer func() { done(err) }()

	if err := db.CheckMigrationState(jobCtx, c.Database.DB()); err != nil {
		return err
	}

	return fn(jobCtx)
}
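The method above delegates the "already running" bookkeeping to CronTracker (defined in the next file) and only adds the timeout, the migration check, and a deferred completion callback. A dependency-free sketch of the same wrapping shape, useful for seeing why the result is captured in a named return before the deferred callback runs; all names here are illustrative, not part of the package.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runGuarded mirrors runAggregateJob's shape: optional timeout, a start/skip
// check, and a deferred completion callback that sees the final error because
// err is a named return value.
func runGuarded(ctx context.Context, timeout time.Duration, start func() (func(error), bool), fn func(context.Context) error) (err error) {
	jobCtx := ctx
	if timeout > 0 {
		var cancel context.CancelFunc
		jobCtx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	done, skip := start()
	if skip {
		return nil
	}
	defer func() { done(err) }()
	return fn(jobCtx)
}

func main() {
	running := false
	start := func() (func(error), bool) {
		if running {
			return nil, true // skip: a previous run is still in progress
		}
		running = true
		return func(runErr error) {
			running = false
			fmt.Println("finished with:", runErr)
		}, false
	}

	err := runGuarded(context.Background(), time.Second, start, func(ctx context.Context) error {
		return errors.New("boom")
	})
	fmt.Println("returned:", err)
}
```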
188
internal/tasks/cronstatus.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package tasks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
"vctp/db"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
func NewCronTracker(database db.Database) *CronTracker {
|
||||
return &CronTracker{
|
||||
db: database,
|
||||
bindType: sqlx.BindType(database.DB().DriverName()),
|
||||
}
|
||||
}
|
||||
|
||||
// ClearAllInProgress resets any stuck in-progress flags (e.g., after crashes).
|
||||
func (c *CronTracker) ClearAllInProgress(ctx context.Context) error {
|
||||
if err := c.ensureTable(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := c.db.DB().ExecContext(ctx, `UPDATE cron_status SET in_progress = FALSE`)
|
||||
return err
|
||||
}
|
||||
|
||||
// ClearStale resets in_progress for a specific job if it has been running longer than maxAge.
|
||||
func (c *CronTracker) ClearStale(ctx context.Context, job string, maxAge time.Duration) error {
|
||||
if err := c.ensureTable(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
driver := strings.ToLower(c.db.DB().DriverName())
|
||||
var query string
|
||||
switch driver {
|
||||
case "sqlite":
|
||||
query = `
|
||||
UPDATE cron_status
|
||||
SET in_progress = FALSE
|
||||
WHERE job_name = ?
|
||||
AND in_progress = TRUE
|
||||
AND started_at > 0
|
||||
AND (strftime('%s','now') - started_at) > ?
|
||||
`
|
||||
case "pgx", "postgres":
|
||||
query = `
|
||||
UPDATE cron_status
|
||||
SET in_progress = FALSE
|
||||
WHERE job_name = $1
|
||||
AND in_progress = TRUE
|
||||
AND started_at > 0
|
||||
AND (EXTRACT(EPOCH FROM now())::BIGINT - started_at) > $2
|
||||
`
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
_, err := c.db.DB().ExecContext(ctx, query, job, int64(maxAge.Seconds()))
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *CronTracker) ensureTable(ctx context.Context) error {
|
||||
conn := c.db.DB()
|
||||
driver := conn.DriverName()
|
||||
var ddl string
|
||||
switch driver {
|
||||
case "pgx", "postgres":
|
||||
ddl = `
|
||||
CREATE TABLE IF NOT EXISTS cron_status (
|
||||
job_name TEXT PRIMARY KEY,
|
||||
started_at BIGINT NOT NULL,
|
||||
ended_at BIGINT NOT NULL,
|
||||
duration_ms BIGINT NOT NULL,
|
||||
last_error TEXT,
|
||||
in_progress BOOLEAN NOT NULL DEFAULT FALSE
|
||||
);`
|
||||
default:
|
||||
ddl = `
|
||||
CREATE TABLE IF NOT EXISTS cron_status (
|
||||
job_name TEXT PRIMARY KEY,
|
||||
started_at BIGINT NOT NULL,
|
||||
ended_at BIGINT NOT NULL,
|
||||
duration_ms BIGINT NOT NULL,
|
||||
last_error TEXT,
|
||||
in_progress BOOLEAN NOT NULL DEFAULT FALSE
|
||||
);`
|
||||
}
|
||||
_, err := conn.ExecContext(ctx, ddl)
|
||||
return err
|
||||
}
|
||||
|
||||
// Start marks a job as in-progress; returns a completion callback and whether to skip because it's already running.
|
||||
func (c *CronTracker) Start(ctx context.Context, job string) (func(error), bool, error) {
|
||||
if err := c.ensureTable(ctx); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
conn := c.db.DB()
|
||||
now := time.Now().Unix()
|
||||
|
||||
tx, err := conn.BeginTxx(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
var inProgress bool
|
||||
query := sqlx.Rebind(c.bindType, `SELECT in_progress FROM cron_status WHERE job_name = ?`)
|
||||
err = tx.QueryRowContext(ctx, query, job).Scan(&inProgress)
|
||||
if err != nil {
|
||||
// no row, insert
|
||||
if err := upsertCron(tx, c.bindType, job, now, false); err != nil {
|
||||
tx.Rollback()
|
||||
return nil, false, err
|
||||
}
|
||||
} else {
|
||||
if inProgress {
|
||||
tx.Rollback()
|
||||
return nil, true, nil
|
||||
}
|
||||
if err := markCronStart(tx, c.bindType, job, now); err != nil {
|
||||
tx.Rollback()
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
done := func(runErr error) {
|
||||
_ = c.finish(context.Background(), job, now, runErr)
|
||||
}
|
||||
return done, false, nil
|
||||
}
|
||||
|
||||
func (c *CronTracker) finish(ctx context.Context, job string, startedAt int64, runErr error) error {
|
||||
conn := c.db.DB()
|
||||
duration := time.Since(time.Unix(startedAt, 0)).Milliseconds()
|
||||
tx, err := conn.BeginTxx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lastErr := ""
|
||||
if runErr != nil {
|
||||
lastErr = runErr.Error()
|
||||
}
|
||||
err = upsertCronFinish(tx, c.bindType, job, duration, lastErr)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func upsertCron(tx *sqlx.Tx, bindType int, job string, startedAt int64, inProgress bool) error {
|
||||
query := `
|
||||
INSERT INTO cron_status (job_name, started_at, ended_at, duration_ms, last_error, in_progress)
|
||||
VALUES (?, ?, 0, 0, NULL, ?)
|
||||
ON CONFLICT (job_name) DO UPDATE SET started_at = excluded.started_at, in_progress = excluded.in_progress, ended_at = excluded.ended_at, duration_ms = excluded.duration_ms, last_error = excluded.last_error
|
||||
`
|
||||
_, err := tx.Exec(sqlx.Rebind(bindType, query), job, startedAt, inProgress)
|
||||
return err
|
||||
}
|
||||
|
||||
func markCronStart(tx *sqlx.Tx, bindType int, job string, startedAt int64) error {
|
||||
query := `
|
||||
UPDATE cron_status
|
||||
SET started_at = ?, in_progress = TRUE, ended_at = 0, duration_ms = 0, last_error = NULL
|
||||
WHERE job_name = ?
|
||||
`
|
||||
_, err := tx.Exec(sqlx.Rebind(bindType, query), startedAt, job)
|
||||
return err
|
||||
}
|
||||
|
||||
func upsertCronFinish(tx *sqlx.Tx, bindType int, job string, durationMS int64, lastErr string) error {
|
||||
query := `
|
||||
UPDATE cron_status
|
||||
SET ended_at = ?, duration_ms = ?, last_error = ?, in_progress = FALSE
|
||||
WHERE job_name = ?
|
||||
`
|
||||
_, err := tx.Exec(sqlx.Rebind(bindType, query), time.Now().Unix(), durationMS, nullableString(lastErr), job)
|
||||
return err
|
||||
}
|
||||
|
||||
func nullableString(s string) interface{} {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
return s
|
||||
}
|
||||
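CronTracker is normally reached through runAggregateJob above, but nothing prevents driving it directly for a one-off job. A hedged sketch of that direct use; runOnce and the job name are illustrative names and not part of the package.

```go
package tasks

import (
	"context"

	"vctp/db"
)

// runOnce is a hypothetical helper showing direct CronTracker use: start (or
// skip if a run is already marked in progress), do the work, then report the
// outcome through the done callback.
func runOnce(ctx context.Context, database db.Database) (err error) {
	tracker := NewCronTracker(database)
	done, skip, err := tracker.Start(ctx, "adhoc_rebuild") // job name is illustrative
	if err != nil {
		return err
	}
	if skip {
		return nil // another run is still marked in progress
	}
	defer func() { done(err) }()

	// ... the actual one-off work would go here ...
	return nil
}
```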
1182
internal/tasks/dailyAggregate.go
Normal file
File diff suppressed because it is too large
191
internal/tasks/inventoryDatabase.go
Normal file
@@ -0,0 +1,191 @@
|
||||
package tasks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"vctp/db"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
func insertHourlyCache(ctx context.Context, dbConn *sqlx.DB, rows []InventorySnapshotRow) error {
|
||||
if len(rows) == 0 {
|
||||
return nil
|
||||
}
|
||||
if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
|
||||
return err
|
||||
}
|
||||
driver := strings.ToLower(dbConn.DriverName())
|
||||
conflict := ""
|
||||
verb := "INSERT INTO"
|
||||
if driver == "sqlite" {
|
||||
verb = "INSERT OR REPLACE INTO"
|
||||
} else {
|
||||
conflict = ` ON CONFLICT ("Vcenter","VmId","SnapshotTime") DO UPDATE SET
|
||||
"VmUuid"=EXCLUDED."VmUuid",
|
||||
"Name"=EXCLUDED."Name",
|
||||
"CreationTime"=EXCLUDED."CreationTime",
|
||||
"DeletionTime"=EXCLUDED."DeletionTime",
|
||||
"ResourcePool"=EXCLUDED."ResourcePool",
|
||||
"Datacenter"=EXCLUDED."Datacenter",
|
||||
"Cluster"=EXCLUDED."Cluster",
|
||||
"Folder"=EXCLUDED."Folder",
|
||||
"ProvisionedDisk"=EXCLUDED."ProvisionedDisk",
|
||||
"VcpuCount"=EXCLUDED."VcpuCount",
|
||||
"RamGB"=EXCLUDED."RamGB",
|
||||
"IsTemplate"=EXCLUDED."IsTemplate",
|
||||
"PoweredOn"=EXCLUDED."PoweredOn",
|
||||
"SrmPlaceholder"=EXCLUDED."SrmPlaceholder"`
|
||||
}
|
||||
|
||||
cols := []string{
|
||||
"SnapshotTime", "Vcenter", "VmId", "VmUuid", "Name", "CreationTime", "DeletionTime", "ResourcePool",
|
||||
"Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount", "RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder",
|
||||
}
|
||||
bind := sqlx.BindType(dbConn.DriverName())
|
||||
placeholders := strings.TrimRight(strings.Repeat("?, ", len(cols)), ", ")
|
||||
stmtText := fmt.Sprintf(`%s vm_hourly_stats ("%s") VALUES (%s)%s`, verb, strings.Join(cols, `","`), placeholders, conflict)
|
||||
stmtText = sqlx.Rebind(bind, stmtText)
|
||||
|
||||
tx, err := dbConn.BeginTxx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stmt, err := tx.PreparexContext(ctx, stmtText)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
for _, r := range rows {
|
||||
args := []interface{}{
|
||||
r.SnapshotTime, r.Vcenter, r.VmId, r.VmUuid, r.Name, r.CreationTime, r.DeletionTime, r.ResourcePool,
|
||||
r.Datacenter, r.Cluster, r.Folder, r.ProvisionedDisk, r.VcpuCount, r.RamGB, r.IsTemplate, r.PoweredOn, r.SrmPlaceholder,
|
||||
}
|
||||
if _, err := stmt.ExecContext(ctx, args...); err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func insertHourlyBatch(ctx context.Context, dbConn *sqlx.DB, tableName string, rows []InventorySnapshotRow) error {
|
||||
if len(rows) == 0 {
|
||||
return nil
|
||||
}
|
||||
if err := db.EnsureVmHourlyStats(ctx, dbConn); err != nil {
|
||||
return err
|
||||
}
|
||||
tx, err := dbConn.BeginTxx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
baseCols := []string{
|
||||
"InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
|
||||
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount",
|
||||
"RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SnapshotTime",
|
||||
}
|
||||
bind := sqlx.BindType(dbConn.DriverName())
|
||||
buildStmt := func(cols []string) (*sqlx.Stmt, error) {
|
||||
colList := `"` + strings.Join(cols, `", "`) + `"`
|
||||
placeholders := strings.TrimRight(strings.Repeat("?, ", len(cols)), ", ")
|
||||
return tx.PreparexContext(ctx, sqlx.Rebind(bind, fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, tableName, colList, placeholders)))
|
||||
}
|
||||
|
||||
stmt, err := buildStmt(baseCols)
|
||||
if err != nil {
|
||||
// Fallback for legacy tables that still have IsPresent.
|
||||
withLegacy := append(append([]string{}, baseCols...), "IsPresent")
|
||||
stmt, err = buildStmt(withLegacy)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
for _, row := range rows {
|
||||
args := []interface{}{
|
||||
row.InventoryId,
|
||||
row.Name,
|
||||
row.Vcenter,
|
||||
row.VmId,
|
||||
row.EventKey,
|
||||
row.CloudId,
|
||||
row.CreationTime,
|
||||
row.DeletionTime,
|
||||
row.ResourcePool,
|
||||
row.Datacenter,
|
||||
row.Cluster,
|
||||
row.Folder,
|
||||
row.ProvisionedDisk,
|
||||
row.VcpuCount,
|
||||
row.RamGB,
|
||||
row.IsTemplate,
|
||||
row.PoweredOn,
|
||||
row.SrmPlaceholder,
|
||||
row.VmUuid,
|
||||
row.SnapshotTime,
|
||||
"TRUE",
|
||||
}
|
||||
if _, err := stmt.ExecContext(ctx, args...); err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
for _, row := range rows {
|
||||
args := []interface{}{
|
||||
row.InventoryId,
|
||||
row.Name,
|
||||
row.Vcenter,
|
||||
row.VmId,
|
||||
row.EventKey,
|
||||
row.CloudId,
|
||||
row.CreationTime,
|
||||
row.DeletionTime,
|
||||
row.ResourcePool,
|
||||
row.Datacenter,
|
||||
row.Cluster,
|
||||
row.Folder,
|
||||
row.ProvisionedDisk,
|
||||
row.VcpuCount,
|
||||
row.RamGB,
|
||||
row.IsTemplate,
|
||||
row.PoweredOn,
|
||||
row.SrmPlaceholder,
|
||||
row.VmUuid,
|
||||
row.SnapshotTime,
|
||||
}
|
||||
if _, err := stmt.ExecContext(ctx, args...); err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func dropSnapshotTable(ctx context.Context, dbConn *sqlx.DB, table string) error {
|
||||
if _, err := db.SafeTableName(table); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := dbConn.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", table))
|
||||
return err
|
||||
}
|
||||
|
||||
func clearTable(ctx context.Context, dbConn *sqlx.DB, table string) error {
|
||||
if _, err := db.SafeTableName(table); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := dbConn.ExecContext(ctx, fmt.Sprintf("DELETE FROM %s", table))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to clear table %s: %w", table, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
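Both insert helpers above lean on sqlx.BindType and sqlx.Rebind so the same question-mark SQL works on SQLite and PostgreSQL. A tiny self-contained illustration of what Rebind does to the placeholder text; the table and columns are only examples.

```go
package main

import (
	"fmt"

	"github.com/jmoiron/sqlx"
)

func main() {
	query := `INSERT INTO vm_hourly_stats ("Vcenter", "VmId") VALUES (?, ?)`

	// SQLite-style drivers keep question marks; pgx/postgres needs $1, $2, ...
	fmt.Println(sqlx.Rebind(sqlx.BindType("sqlite"), query))
	fmt.Println(sqlx.Rebind(sqlx.BindType("pgx"), query))
}
```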
548
internal/tasks/inventoryHelpers.go
Normal file
@@ -0,0 +1,548 @@
|
||||
package tasks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"vctp/db"
|
||||
"vctp/db/queries"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
var snapshotProbeLimiter = make(chan struct{}, 1)
|
||||
|
||||
func acquireSnapshotProbe(ctx context.Context) (func(), error) {
|
||||
select {
|
||||
case snapshotProbeLimiter <- struct{}{}:
|
||||
return func() { <-snapshotProbeLimiter }, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func boolStringFromInterface(value interface{}) string {
|
||||
switch v := value.(type) {
|
||||
case nil:
|
||||
return ""
|
||||
case string:
|
||||
return v
|
||||
case []byte:
|
||||
return string(v)
|
||||
case bool:
|
||||
if v {
|
||||
return "TRUE"
|
||||
}
|
||||
return "FALSE"
|
||||
case int:
|
||||
if v != 0 {
|
||||
return "TRUE"
|
||||
}
|
||||
return "FALSE"
|
||||
case int64:
|
||||
if v != 0 {
|
||||
return "TRUE"
|
||||
}
|
||||
return "FALSE"
|
||||
default:
|
||||
return fmt.Sprint(v)
|
||||
}
|
||||
}
|
||||
|
||||
// latestHourlySnapshotBefore finds the most recent hourly snapshot table prior to the given time, skipping empty tables.
|
||||
func latestHourlySnapshotBefore(ctx context.Context, dbConn *sqlx.DB, cutoff time.Time, logger *slog.Logger) (string, error) {
|
||||
tables, err := listLatestHourlyWithRows(ctx, dbConn, "", cutoff.Unix(), 1, logger)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(tables) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return tables[0].Table, nil
|
||||
}
|
||||
|
||||
// parseSnapshotTime extracts the unix suffix from an inventory_hourly table name.
|
||||
func parseSnapshotTime(table string) (int64, bool) {
|
||||
const prefix = "inventory_hourly_"
|
||||
if !strings.HasPrefix(table, prefix) {
|
||||
return 0, false
|
||||
}
|
||||
ts, err := strconv.ParseInt(strings.TrimPrefix(table, prefix), 10, 64)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return ts, true
|
||||
}
|
||||
|
||||
// listLatestHourlyWithRows returns recent hourly snapshot tables (ordered desc by time) that have rows, optionally filtered by vcenter.
|
||||
func listLatestHourlyWithRows(ctx context.Context, dbConn *sqlx.DB, vcenter string, beforeUnix int64, limit int, logger *slog.Logger) ([]snapshotTable, error) {
|
||||
if limit <= 0 {
|
||||
limit = 50
|
||||
}
|
||||
// Rebind keeps the placeholders portable between SQLite (?) and Postgres ($1, $2).
registryQuery := sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), `
SELECT table_name, snapshot_time, snapshot_count
FROM snapshot_registry
WHERE snapshot_type = 'hourly' AND snapshot_time < ?
ORDER BY snapshot_time DESC
LIMIT ?
`)
rows, err := dbConn.QueryxContext(ctx, registryQuery, beforeUnix, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var out []snapshotTable
|
||||
for rows.Next() {
|
||||
var name string
|
||||
var ts int64
|
||||
var count sql.NullInt64
|
||||
if scanErr := rows.Scan(&name, &ts, &count); scanErr != nil {
|
||||
continue
|
||||
}
|
||||
if err := db.ValidateTableName(name); err != nil {
|
||||
continue
|
||||
}
|
||||
if count.Valid && count.Int64 == 0 {
|
||||
if logger != nil {
|
||||
logger.Debug("skipping snapshot table with zero count", "table", name, "snapshot_time", ts, "vcenter", vcenter)
|
||||
}
|
||||
continue
|
||||
}
|
||||
probed := false
|
||||
var probeErr error
|
||||
probeTimeout := false
|
||||
// If count is known and >0, trust it; if NULL, accept optimistically to avoid heavy probes.
|
||||
hasRows := !count.Valid || count.Int64 > 0
|
||||
start := time.Now()
|
||||
if vcenter != "" && hasRows {
|
||||
probed = true
|
||||
probeCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
release, err := acquireSnapshotProbe(probeCtx)
|
||||
if err != nil {
|
||||
probeErr = err
|
||||
hasRows = false
|
||||
cancel()
|
||||
} else {
|
||||
vrows, qerr := querySnapshotRows(probeCtx, dbConn, name, []string{"VmId"}, `"Vcenter" = ? LIMIT 1`, vcenter)
|
||||
if qerr == nil {
|
||||
hasRows = vrows.Next()
|
||||
vrows.Close()
|
||||
} else {
|
||||
probeErr = qerr
|
||||
hasRows = false
|
||||
}
|
||||
release()
|
||||
cancel()
|
||||
}
|
||||
probeTimeout = errors.Is(probeErr, context.DeadlineExceeded) || errors.Is(probeErr, context.Canceled)
|
||||
}
|
||||
elapsed := time.Since(start)
|
||||
if logger != nil {
|
||||
logger.Debug("evaluated snapshot table", "table", name, "snapshot_time", ts, "snapshot_count", count, "probed", probed, "has_rows", hasRows, "elapsed", elapsed, "vcenter", vcenter, "probe_error", probeErr, "probe_timeout", probeTimeout)
|
||||
}
|
||||
if !hasRows {
|
||||
continue
|
||||
}
|
||||
out = append(out, snapshotTable{Table: name, Time: ts, Count: count})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// SnapshotTooSoon reports whether the gap between prev and curr is significantly shorter than expected.
|
||||
func SnapshotTooSoon(prevUnix, currUnix int64, expectedSeconds int64) bool {
|
||||
if prevUnix == 0 || currUnix == 0 || expectedSeconds <= 0 {
|
||||
return false
|
||||
}
|
||||
return currUnix-prevUnix < expectedSeconds
|
||||
}
|
||||
|
||||
// querySnapshotRows builds a SELECT with proper rebind for the given table/columns/where.
|
||||
func querySnapshotRows(ctx context.Context, dbConn *sqlx.DB, table string, columns []string, where string, args ...interface{}) (*sqlx.Rows, error) {
|
||||
if err := db.ValidateTableName(table); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
colExpr := "*"
|
||||
if len(columns) > 0 {
|
||||
colExpr = `"` + strings.Join(columns, `","`) + `"`
|
||||
}
|
||||
query := fmt.Sprintf(`SELECT %s FROM %s`, colExpr, table)
|
||||
if strings.TrimSpace(where) != "" {
|
||||
query = fmt.Sprintf(`%s WHERE %s`, query, where)
|
||||
}
|
||||
query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
|
||||
return dbConn.QueryxContext(ctx, query, args...)
|
||||
}
|
||||
|
||||
func updateDeletionTimeInSnapshot(ctx context.Context, dbConn *sqlx.DB, table, vcenter, vmID, vmUUID, name string, deletionUnix int64) (int64, error) {
|
||||
if err := db.ValidateTableName(table); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
matchColumn := ""
|
||||
matchValue := ""
|
||||
switch {
|
||||
case vmID != "":
|
||||
matchColumn = "VmId"
|
||||
matchValue = vmID
|
||||
case vmUUID != "":
|
||||
matchColumn = "VmUuid"
|
||||
matchValue = vmUUID
|
||||
case name != "":
|
||||
matchColumn = "Name"
|
||||
matchValue = name
|
||||
default:
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(`UPDATE %s SET "DeletionTime" = ? WHERE "Vcenter" = ? AND "%s" = ? AND ("DeletionTime" IS NULL OR "DeletionTime" = 0 OR "DeletionTime" > ?)`, table, matchColumn)
|
||||
query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
|
||||
result, err := dbConn.ExecContext(ctx, query, deletionUnix, vcenter, matchValue, deletionUnix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rowsAffected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return rowsAffected, nil
|
||||
}
|
||||
|
||||
func updateDeletionTimeInHourlyCache(ctx context.Context, dbConn *sqlx.DB, vcenter, vmID, vmUUID, name string, snapshotUnix, deletionUnix int64) (int64, error) {
|
||||
if snapshotUnix <= 0 {
|
||||
return 0, nil
|
||||
}
|
||||
matchColumn := ""
|
||||
matchValue := ""
|
||||
switch {
|
||||
case vmID != "":
|
||||
matchColumn = "VmId"
|
||||
matchValue = vmID
|
||||
case vmUUID != "":
|
||||
matchColumn = "VmUuid"
|
||||
matchValue = vmUUID
|
||||
case name != "":
|
||||
matchColumn = "Name"
|
||||
matchValue = name
|
||||
default:
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
query := fmt.Sprintf(`UPDATE vm_hourly_stats SET "DeletionTime" = ? WHERE "Vcenter" = ? AND "SnapshotTime" = ? AND "%s" = ? AND ("DeletionTime" IS NULL OR "DeletionTime" = 0 OR "DeletionTime" > ?)`, matchColumn)
|
||||
query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
|
||||
result, err := dbConn.ExecContext(ctx, query, deletionUnix, vcenter, snapshotUnix, matchValue, deletionUnix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
rowsAffected, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return rowsAffected, nil
|
||||
}
|
||||
|
||||
// markMissingFromPrevious marks VMs that were present in the previous snapshot but missing now.
|
||||
func (c *CronTask) markMissingFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, snapshotTime time.Time,
|
||||
currentByID map[string]InventorySnapshotRow, currentByUuid map[string]struct{}, currentByName map[string]struct{},
|
||||
invByID map[string]queries.Inventory, invByUuid map[string]queries.Inventory, invByName map[string]queries.Inventory) (int, bool) {
|
||||
|
||||
if err := db.ValidateTableName(prevTable); err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
type prevRow struct {
|
||||
VmId sql.NullString `db:"VmId"`
|
||||
VmUuid sql.NullString `db:"VmUuid"`
|
||||
Name string `db:"Name"`
|
||||
Cluster sql.NullString `db:"Cluster"`
|
||||
Datacenter sql.NullString `db:"Datacenter"`
|
||||
DeletionTime sql.NullInt64 `db:"DeletionTime"`
|
||||
}
|
||||
|
||||
rows, err := querySnapshotRows(ctx, dbConn, prevTable, []string{"VmId", "VmUuid", "Name", "Cluster", "Datacenter", "DeletionTime"}, `"Vcenter" = ?`, vcenter)
|
||||
if err != nil {
|
||||
c.Logger.Warn("failed to read previous snapshot for deletion detection", "error", err, "table", prevTable, "vcenter", vcenter)
|
||||
return 0, false
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
missing := 0
|
||||
tableUpdated := false
|
||||
for rows.Next() {
|
||||
var r prevRow
|
||||
if err := rows.StructScan(&r); err != nil {
|
||||
continue
|
||||
}
|
||||
vmID := r.VmId.String
|
||||
uuid := r.VmUuid.String
|
||||
name := r.Name
|
||||
cluster := r.Cluster.String
|
||||
|
||||
found := false
|
||||
if vmID != "" {
|
||||
if _, ok := currentByID[vmID]; ok {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found && uuid != "" {
|
||||
if _, ok := currentByUuid[uuid]; ok {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found && name != "" {
|
||||
if _, ok := currentByName[name]; ok {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
// If the name is missing but UUID+Cluster still exists in inventory/current, treat it as present (rename, not delete).
|
||||
if !found && uuid != "" && cluster != "" {
|
||||
if inv, ok := invByUuid[uuid]; ok && strings.EqualFold(inv.Cluster.String, cluster) {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if found {
|
||||
continue
|
||||
}
|
||||
|
||||
var inv queries.Inventory
|
||||
var ok bool
|
||||
if vmID != "" {
|
||||
inv, ok = invByID[vmID]
|
||||
}
|
||||
if !ok && uuid != "" {
|
||||
inv, ok = invByUuid[uuid]
|
||||
}
|
||||
if !ok && name != "" {
|
||||
inv, ok = invByName[name]
|
||||
}
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
delTime := inv.DeletionTime
|
||||
if !delTime.Valid {
|
||||
delTime = sql.NullInt64{Int64: snapshotTime.Unix(), Valid: true}
|
||||
if err := c.Database.Queries().InventoryMarkDeleted(ctx, queries.InventoryMarkDeletedParams{
|
||||
DeletionTime: delTime,
|
||||
VmId: inv.VmId,
|
||||
DatacenterName: inv.Datacenter,
|
||||
}); err != nil {
|
||||
c.Logger.Warn("failed to mark inventory record deleted from previous snapshot", "error", err, "vm_id", inv.VmId.String)
|
||||
}
|
||||
}
|
||||
// Also update lifecycle cache so deletion time is available for rollups.
|
||||
vmUUID := ""
|
||||
if inv.VmUuid.Valid {
|
||||
vmUUID = inv.VmUuid.String
|
||||
}
|
||||
if err := db.MarkVmDeletedWithDetails(ctx, dbConn, vcenter, inv.VmId.String, vmUUID, inv.Name, inv.Cluster.String, delTime.Int64); err != nil {
|
||||
c.Logger.Warn("failed to mark lifecycle cache deleted from previous snapshot", "error", err, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
|
||||
}
|
||||
if rowsAffected, err := updateDeletionTimeInSnapshot(ctx, dbConn, prevTable, vcenter, inv.VmId.String, vmUUID, inv.Name, delTime.Int64); err != nil {
|
||||
c.Logger.Warn("failed to update hourly snapshot deletion time", "error", err, "table", prevTable, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
|
||||
} else if rowsAffected > 0 {
|
||||
tableUpdated = true
|
||||
c.Logger.Debug("updated hourly snapshot deletion time", "table", prevTable, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter, "deletion_time", delTime.Int64)
|
||||
if snapUnix, ok := parseSnapshotTime(prevTable); ok {
|
||||
if cacheRows, err := updateDeletionTimeInHourlyCache(ctx, dbConn, vcenter, inv.VmId.String, vmUUID, inv.Name, snapUnix, delTime.Int64); err != nil {
|
||||
c.Logger.Warn("failed to update hourly cache deletion time", "error", err, "snapshot_time", snapUnix, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter)
|
||||
} else if cacheRows > 0 {
|
||||
c.Logger.Debug("updated hourly cache deletion time", "snapshot_time", snapUnix, "vm_id", inv.VmId.String, "vm_uuid", vmUUID, "vcenter", vcenter, "deletion_time", delTime.Int64)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.Logger.Debug("Detected VM missing compared to previous snapshot", "name", inv.Name, "vm_id", inv.VmId.String, "vm_uuid", inv.VmUuid.String, "vcenter", vcenter, "snapshot_time", snapshotTime, "prev_table", prevTable)
|
||||
missing++
|
||||
}
|
||||
|
||||
return missing, tableUpdated
|
||||
}
|
||||
|
||||
// countNewFromPrevious returns how many VMs are present in the current snapshot but not in the previous snapshot.
|
||||
func countNewFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, current map[string]InventorySnapshotRow) int {
|
||||
if err := db.ValidateTableName(prevTable); err != nil {
|
||||
return len(current)
|
||||
}
|
||||
query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name" FROM %s WHERE "Vcenter" = ?`, prevTable)
|
||||
query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
|
||||
|
||||
rows, err := dbConn.QueryxContext(ctx, query, vcenter)
|
||||
if err != nil {
|
||||
return len(current)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
prevIDs := make(map[string]struct{})
|
||||
prevUUIDs := make(map[string]struct{})
|
||||
prevNames := make(map[string]struct{})
|
||||
for rows.Next() {
|
||||
var vmID, vmUUID, name string
|
||||
if scanErr := rows.Scan(&vmID, &vmUUID, &name); scanErr != nil {
|
||||
continue
|
||||
}
|
||||
if vmID != "" {
|
||||
prevIDs[vmID] = struct{}{}
|
||||
}
|
||||
if vmUUID != "" {
|
||||
prevUUIDs[vmUUID] = struct{}{}
|
||||
}
|
||||
if name != "" {
|
||||
prevNames[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
newCount := 0
|
||||
for _, cur := range current {
|
||||
id := cur.VmId.String
|
||||
uuid := cur.VmUuid.String
|
||||
name := cur.Name
|
||||
if id != "" {
|
||||
if _, ok := prevIDs[id]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if uuid != "" {
|
||||
if _, ok := prevUUIDs[uuid]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if name != "" {
|
||||
if _, ok := prevNames[name]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
newCount++
|
||||
}
|
||||
return newCount
|
||||
}
|
||||
|
||||
// listNewFromPrevious returns the rows present now but not in the previous snapshot.
|
||||
func listNewFromPrevious(ctx context.Context, dbConn *sqlx.DB, prevTable string, vcenter string, current map[string]InventorySnapshotRow) []InventorySnapshotRow {
|
||||
if err := db.ValidateTableName(prevTable); err != nil {
|
||||
all := make([]InventorySnapshotRow, 0, len(current))
|
||||
for _, cur := range current {
|
||||
all = append(all, cur)
|
||||
}
|
||||
return all
|
||||
}
|
||||
query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name" FROM %s WHERE "Vcenter" = ?`, prevTable)
|
||||
query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
|
||||
|
||||
rows, err := dbConn.QueryxContext(ctx, query, vcenter)
|
||||
if err != nil {
|
||||
all := make([]InventorySnapshotRow, 0, len(current))
|
||||
for _, cur := range current {
|
||||
all = append(all, cur)
|
||||
}
|
||||
return all
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
prevIDs := make(map[string]struct{})
|
||||
prevUUIDs := make(map[string]struct{})
|
||||
prevNames := make(map[string]struct{})
|
||||
for rows.Next() {
|
||||
var vmID, vmUUID, name string
|
||||
if scanErr := rows.Scan(&vmID, &vmUUID, &name); scanErr != nil {
|
||||
continue
|
||||
}
|
||||
if vmID != "" {
|
||||
prevIDs[vmID] = struct{}{}
|
||||
}
|
||||
if vmUUID != "" {
|
||||
prevUUIDs[vmUUID] = struct{}{}
|
||||
}
|
||||
if name != "" {
|
||||
prevNames[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
newRows := make([]InventorySnapshotRow, 0)
|
||||
for _, cur := range current {
|
||||
id := cur.VmId.String
|
||||
uuid := cur.VmUuid.String
|
||||
name := cur.Name
|
||||
if id != "" {
|
||||
if _, ok := prevIDs[id]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if uuid != "" {
|
||||
if _, ok := prevUUIDs[uuid]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if name != "" {
|
||||
if _, ok := prevNames[name]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
newRows = append(newRows, cur)
|
||||
}
|
||||
return newRows
|
||||
}
|
||||
|
||||
// findVMInHourlySnapshots searches recent hourly snapshot tables for a VM by ID for the given vCenter.
|
||||
// extraTables are searched first (e.g., known previous snapshot tables).
|
||||
func findVMInHourlySnapshots(ctx context.Context, dbConn *sqlx.DB, vcenter string, vmID string, extraTables ...string) (InventorySnapshotRow, string, bool) {
|
||||
if vmID == "" {
|
||||
return InventorySnapshotRow{}, "", false
|
||||
}
|
||||
// Use a short timeout to avoid hanging if the DB is busy.
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// First search any explicit tables provided.
|
||||
for _, table := range extraTables {
|
||||
if table == "" {
|
||||
continue
|
||||
}
|
||||
if err := db.ValidateTableName(table); err != nil {
|
||||
continue
|
||||
}
|
||||
query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name","Datacenter","Cluster" FROM %s WHERE "Vcenter" = ? AND "VmId" = ? LIMIT 1`, table)
|
||||
query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
|
||||
var row InventorySnapshotRow
|
||||
if err := dbConn.QueryRowxContext(ctx, query, vcenter, vmID).Scan(&row.VmId, &row.VmUuid, &row.Name, &row.Datacenter, &row.Cluster); err == nil {
|
||||
return row, table, true
|
||||
}
|
||||
}
|
||||
|
||||
// Try a handful of most recent hourly tables from the registry.
|
||||
rows, err := dbConn.QueryxContext(ctx, `
|
||||
SELECT table_name
|
||||
FROM snapshot_registry
|
||||
WHERE snapshot_type = 'hourly'
|
||||
ORDER BY snapshot_time DESC
|
||||
LIMIT 20
|
||||
`)
|
||||
if err != nil {
|
||||
return InventorySnapshotRow{}, "", false
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
checked := 0
|
||||
for rows.Next() {
|
||||
var table string
|
||||
if scanErr := rows.Scan(&table); scanErr != nil {
|
||||
continue
|
||||
}
|
||||
if err := db.ValidateTableName(table); err != nil {
|
||||
continue
|
||||
}
|
||||
query := fmt.Sprintf(`SELECT "VmId","VmUuid","Name","Datacenter","Cluster" FROM %s WHERE "Vcenter" = ? AND "VmId" = ? LIMIT 1`, table)
|
||||
query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query)
|
||||
var row InventorySnapshotRow
|
||||
if err := dbConn.QueryRowxContext(ctx, query, vcenter, vmID).Scan(&row.VmId, &row.VmUuid, &row.Name, &row.Datacenter, &row.Cluster); err == nil {
|
||||
return row, table, true
|
||||
}
|
||||
checked++
|
||||
if checked >= 10 { // limit work
|
||||
break
|
||||
}
|
||||
}
|
||||
return InventorySnapshotRow{}, "", false
|
||||
}
|
||||
290
internal/tasks/inventoryLifecycle.go
Normal file
@@ -0,0 +1,290 @@
|
||||
package tasks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"vctp/db"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
// presenceKeys builds lookup keys for vm presence comparison.
|
||||
func presenceKeys(vmID, vmUUID, name string) []string {
|
||||
keys := make([]string, 0, 3)
|
||||
if vmID != "" {
|
||||
keys = append(keys, "id:"+vmID)
|
||||
}
|
||||
if vmUUID != "" {
|
||||
keys = append(keys, "uuid:"+vmUUID)
|
||||
}
|
||||
if name != "" {
|
||||
keys = append(keys, "name:"+name)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// backfillLifecycleDeletionsToday looks for VMs in the lifecycle cache that are not in the current inventory,
|
||||
// have no DeletedAt, and determines their deletion time from today's hourly snapshots, optionally checking the next snapshot (next day) to confirm.
|
||||
// It returns any hourly snapshot tables that were updated with deletion times.
|
||||
func backfillLifecycleDeletionsToday(ctx context.Context, logger *slog.Logger, dbConn *sqlx.DB, vcenter string, snapshotTime time.Time, present map[string]InventorySnapshotRow) ([]string, error) {
|
||||
dayStart := truncateDate(snapshotTime)
|
||||
dayEnd := dayStart.Add(24 * time.Hour)
|
||||
|
||||
candidates, err := loadLifecycleCandidates(ctx, dbConn, vcenter, present)
|
||||
if err != nil || len(candidates) == 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tables, err := listHourlyTablesForDay(ctx, dbConn, dayStart, dayEnd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(tables) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
nextPresence := make(map[string]struct{})
|
||||
if nextTable, nextErr := nextSnapshotAfter(ctx, dbConn, dayEnd, vcenter); nextErr == nil && nextTable != "" {
|
||||
nextPresence = loadPresenceKeys(ctx, dbConn, nextTable, vcenter)
|
||||
}
|
||||
|
||||
updatedTables := make(map[string]struct{})
|
||||
for i := range candidates {
|
||||
cand := &candidates[i]
|
||||
deletion, firstMiss, lastSeenTable := findDeletionInTables(ctx, dbConn, tables, vcenter, cand)
|
||||
if deletion == 0 && len(nextPresence) > 0 && firstMiss > 0 {
|
||||
if !isPresent(nextPresence, *cand) {
|
||||
// Single miss at end of day, confirmed by next-day absence.
|
||||
deletion = firstMiss
|
||||
logger.Debug("cross-day deletion inferred from next snapshot", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "deletion", deletion)
|
||||
}
|
||||
}
|
||||
if deletion > 0 {
|
||||
if err := db.MarkVmDeletedWithDetails(ctx, dbConn, vcenter, cand.vmID, cand.vmUUID, cand.name, cand.cluster, deletion); err != nil {
|
||||
logger.Warn("lifecycle backfill mark deleted failed", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "deletion", deletion, "error", err)
|
||||
continue
|
||||
}
|
||||
if lastSeenTable != "" {
|
||||
if rowsAffected, err := updateDeletionTimeInSnapshot(ctx, dbConn, lastSeenTable, vcenter, cand.vmID, cand.vmUUID, cand.name, deletion); err != nil {
|
||||
logger.Warn("lifecycle backfill failed to update hourly snapshot deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "table", lastSeenTable, "deletion", deletion, "error", err)
|
||||
} else if rowsAffected > 0 {
|
||||
updatedTables[lastSeenTable] = struct{}{}
|
||||
logger.Debug("lifecycle backfill updated hourly snapshot deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "table", lastSeenTable, "deletion", deletion)
|
||||
if snapUnix, ok := parseSnapshotTime(lastSeenTable); ok {
|
||||
if cacheRows, err := updateDeletionTimeInHourlyCache(ctx, dbConn, vcenter, cand.vmID, cand.vmUUID, cand.name, snapUnix, deletion); err != nil {
|
||||
logger.Warn("lifecycle backfill failed to update hourly cache deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "snapshot_time", snapUnix, "deletion", deletion, "error", err)
|
||||
} else if cacheRows > 0 {
|
||||
logger.Debug("lifecycle backfill updated hourly cache deletion time", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "snapshot_time", snapUnix, "deletion", deletion)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
logger.Debug("lifecycle backfill applied", "vcenter", vcenter, "vm_id", cand.vmID, "vm_uuid", cand.vmUUID, "name", cand.name, "cluster", cand.cluster, "deletion", deletion)
|
||||
}
|
||||
}
|
||||
if len(updatedTables) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
tablesUpdated := make([]string, 0, len(updatedTables))
|
||||
for table := range updatedTables {
|
||||
tablesUpdated = append(tablesUpdated, table)
|
||||
}
|
||||
return tablesUpdated, nil
|
||||
}
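A minimal sketch of how this backfill might be invoked from the hourly snapshot task. The call site below is an assumption for illustration (the real caller lives in inventorySnapshots.go, not shown here); ctx, logger, dbConn, the vCenter URL and the presentRows map are whatever that caller already holds.

// presentRows is assumed to hold the VMs recorded in the snapshot that was just written, keyed by VmId.
presentRows := make(map[string]InventorySnapshotRow)
updatedTables, err := backfillLifecycleDeletionsToday(ctx, logger, dbConn, "vc01.example.com", time.Now(), presentRows)
if err != nil {
    logger.Warn("lifecycle deletion backfill failed", "error", err)
} else if len(updatedTables) > 0 {
    logger.Debug("hourly snapshots updated with deletion times", "tables", updatedTables)
}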
|
||||
|
||||
type lifecycleCandidate struct {
|
||||
vmID string
|
||||
vmUUID string
|
||||
name string
|
||||
cluster string
|
||||
}
|
||||
|
||||
func loadLifecycleCandidates(ctx context.Context, dbConn *sqlx.DB, vcenter string, present map[string]InventorySnapshotRow) ([]lifecycleCandidate, error) {
|
||||
rows, err := dbConn.QueryxContext(ctx, `
|
||||
SELECT "VmId","VmUuid","Name","Cluster"
|
||||
FROM vm_lifecycle_cache
|
||||
WHERE "Vcenter" = ? AND ("DeletedAt" IS NULL OR "DeletedAt" = 0)
|
||||
`, vcenter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var cands []lifecycleCandidate
|
||||
for rows.Next() {
|
||||
var vmID, vmUUID, name, cluster sql.NullString
|
||||
if scanErr := rows.Scan(&vmID, &vmUUID, &name, &cluster); scanErr != nil {
|
||||
continue
|
||||
}
|
||||
if vmID.String == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := present[vmID.String]; ok {
|
||||
continue // still present, skip
|
||||
}
|
||||
cands = append(cands, lifecycleCandidate{
|
||||
vmID: vmID.String,
|
||||
vmUUID: vmUUID.String,
|
||||
name: name.String,
|
||||
cluster: cluster.String,
|
||||
})
|
||||
}
|
||||
return cands, nil
|
||||
}
|
||||
|
||||
type snapshotTable struct {
|
||||
Table string `db:"table_name"`
|
||||
Time int64 `db:"snapshot_time"`
|
||||
Count sql.NullInt64 `db:"snapshot_count"`
|
||||
}
|
||||
|
||||
func listHourlyTablesForDay(ctx context.Context, dbConn *sqlx.DB, dayStart, dayEnd time.Time) ([]snapshotTable, error) {
|
||||
log := loggerFromCtx(ctx, nil)
|
||||
rows, err := dbConn.QueryxContext(ctx, `
|
||||
SELECT table_name, snapshot_time, snapshot_count
|
||||
FROM snapshot_registry
|
||||
WHERE snapshot_type = 'hourly' AND snapshot_time >= ? AND snapshot_time < ?
|
||||
ORDER BY snapshot_time ASC
|
||||
`, dayStart.Unix(), dayEnd.Unix())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var tables []snapshotTable
|
||||
for rows.Next() {
|
||||
var t snapshotTable
|
||||
if err := rows.StructScan(&t); err != nil {
|
||||
continue
|
||||
}
|
||||
if err := db.ValidateTableName(t.Table); err != nil {
|
||||
continue
|
||||
}
|
||||
// Trust snapshot_count if present; otherwise optimistically include to avoid long probes.
|
||||
if t.Count.Valid && t.Count.Int64 <= 0 {
|
||||
if log != nil {
|
||||
log.Debug("skipping snapshot table with zero count", "table", t.Table, "snapshot_time", t.Time)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tables = append(tables, t)
|
||||
}
|
||||
return tables, nil
|
||||
}
|
||||
|
||||
func nextSnapshotAfter(ctx context.Context, dbConn *sqlx.DB, after time.Time, vcenter string) (string, error) {
|
||||
rows, err := dbConn.QueryxContext(ctx, `
|
||||
SELECT table_name
|
||||
FROM snapshot_registry
|
||||
WHERE snapshot_type = 'hourly' AND snapshot_time >= ?
|
||||
ORDER BY snapshot_time ASC
|
||||
LIMIT 1
|
||||
`, after.Unix())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var name string
|
||||
if err := rows.Scan(&name); err != nil {
|
||||
continue
|
||||
}
|
||||
if err := db.ValidateTableName(name); err != nil {
|
||||
continue
|
||||
}
|
||||
// ensure the snapshot table actually has entries for this vcenter
|
||||
vrows, qerr := querySnapshotRows(ctx, dbConn, name, []string{"VmId"}, `"Vcenter" = ? LIMIT 1`, vcenter)
|
||||
if qerr != nil {
|
||||
continue
|
||||
}
|
||||
hasVcenter := vrows.Next()
|
||||
vrows.Close()
|
||||
if hasVcenter {
|
||||
return name, nil
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func loadPresenceKeys(ctx context.Context, dbConn *sqlx.DB, table, vcenter string) map[string]struct{} {
|
||||
out := make(map[string]struct{})
|
||||
rows, err := querySnapshotRows(ctx, dbConn, table, []string{"VmId", "VmUuid", "Name"}, `"Vcenter" = ?`, vcenter)
|
||||
if err != nil {
|
||||
return out
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var vmId, vmUuid, name sql.NullString
|
||||
if err := rows.Scan(&vmId, &vmUuid, &name); err == nil {
|
||||
for _, k := range presenceKeys(vmId.String, vmUuid.String, name.String) {
|
||||
out[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func isPresent(presence map[string]struct{}, cand lifecycleCandidate) bool {
|
||||
for _, k := range presenceKeys(cand.vmID, cand.vmUUID, cand.name) {
|
||||
if _, ok := presence[k]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
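A small illustration (made-up values) of how presenceKeys and isPresent cooperate: a candidate counts as present if any one of its id, uuid or name keys appears in the next snapshot's key set.

presence := make(map[string]struct{})
for _, k := range presenceKeys("vm-42", "4221d1f0-0000-0000-0000-000000000042", "db01") {
    presence[k] = struct{}{} // "id:vm-42", "uuid:4221d1f0-...", "name:db01"
}
cand := lifecycleCandidate{vmID: "vm-9999", name: "db01"} // re-registered VM: new id, same name
if isPresent(presence, cand) {
    // treated as still present: the "name:db01" key matches even though the id changed
}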
|
||||
|
||||
// findDeletionInTables walks ordered hourly tables for a vCenter and returns the first confirmed deletion time
|
||||
// (requiring two consecutive misses), the time of the first miss for cross-day handling, and the last table where
|
||||
// the VM was seen so we can backfill deletion time into that snapshot.
|
||||
func findDeletionInTables(ctx context.Context, dbConn *sqlx.DB, tables []snapshotTable, vcenter string, cand *lifecycleCandidate) (int64, int64, string) {
|
||||
var lastSeen int64
|
||||
var lastSeenTable string
|
||||
var firstMiss int64
|
||||
for i, tbl := range tables {
|
||||
rows, err := querySnapshotRows(ctx, dbConn, tbl.Table, []string{"VmId", "VmUuid", "Name", "Cluster"}, `"Vcenter" = ? AND "VmId" = ?`, vcenter, cand.vmID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
seen := false
|
||||
if rows.Next() {
|
||||
var vmId, vmUuid, name, cluster sql.NullString
|
||||
if scanErr := rows.Scan(&vmId, &vmUuid, &name, &cluster); scanErr == nil {
|
||||
seen = true
|
||||
lastSeen = tbl.Time
|
||||
lastSeenTable = tbl.Table
|
||||
if cand.vmUUID == "" && vmUuid.Valid {
|
||||
cand.vmUUID = vmUuid.String
|
||||
}
|
||||
if cand.name == "" && name.Valid {
|
||||
cand.name = name.String
|
||||
}
|
||||
if cand.cluster == "" && cluster.Valid {
|
||||
cand.cluster = cluster.String
|
||||
}
|
||||
}
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
if lastSeen > 0 && !seen && firstMiss == 0 {
|
||||
firstMiss = tbl.Time
|
||||
if i+1 < len(tables) {
|
||||
if seen2, _ := candSeenInTable(ctx, dbConn, tables[i+1].Table, vcenter, cand.vmID); !seen2 {
|
||||
return firstMiss, firstMiss, lastSeenTable
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0, firstMiss, lastSeenTable
|
||||
}
|
||||
|
||||
func candSeenInTable(ctx context.Context, dbConn *sqlx.DB, table, vcenter, vmID string) (bool, error) {
|
||||
rows, err := querySnapshotRows(ctx, dbConn, table, []string{"VmId"}, `"Vcenter" = ? AND "VmId" = ? LIMIT 1`, vcenter, vmID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer rows.Close()
|
||||
return rows.Next(), nil
|
||||
}
|
||||
1402
internal/tasks/inventorySnapshots.go
Normal file
File diff suppressed because it is too large
431
internal/tasks/monitorVcenter.go
Normal file
@@ -0,0 +1,431 @@
|
||||
package tasks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
"vctp/db/queries"
|
||||
"vctp/internal/utils"
|
||||
"vctp/internal/vcenter"
|
||||
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
// use gocron to check vcenters for VMs or updates we don't know about
|
||||
func (c *CronTask) RunVcenterPoll(ctx context.Context, logger *slog.Logger) error {
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
logger.Info("Vcenter poll job finished", "duration", time.Since(startedAt))
|
||||
}()
|
||||
var matchFound bool
|
||||
|
||||
// reload settings in case vcenter list has changed
|
||||
c.Settings.ReadYMLSettings()
|
||||
|
||||
for _, url := range c.Settings.Values.Settings.VcenterAddresses {
|
||||
c.Logger.Debug("connecting to vcenter", "url", url)
|
||||
vc := vcenter.New(c.Logger, c.VcCreds)
|
||||
vc.Login(url)
|
||||
|
||||
// Get list of VMs from vcenter
|
||||
vcVms, err := vc.GetAllVmReferences()
if err != nil {
c.Logger.Error("Unable to list VMs from vcenter", "url", url, "error", err)
continue
}

// Get list of VMs from inventory table
c.Logger.Debug("Querying inventory table")
results, err := c.Database.Queries().GetInventoryByVcenter(ctx, url)
|
||||
if err != nil {
|
||||
c.Logger.Error("Unable to query inventory table", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(results) == 0 {
|
||||
c.Logger.Error("Empty inventory results")
|
||||
return fmt.Errorf("Empty inventory results")
|
||||
}
|
||||
|
||||
// Iterate VMs from vcenter and see if they were in the database
|
||||
for _, vm := range vcVms {
|
||||
matchFound = false
|
||||
|
||||
// Skip any vCLS VMs
|
||||
if strings.HasPrefix(vm.Name(), "vCLS-") {
|
||||
//c.Logger.Debug("Skipping internal VM", "vm_name", vm.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO - should we compare the UUID as well?
|
||||
for _, dbvm := range results {
|
||||
if dbvm.VmId.String == vm.Reference().Value {
|
||||
//c.Logger.Debug("Found match for VM", "vm_name", dbvm.Name, "id", dbvm.VmId.String)
|
||||
matchFound = true
|
||||
|
||||
// Get the full VM object
|
||||
vmObj, err := vc.ConvertObjToMoVM(vm)
|
||||
if err != nil {
|
||||
c.Logger.Error("Failed to find VM in vcenter", "vm_id", dbvm.VmId.String, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if vmObj.Config == nil {
|
||||
c.Logger.Error("VM has no config properties", "vm_id", dbvm.VmId.String, "vm_name", vmObj.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that this is definitely the right VM
|
||||
if dbvm.VmUuid.String == vmObj.Config.Uuid {
|
||||
// TODO - compare database against current values, create update record if not matching
|
||||
err = c.UpdateVmInventory(vmObj, vc, ctx, dbvm)
|
||||
} else {
|
||||
c.Logger.Error("VM uuid doesn't match database record", "vm_name", dbvm.Name, "id", dbvm.VmId.String, "vc_uuid", vmObj.Config.Uuid, "db_uuid", dbvm.VmUuid.String)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !matchFound {
|
||||
c.Logger.Debug("Need to add VM to inventory table", "MoRef", vm.Reference())
|
||||
vmObj, err := vc.ConvertObjToMoVM(vm)
|
||||
if err != nil {
|
||||
c.Logger.Error("Received error getting vm maangedobject", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// retrieve VM properties and insert into inventory
|
||||
err = c.AddVmToInventory(vmObj, vc, ctx)
|
||||
if err != nil {
|
||||
c.Logger.Error("Received error with VM add", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// add sleep to slow down mass VM additions
|
||||
utils.SleepWithContext(ctx, (10 * time.Millisecond))
|
||||
}
|
||||
}
|
||||
c.Logger.Debug("Finished checking vcenter", "url", url)
|
||||
_ = vc.Logout(ctx)
|
||||
}
|
||||
|
||||
c.Logger.Debug("Finished polling vcenters")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateVmInventory compares the database record against current vCenter values and creates an update record if they do not match
|
||||
func (c *CronTask) UpdateVmInventory(vmObj *mo.VirtualMachine, vc *vcenter.Vcenter, ctx context.Context, dbVm queries.Inventory) error {
|
||||
var (
|
||||
err error
|
||||
numVcpus int32
|
||||
numRam int32
|
||||
srmPlaceholder string
|
||||
updateType string
|
||||
rpName string
|
||||
existingUpdateFound bool
|
||||
)
|
||||
|
||||
params := queries.CreateUpdateParams{
|
||||
InventoryId: sql.NullInt64{Int64: dbVm.Iid, Valid: dbVm.Iid > 0},
|
||||
}
|
||||
srmPlaceholder = "FALSE" // default value
|
||||
updateType = "unknown" // default value
|
||||
existingUpdateFound = false // default value
|
||||
numRam = vmObj.Config.Hardware.MemoryMB
|
||||
numVcpus = vmObj.Config.Hardware.NumCPU
|
||||
|
||||
if numRam != int32(dbVm.InitialRam.Int64) {
|
||||
params.NewRam = sql.NullInt64{Int64: int64(numRam), Valid: numRam > 0}
|
||||
updateType = "reconfigure"
|
||||
}
|
||||
|
||||
if numVcpus != int32(dbVm.InitialVcpus.Int64) {
|
||||
params.NewVcpus = sql.NullInt64{Int64: int64(numVcpus), Valid: numVcpus > 0}
|
||||
updateType = "reconfigure"
|
||||
}
|
||||
|
||||
// Determine if the VM is a normal VM or an SRM placeholder
|
||||
if vmObj.Config.ManagedBy != nil && vmObj.Config.ManagedBy.ExtensionKey == "com.vmware.vcDr" {
|
||||
if vmObj.Config.ManagedBy.Type == "placeholderVm" {
|
||||
c.Logger.Debug("VM is a placeholder")
|
||||
srmPlaceholder = "TRUE"
|
||||
} else {
|
||||
//c.Logger.Debug("VM is managed by SRM but not a placeholder", "details", vmObj.Config.ManagedBy)
|
||||
}
|
||||
}
|
||||
|
||||
if srmPlaceholder != dbVm.SrmPlaceholder {
|
||||
c.Logger.Debug("VM has changed placeholder type", "db_value", dbVm.SrmPlaceholder, "current_Value", srmPlaceholder)
|
||||
params.PlaceholderChange = sql.NullString{String: srmPlaceholder, Valid: srmPlaceholder != ""}
|
||||
if updateType == "unknown" {
|
||||
updateType = "srm"
|
||||
}
|
||||
}
|
||||
|
||||
rpName, err = vc.GetVmResourcePool(*vmObj)
|
||||
if err != nil {
|
||||
c.Logger.Error("Unable to determine resource pool name", "error", err)
|
||||
}
|
||||
if rpName != dbVm.ResourcePool.String {
|
||||
c.Logger.Debug("VM has changed resource pool", "db_value", dbVm.ResourcePool.String, "current_Value", rpName)
|
||||
params.NewResourcePool = sql.NullString{String: rpName, Valid: rpName != ""}
|
||||
if updateType == "unknown" {
|
||||
updateType = "move"
|
||||
}
|
||||
}
|
||||
|
||||
if updateType != "unknown" {
|
||||
// Check if we already have an existing update record for this same change
|
||||
checkParams := queries.GetVmUpdatesParams{
|
||||
InventoryId: sql.NullInt64{Int64: dbVm.Iid, Valid: dbVm.Iid > 0},
|
||||
UpdateType: updateType,
|
||||
}
|
||||
|
||||
existingUpdates, err := c.Database.Queries().GetVmUpdates(ctx, checkParams)
|
||||
if err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
c.Logger.Debug("No update records found")
|
||||
} else {
|
||||
c.Logger.Error("Unbale to query database for vm update records", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, u := range existingUpdates {
|
||||
// check if we already recorded this same update
|
||||
if u.UpdateType == updateType {
|
||||
switch u.UpdateType {
|
||||
case "srm":
|
||||
if u.PlaceholderChange.String == srmPlaceholder {
|
||||
c.Logger.Debug("SRM update already exists for vm", "update_value", u.PlaceholderChange.String, "inventory_id", u.InventoryId.Int64, "vm_name", u.Name.String)
|
||||
existingUpdateFound = true
|
||||
}
|
||||
case "move":
|
||||
if u.NewResourcePool.String == rpName {
|
||||
c.Logger.Debug("Resource pool update already exists for vm", "update_value", u.NewResourcePool.String, "inventory_id", u.InventoryId.Int64, "vm_name", u.Name.String)
|
||||
existingUpdateFound = true
|
||||
}
|
||||
case "reconfigure":
|
||||
if u.NewRam.Int64 == int64(numRam) || u.NewVcpus.Int64 == int64(numVcpus) {
|
||||
c.Logger.Debug("RAM/vCPU update already exists for vm", "update_ram", u.NewRam.Int64, "update_vcpu", u.NewVcpus.Int64, "inventory_id", u.InventoryId.Int64, "vm_name", u.Name.String)
|
||||
existingUpdateFound = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !existingUpdateFound {
|
||||
params.UpdateType = updateType
|
||||
updateTime := time.Now().Unix()
|
||||
params.UpdateTime = sql.NullInt64{Int64: updateTime, Valid: updateTime > 0}
|
||||
c.Logger.Info("Detected new change in VM, inserting update record into database", "update_type", updateType, "params", params)
|
||||
|
||||
result, err := c.Database.Queries().CreateUpdate(ctx, params)
|
||||
if err != nil {
|
||||
c.Logger.Error("Failed creating database record", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
c.Logger.Debug("created database record", "insert_result", result)
|
||||
// add sleep to slow down mass VM additions
|
||||
utils.SleepWithContext(ctx, (10 * time.Millisecond))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CronTask) AddVmToInventory(vmObject *mo.VirtualMachine, vc *vcenter.Vcenter, ctx context.Context) error {
|
||||
var (
|
||||
numVcpus int32
|
||||
numRam int32
|
||||
totalDiskGB float64
|
||||
creationTS int64
|
||||
srmPlaceholder string
|
||||
foundVmConfig bool
|
||||
isTemplate string
|
||||
poweredOn string
|
||||
folderPath string
|
||||
clusterName string
|
||||
err error
|
||||
)
|
||||
|
||||
if vmObject == nil {
|
||||
return errors.New("can't process empty vm object")
|
||||
}
|
||||
|
||||
if strings.HasPrefix(vmObject.Name, "vCLS-") {
|
||||
c.Logger.Debug("Skipping internal vCLS VM", "vm_name", vmObject.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
c.Logger.Debug("found VM")
|
||||
|
||||
/*
|
||||
if vmObject.Name == "DBRaaS_testVMTemplate" {
|
||||
c.Logger.Debug("Found problematic VM")
|
||||
//prettyPrint(vmObject)
|
||||
}
|
||||
*/
|
||||
|
||||
// calculate VM properties we want to store
|
||||
if vmObject.Config != nil {
|
||||
// Skip any template VMs
|
||||
if vmObject.Config.Template {
|
||||
c.Logger.Debug("Not adding templates to inventory")
|
||||
return nil
|
||||
} else {
|
||||
isTemplate = "FALSE"
|
||||
}
|
||||
|
||||
numRam = vmObject.Config.Hardware.MemoryMB
|
||||
numVcpus = vmObject.Config.Hardware.NumCPU
|
||||
srmPlaceholder = "FALSE" // Default assumption
|
||||
|
||||
// Calculate creation date
|
||||
if vmObject.Config.CreateDate.IsZero() {
|
||||
c.Logger.Debug("Creation date not available for this VM")
|
||||
} else {
|
||||
creationTS = vmObject.Config.CreateDate.Unix()
|
||||
}
|
||||
|
||||
// Calculate disk size
|
||||
var totalDiskBytes int64
|
||||
|
||||
// Calculate the total disk allocated in GB
|
||||
for _, device := range vmObject.Config.Hardware.Device {
|
||||
if disk, ok := device.(*types.VirtualDisk); ok {
|
||||
|
||||
// Print the filename of the backing device
|
||||
if backing, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
|
||||
c.Logger.Debug("Adding disk", "size_bytes", disk.CapacityInBytes, "backing_file", backing.FileName)
|
||||
} else {
|
||||
c.Logger.Debug("Adding disk, unknown backing type", "size_bytes", disk.CapacityInBytes)
|
||||
}
|
||||
|
||||
totalDiskBytes += disk.CapacityInBytes
|
||||
}
|
||||
}
|
||||
totalDiskGB = float64(totalDiskBytes) / (1024 * 1024 * 1024)
|
||||
c.Logger.Debug("Converted total disk size", "bytes", totalDiskBytes, "GB", totalDiskGB)
|
||||
|
||||
// Determine if the VM is a normal VM or an SRM placeholder
|
||||
if vmObject.Config.ManagedBy != nil && vmObject.Config.ManagedBy.ExtensionKey == "com.vmware.vcDr" {
|
||||
if vmObject.Config.ManagedBy.Type == "placeholderVm" {
|
||||
c.Logger.Debug("VM is a placeholder")
|
||||
srmPlaceholder = "TRUE"
|
||||
} else {
|
||||
c.Logger.Debug("VM is managed by SRM but not a placeholder", "details", vmObject.Config.ManagedBy)
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the full folder path of the VM
|
||||
folderPath, err = vc.GetVMFolderPath(*vmObject)
|
||||
if err != nil {
|
||||
c.Logger.Error("failed to get vm folder path", "error", err)
|
||||
folderPath = ""
|
||||
} else {
|
||||
c.Logger.Debug("Found vm folder path", "folder_path", folderPath)
|
||||
}
|
||||
|
||||
foundVmConfig = true
|
||||
} else {
|
||||
c.Logger.Warn("Empty VM config")
|
||||
}
|
||||
|
||||
//c.Logger.Debug("VM has runtime data", "power_state", vmObject.Runtime.PowerState)
|
||||
if vmObject.Runtime.PowerState == "poweredOff" {
|
||||
poweredOn = "FALSE"
|
||||
} else {
|
||||
poweredOn = "TRUE"
|
||||
}
|
||||
|
||||
rpName, err := vc.GetVmResourcePool(*vmObject)
|
||||
if err != nil {
|
||||
c.Logger.Error("Unable to determine resource pool name", "error", err)
|
||||
}
|
||||
|
||||
// Get VM's host and use that to determine cluster
|
||||
//c.Logger.Debug("Checking for VM host by runtime data", "runtime", vmObject.Runtime)
|
||||
clusterName, err = vc.GetClusterFromHost(vmObject.Runtime.Host)
|
||||
if err != nil {
|
||||
c.Logger.Error("Unable to determine cluster name", "error", err)
|
||||
} else {
|
||||
c.Logger.Debug("cluster", "name", clusterName)
|
||||
}
|
||||
|
||||
dcName, err := vc.GetDatacenterForVM(*vmObject)
|
||||
if err != nil {
|
||||
c.Logger.Error("Unable to determine datacenter name", "error", err)
|
||||
} else {
|
||||
c.Logger.Debug("dc", "name", dcName)
|
||||
}
|
||||
|
||||
if foundVmConfig {
|
||||
c.Logger.Debug("Adding to Inventory table", "vm_name", vmObject.Name, "vcpus", numVcpus, "ram", numRam)
|
||||
|
||||
params := queries.CreateInventoryParams{
|
||||
Name: vmObject.Name,
|
||||
Vcenter: vc.Vurl,
|
||||
VmId: sql.NullString{String: vmObject.Reference().Value, Valid: vmObject.Reference().Value != ""},
|
||||
Datacenter: sql.NullString{String: dcName, Valid: dcName != ""},
|
||||
Cluster: sql.NullString{String: clusterName, Valid: clusterName != ""},
|
||||
CreationTime: sql.NullInt64{Int64: creationTS, Valid: creationTS > 0},
|
||||
InitialVcpus: sql.NullInt64{Int64: int64(numVcpus), Valid: numVcpus > 0},
|
||||
InitialRam: sql.NullInt64{Int64: int64(numRam), Valid: numRam > 0},
|
||||
ProvisionedDisk: sql.NullFloat64{Float64: totalDiskGB, Valid: totalDiskGB > 0},
|
||||
Folder: sql.NullString{String: folderPath, Valid: folderPath != ""},
|
||||
ResourcePool: sql.NullString{String: rpName, Valid: rpName != ""},
|
||||
VmUuid: sql.NullString{String: vmObject.Config.Uuid, Valid: vmObject.Config.Uuid != ""},
|
||||
SrmPlaceholder: srmPlaceholder,
|
||||
IsTemplate: isTemplate,
|
||||
PoweredOn: poweredOn,
|
||||
}
|
||||
|
||||
c.Logger.Debug("database params", "params", params)
|
||||
// Insert the new inventory record into the database
|
||||
result, err := c.Database.Queries().CreateInventory(ctx, params)
|
||||
if err != nil {
|
||||
c.Logger.Error("unable to perform database insert", "error", err)
|
||||
} else {
|
||||
c.Logger.Debug("created database record", "insert_result", result)
|
||||
}
|
||||
} else {
|
||||
c.Logger.Debug("Not adding to Inventory due to missing vcenter config property", "vm_name", vmObject.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
// prettyPrint comes from https://gist.github.com/sfate/9d45f6c5405dc4c9bf63bf95fe6d1a7c
|
||||
func prettyPrint(args ...interface{}) {
|
||||
var caller string
|
||||
|
||||
timeNow := time.Now().Format("01-02-2006 15:04:05")
|
||||
prefix := fmt.Sprintf("[%s] %s -- ", "PrettyPrint", timeNow)
|
||||
_, fileName, fileLine, ok := runtime.Caller(1)
|
||||
|
||||
if ok {
|
||||
caller = fmt.Sprintf("%s:%d", fileName, fileLine)
|
||||
} else {
|
||||
caller = ""
|
||||
}
|
||||
|
||||
fmt.Printf("\n%s%s\n", prefix, caller)
|
||||
|
||||
if len(args) == 2 {
|
||||
label := args[0]
|
||||
value := args[1]
|
||||
|
||||
s, _ := json.MarshalIndent(value, "", "\t")
|
||||
fmt.Printf("%s%s: %s\n", prefix, label, string(s))
|
||||
} else {
|
||||
s, _ := json.MarshalIndent(args, "", "\t")
|
||||
fmt.Printf("%s%s\n", prefix, string(s))
|
||||
}
|
||||
}
|
||||
*/
|
||||
739
internal/tasks/monthlyAggregate.go
Normal file
@@ -0,0 +1,739 @@
|
||||
package tasks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"vctp/db"
|
||||
"vctp/internal/metrics"
|
||||
"vctp/internal/report"
|
||||
)
|
||||
|
||||
// RunVcenterMonthlyAggregate summarizes the previous month's daily snapshots.
|
||||
func (c *CronTask) RunVcenterMonthlyAggregate(ctx context.Context, logger *slog.Logger) (err error) {
|
||||
jobTimeout := durationFromSeconds(c.Settings.Values.Settings.MonthlyJobTimeoutSeconds, 20*time.Minute)
|
||||
return c.runAggregateJob(ctx, "monthly_aggregate", jobTimeout, func(jobCtx context.Context) error {
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
logger.Info("Monthly summary job finished", "duration", time.Since(startedAt))
|
||||
}()
|
||||
now := time.Now()
|
||||
firstOfThisMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location())
|
||||
targetMonth := firstOfThisMonth.AddDate(0, -1, 0)
|
||||
return c.aggregateMonthlySummary(jobCtx, targetMonth, false)
|
||||
})
|
||||
}
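As a worked example of the date arithmetic above (illustrative values only): running the job at any point in February 2024 aggregates January 2024.

now := time.Date(2024, time.February, 15, 3, 0, 0, 0, time.UTC)
firstOfThisMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) // 2024-02-01 00:00 UTC
targetMonth := firstOfThisMonth.AddDate(0, -1, 0)                                     // 2024-01-01 00:00 UTC
_ = targetMonth // this is the month handed to aggregateMonthlySummary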
|
||||
|
||||
func (c *CronTask) AggregateMonthlySummary(ctx context.Context, month time.Time, force bool) error {
|
||||
return c.aggregateMonthlySummary(ctx, month, force)
|
||||
}
|
||||
|
||||
func (c *CronTask) aggregateMonthlySummary(ctx context.Context, targetMonth time.Time, force bool) error {
|
||||
jobStart := time.Now()
|
||||
if err := report.EnsureSnapshotRegistry(ctx, c.Database); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
granularity := strings.ToLower(strings.TrimSpace(c.Settings.Values.Settings.MonthlyAggregationGranularity))
|
||||
if granularity == "" {
|
||||
granularity = "hourly"
|
||||
}
|
||||
if granularity != "hourly" && granularity != "daily" {
|
||||
c.Logger.Warn("unknown monthly aggregation granularity; defaulting to hourly", "granularity", granularity)
|
||||
granularity = "hourly"
|
||||
}
|
||||
|
||||
monthStart := time.Date(targetMonth.Year(), targetMonth.Month(), 1, 0, 0, 0, 0, targetMonth.Location())
|
||||
monthEnd := monthStart.AddDate(0, 1, 0)
|
||||
dbConn := c.Database.DB()
|
||||
db.SetPostgresWorkMem(ctx, dbConn, c.Settings.Values.Settings.PostgresWorkMemMB)
|
||||
driver := strings.ToLower(dbConn.DriverName())
|
||||
useGoAgg := os.Getenv("MONTHLY_AGG_GO") == "1"
|
||||
if !useGoAgg && granularity == "hourly" && driver == "sqlite" {
|
||||
c.Logger.Warn("SQL monthly aggregation is slow on sqlite; overriding to Go path", "granularity", granularity)
|
||||
useGoAgg = true
|
||||
}
|
||||
|
||||
var snapshots []report.SnapshotRecord
|
||||
var unionColumns []string
|
||||
if granularity == "daily" {
|
||||
dailySnapshots, err := report.SnapshotRecordsWithFallback(ctx, c.Database, "daily", "inventory_daily_summary_", "20060102", monthStart, monthEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dailySnapshots = filterRecordsInRange(dailySnapshots, monthStart, monthEnd)
|
||||
dailySnapshots = filterSnapshotsWithRows(ctx, dbConn, dailySnapshots)
|
||||
snapshots = dailySnapshots
|
||||
unionColumns = monthlyUnionColumns
|
||||
} else {
|
||||
hourlySnapshots, err := report.SnapshotRecordsWithFallback(ctx, c.Database, "hourly", "inventory_hourly_", "epoch", monthStart, monthEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hourlySnapshots = filterRecordsInRange(hourlySnapshots, monthStart, monthEnd)
|
||||
hourlySnapshots = filterSnapshotsWithRows(ctx, dbConn, hourlySnapshots)
|
||||
snapshots = hourlySnapshots
|
||||
unionColumns = summaryUnionColumns
|
||||
}
|
||||
if len(snapshots) == 0 {
|
||||
return fmt.Errorf("no %s snapshot tables found for %s", granularity, targetMonth.Format("2006-01"))
|
||||
}
|
||||
|
||||
monthlyTable, err := monthlySummaryTableName(targetMonth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := db.EnsureSummaryTable(ctx, dbConn, monthlyTable); err != nil {
|
||||
return err
|
||||
}
|
||||
if rowsExist, err := db.TableHasRows(ctx, dbConn, monthlyTable); err != nil {
|
||||
return err
|
||||
} else if rowsExist && !force {
|
||||
c.Logger.Debug("Monthly summary already exists, skipping aggregation", "summary_table", monthlyTable)
|
||||
return nil
|
||||
} else if rowsExist && force {
|
||||
if err := clearTable(ctx, dbConn, monthlyTable); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Optional Go-based aggregation path.
|
||||
if useGoAgg {
|
||||
if granularity == "daily" {
|
||||
c.Logger.Debug("Using go implementation of monthly aggregation (daily)")
|
||||
if err := c.aggregateMonthlySummaryGo(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
|
||||
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
|
||||
} else {
|
||||
metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
|
||||
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
|
||||
return nil
|
||||
}
|
||||
} else if granularity == "hourly" {
|
||||
c.Logger.Debug("Using go implementation of monthly aggregation (hourly)")
|
||||
if err := c.aggregateMonthlySummaryGoHourly(ctx, monthStart, monthEnd, monthlyTable, snapshots); err != nil {
|
||||
c.Logger.Warn("go-based monthly aggregation failed, falling back to SQL path", "error", err)
|
||||
} else {
|
||||
metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
|
||||
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", monthlyTable)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
c.Logger.Warn("MONTHLY_AGG_GO is set but granularity is unsupported; using SQL path", "granularity", granularity)
|
||||
}
|
||||
}
|
||||
|
||||
tables := make([]string, 0, len(snapshots))
|
||||
for _, snapshot := range snapshots {
|
||||
tables = append(tables, snapshot.TableName)
|
||||
}
|
||||
unionQuery, err := buildUnionQuery(tables, unionColumns, templateExclusionFilter())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
monthlyTotals, err := db.SnapshotTotalsForUnion(ctx, dbConn, unionQuery)
|
||||
if err != nil {
|
||||
c.Logger.Warn("unable to calculate monthly totals", "error", err, "month", targetMonth.Format("2006-01"))
|
||||
} else {
|
||||
c.Logger.Info("Monthly snapshot totals",
|
||||
"month", targetMonth.Format("2006-01"),
|
||||
"vm_count", monthlyTotals.VmCount,
|
||||
"vcpu_total", monthlyTotals.VcpuTotal,
|
||||
"ram_total_gb", monthlyTotals.RamTotal,
|
||||
"disk_total_gb", monthlyTotals.DiskTotal,
|
||||
)
|
||||
}
|
||||
|
||||
var insertQuery string
|
||||
if granularity == "daily" {
|
||||
insertQuery, err = db.BuildMonthlySummaryInsert(monthlyTable, unionQuery)
|
||||
} else {
|
||||
insertQuery, err = db.BuildDailySummaryInsert(monthlyTable, unionQuery)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := dbConn.ExecContext(ctx, insertQuery); err != nil {
|
||||
c.Logger.Error("failed to aggregate monthly inventory", "error", err, "month", targetMonth.Format("2006-01"))
|
||||
return err
|
||||
}
|
||||
if applied, err := db.ApplyLifecycleDeletionToSummary(ctx, dbConn, monthlyTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
|
||||
c.Logger.Warn("failed to apply lifecycle deletions to monthly summary", "error", err, "table", monthlyTable)
|
||||
} else {
|
||||
c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", applied)
|
||||
}
|
||||
if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, monthlyTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
|
||||
c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window", "error", err, "table", monthlyTable)
|
||||
}
|
||||
rowCount, err := db.TableRowCount(ctx, dbConn, monthlyTable)
|
||||
if err != nil {
|
||||
c.Logger.Warn("unable to count monthly summary rows", "error", err, "table", monthlyTable)
|
||||
}
|
||||
if err := report.RegisterSnapshot(ctx, c.Database, "monthly", monthlyTable, targetMonth, rowCount); err != nil {
|
||||
c.Logger.Warn("failed to register monthly snapshot", "error", err, "table", monthlyTable)
|
||||
}
|
||||
|
||||
db.AnalyzeTableIfPostgres(ctx, dbConn, monthlyTable)
|
||||
|
||||
if err := c.generateReport(ctx, monthlyTable); err != nil {
|
||||
c.Logger.Warn("failed to generate monthly report", "error", err, "table", monthlyTable)
|
||||
metrics.RecordMonthlyAggregation(time.Since(jobStart), err)
|
||||
return err
|
||||
}
|
||||
|
||||
c.Logger.Debug("Finished monthly inventory aggregation", "summary_table", monthlyTable)
|
||||
metrics.RecordMonthlyAggregation(time.Since(jobStart), nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
func monthlySummaryTableName(t time.Time) (string, error) {
|
||||
return db.SafeTableName(fmt.Sprintf("inventory_monthly_summary_%s", t.Format("200601")))
|
||||
}
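For example, assuming db.SafeTableName passes a well-formed name through unchanged, January 2024 yields:

name, _ := monthlySummaryTableName(time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC))
_ = name // "inventory_monthly_summary_202401"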
|
||||
|
||||
// aggregateMonthlySummaryGoHourly aggregates hourly snapshots directly into the monthly summary table.
|
||||
func (c *CronTask) aggregateMonthlySummaryGoHourly(ctx context.Context, monthStart, monthEnd time.Time, summaryTable string, hourlySnapshots []report.SnapshotRecord) error {
|
||||
jobStart := time.Now()
|
||||
dbConn := c.Database.DB()
|
||||
|
||||
if err := clearTable(ctx, dbConn, summaryTable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(hourlySnapshots) == 0 {
|
||||
return fmt.Errorf("no hourly snapshot tables found for %s", monthStart.Format("2006-01"))
|
||||
}
|
||||
|
||||
totalSamples := len(hourlySnapshots)
|
||||
var (
|
||||
aggMap map[dailyAggKey]*dailyAggVal
|
||||
snapTimes []int64
|
||||
)
|
||||
|
||||
if db.TableExists(ctx, dbConn, "vm_hourly_stats") {
|
||||
cacheAgg, cacheTimes, cacheErr := c.scanHourlyCache(ctx, monthStart, monthEnd)
|
||||
if cacheErr != nil {
|
||||
c.Logger.Warn("failed to use hourly cache, falling back to table scans", "error", cacheErr)
|
||||
} else if len(cacheAgg) > 0 {
|
||||
c.Logger.Debug("using hourly cache for monthly aggregation", "month", monthStart.Format("2006-01"), "snapshots", len(cacheTimes), "vm_count", len(cacheAgg))
|
||||
aggMap = cacheAgg
|
||||
snapTimes = cacheTimes
|
||||
totalSamples = len(cacheTimes)
|
||||
}
|
||||
}
|
||||
|
||||
if aggMap == nil {
|
||||
var errScan error
|
||||
aggMap, errScan = c.scanHourlyTablesParallel(ctx, hourlySnapshots)
|
||||
if errScan != nil {
|
||||
return errScan
|
||||
}
|
||||
c.Logger.Debug("scanned hourly tables for monthly aggregation", "month", monthStart.Format("2006-01"), "tables", len(hourlySnapshots), "vm_count", len(aggMap))
|
||||
if len(aggMap) == 0 {
|
||||
return fmt.Errorf("no VM records aggregated for %s", monthStart.Format("2006-01"))
|
||||
}
|
||||
|
||||
snapTimes = make([]int64, 0, len(hourlySnapshots))
|
||||
for _, snap := range hourlySnapshots {
|
||||
snapTimes = append(snapTimes, snap.SnapshotTime.Unix())
|
||||
}
|
||||
sort.Slice(snapTimes, func(i, j int) bool { return snapTimes[i] < snapTimes[j] })
|
||||
}
|
||||
|
||||
lifecycleDeletions := c.applyLifecycleDeletions(ctx, aggMap, monthStart, monthEnd)
|
||||
c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", lifecycleDeletions)
|
||||
|
||||
inventoryDeletions := c.applyInventoryDeletions(ctx, aggMap, monthStart, monthEnd)
|
||||
c.Logger.Info("Monthly aggregation deletion times", "source_inventory", inventoryDeletions)
|
||||
|
||||
if len(snapTimes) > 0 {
|
||||
maxSnap := snapTimes[len(snapTimes)-1]
|
||||
inferredDeletions := 0
|
||||
for _, v := range aggMap {
|
||||
if v.deletion != 0 {
|
||||
continue
|
||||
}
|
||||
consecutiveMisses := 0
|
||||
firstMiss := int64(0)
|
||||
for _, t := range snapTimes {
|
||||
if t <= v.lastSeen {
|
||||
continue
|
||||
}
|
||||
if _, ok := v.seen[t]; ok {
|
||||
consecutiveMisses = 0
|
||||
firstMiss = 0
|
||||
continue
|
||||
}
|
||||
consecutiveMisses++
|
||||
if firstMiss == 0 {
|
||||
firstMiss = t
|
||||
}
|
||||
if consecutiveMisses >= 2 {
|
||||
v.deletion = firstMiss
|
||||
inferredDeletions++
|
||||
break
|
||||
}
|
||||
}
|
||||
if v.deletion == 0 && v.lastSeen < maxSnap && firstMiss > 0 {
|
||||
c.Logger.Debug("pending deletion inference (insufficient consecutive misses)", "vm_id", v.key.VmId, "vm_uuid", v.key.VmUuid, "name", v.key.Name, "last_seen", v.lastSeen, "first_missing_snapshot", firstMiss)
|
||||
}
|
||||
}
|
||||
c.Logger.Info("Monthly aggregation deletion times", "source_inferred", inferredDeletions)
|
||||
}
|
||||
|
||||
totalSamplesByVcenter := sampleCountsByVcenter(aggMap)
|
||||
if err := c.insertDailyAggregates(ctx, summaryTable, aggMap, totalSamples, totalSamplesByVcenter); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
|
||||
c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window (Go hourly)", "error", err, "table", summaryTable)
|
||||
}
|
||||
|
||||
db.AnalyzeTableIfPostgres(ctx, dbConn, summaryTable)
|
||||
rowCount, err := db.TableRowCount(ctx, dbConn, summaryTable)
|
||||
if err != nil {
|
||||
c.Logger.Warn("unable to count monthly summary rows (Go hourly)", "error", err, "table", summaryTable)
|
||||
}
|
||||
if err := report.RegisterSnapshot(ctx, c.Database, "monthly", summaryTable, monthStart, rowCount); err != nil {
|
||||
c.Logger.Warn("failed to register monthly snapshot (Go hourly)", "error", err, "table", summaryTable)
|
||||
}
|
||||
if err := c.generateReport(ctx, summaryTable); err != nil {
|
||||
c.Logger.Warn("failed to generate monthly report (Go hourly)", "error", err, "table", summaryTable)
|
||||
return err
|
||||
}
|
||||
|
||||
c.Logger.Debug("Finished monthly inventory aggregation (Go hourly)",
|
||||
"summary_table", summaryTable,
|
||||
"duration", time.Since(jobStart),
|
||||
"tables_scanned", len(hourlySnapshots),
|
||||
"rows_written", rowCount,
|
||||
"total_samples", totalSamples,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// aggregateMonthlySummaryGo mirrors the SQL-based monthly aggregation but performs the work in Go,
|
||||
// reading daily summaries in parallel and reducing them to a single monthly summary table.
|
||||
func (c *CronTask) aggregateMonthlySummaryGo(ctx context.Context, monthStart, monthEnd time.Time, summaryTable string, dailySnapshots []report.SnapshotRecord) error {
|
||||
jobStart := time.Now()
|
||||
dbConn := c.Database.DB()
|
||||
|
||||
if err := clearTable(ctx, dbConn, summaryTable); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build union query for lifecycle refinement after inserts.
|
||||
dailyTables := make([]string, 0, len(dailySnapshots))
|
||||
for _, snapshot := range dailySnapshots {
|
||||
dailyTables = append(dailyTables, snapshot.TableName)
|
||||
}
|
||||
unionQuery, err := buildUnionQuery(dailyTables, monthlyUnionColumns, templateExclusionFilter())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
aggMap, err := c.scanDailyTablesParallel(ctx, dailySnapshots)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(aggMap) == 0 {
|
||||
cacheAgg, cacheErr := c.scanDailyRollup(ctx, monthStart, monthEnd)
|
||||
if cacheErr == nil && len(cacheAgg) > 0 {
|
||||
aggMap = cacheAgg
|
||||
} else if cacheErr != nil {
|
||||
c.Logger.Warn("failed to read daily rollup cache; using table scan", "error", cacheErr)
|
||||
}
|
||||
}
|
||||
if len(aggMap) == 0 {
|
||||
return fmt.Errorf("no VM records aggregated for %s", monthStart.Format("2006-01"))
|
||||
}
|
||||
|
||||
if err := c.insertMonthlyAggregates(ctx, summaryTable, aggMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if applied, err := db.ApplyLifecycleDeletionToSummary(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
|
||||
c.Logger.Warn("failed to apply lifecycle deletions to monthly summary (Go)", "error", err, "table", summaryTable)
|
||||
} else {
|
||||
c.Logger.Info("Monthly aggregation deletion times", "source_lifecycle_cache", applied)
|
||||
}
|
||||
|
||||
if err := db.RefineCreationDeletionFromUnion(ctx, dbConn, summaryTable, unionQuery); err != nil {
|
||||
c.Logger.Warn("failed to refine creation/deletion times (monthly Go)", "error", err, "table", summaryTable)
|
||||
}
|
||||
if err := db.UpdateSummaryPresenceByWindow(ctx, dbConn, summaryTable, monthStart.Unix(), monthEnd.Unix()); err != nil {
|
||||
c.Logger.Warn("failed to update monthly AvgIsPresent from lifecycle window (Go)", "error", err, "table", summaryTable)
|
||||
}
|
||||
|
||||
db.AnalyzeTableIfPostgres(ctx, dbConn, summaryTable)
|
||||
rowCount, err := db.TableRowCount(ctx, dbConn, summaryTable)
|
||||
if err != nil {
|
||||
c.Logger.Warn("unable to count monthly summary rows", "error", err, "table", summaryTable)
|
||||
}
|
||||
if err := report.RegisterSnapshot(ctx, c.Database, "monthly", summaryTable, monthStart, rowCount); err != nil {
|
||||
c.Logger.Warn("failed to register monthly snapshot", "error", err, "table", summaryTable)
|
||||
}
|
||||
if err := c.generateReport(ctx, summaryTable); err != nil {
|
||||
c.Logger.Warn("failed to generate monthly report (Go)", "error", err, "table", summaryTable)
|
||||
return err
|
||||
}
|
||||
|
||||
c.Logger.Debug("Finished monthly inventory aggregation (Go path)", "summary_table", summaryTable, "duration", time.Since(jobStart))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CronTask) scanDailyTablesParallel(ctx context.Context, snapshots []report.SnapshotRecord) (map[monthlyAggKey]*monthlyAggVal, error) {
|
||||
agg := make(map[monthlyAggKey]*monthlyAggVal, 1024)
|
||||
mu := sync.Mutex{}
|
||||
workers := runtime.NumCPU()
|
||||
if workers < 2 {
|
||||
workers = 2
|
||||
}
|
||||
if workers > len(snapshots) {
|
||||
workers = len(snapshots)
|
||||
}
|
||||
|
||||
jobs := make(chan report.SnapshotRecord, len(snapshots))
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < workers; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for snap := range jobs {
|
||||
rows, err := c.scanDailyTable(ctx, snap)
|
||||
if err != nil {
|
||||
c.Logger.Warn("failed to scan daily summary", "table", snap.TableName, "error", err)
|
||||
continue
|
||||
}
|
||||
mu.Lock()
|
||||
for k, v := range rows {
|
||||
if existing, ok := agg[k]; ok {
|
||||
mergeMonthlyAgg(existing, v)
|
||||
} else {
|
||||
agg[k] = v
|
||||
}
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
for _, snap := range snapshots {
|
||||
jobs <- snap
|
||||
}
|
||||
close(jobs)
|
||||
wg.Wait()
|
||||
return agg, nil
|
||||
}
|
||||
|
||||
func mergeMonthlyAgg(dst, src *monthlyAggVal) {
|
||||
if src.creation > 0 && (dst.creation == 0 || src.creation < dst.creation) {
|
||||
dst.creation = src.creation
|
||||
}
|
||||
// If creation is unknown in all daily summaries, leave it zero for reports (VM trace handles approximation separately).
|
||||
if src.deletion > 0 && (dst.deletion == 0 || src.deletion < dst.deletion) {
|
||||
dst.deletion = src.deletion
|
||||
}
|
||||
if src.lastSnapshot.After(dst.lastSnapshot) {
|
||||
dst.lastSnapshot = src.lastSnapshot
|
||||
if src.inventoryId != 0 {
|
||||
dst.inventoryId = src.inventoryId
|
||||
}
|
||||
dst.resourcePool = src.resourcePool
|
||||
dst.datacenter = src.datacenter
|
||||
dst.cluster = src.cluster
|
||||
dst.folder = src.folder
|
||||
dst.isTemplate = src.isTemplate
|
||||
dst.poweredOn = src.poweredOn
|
||||
dst.srmPlaceholder = src.srmPlaceholder
|
||||
dst.provisioned = src.provisioned
|
||||
dst.vcpuCount = src.vcpuCount
|
||||
dst.ramGB = src.ramGB
|
||||
dst.eventKey = src.eventKey
|
||||
dst.cloudId = src.cloudId
|
||||
}
|
||||
|
||||
dst.samplesPresent += src.samplesPresent
|
||||
dst.totalSamples += src.totalSamples
|
||||
dst.sumVcpu += src.sumVcpu
|
||||
dst.sumRam += src.sumRam
|
||||
dst.sumDisk += src.sumDisk
|
||||
dst.tinWeighted += src.tinWeighted
|
||||
dst.bronzeWeighted += src.bronzeWeighted
|
||||
dst.silverWeighted += src.silverWeighted
|
||||
dst.goldWeighted += src.goldWeighted
|
||||
}
|
||||
|
||||
func (c *CronTask) scanDailyTable(ctx context.Context, snap report.SnapshotRecord) (map[monthlyAggKey]*monthlyAggVal, error) {
|
||||
dbConn := c.Database.DB()
|
||||
query := fmt.Sprintf(`
|
||||
SELECT
|
||||
"InventoryId",
|
||||
"Name","Vcenter","VmId","VmUuid","EventKey","CloudId","ResourcePool","Datacenter","Cluster","Folder",
|
||||
COALESCE("ProvisionedDisk",0) AS disk,
|
||||
COALESCE("VcpuCount",0) AS vcpu,
|
||||
COALESCE("RamGB",0) AS ram,
|
||||
COALESCE("CreationTime",0) AS creation,
|
||||
COALESCE("DeletionTime",0) AS deletion,
|
||||
COALESCE("SamplesPresent",0) AS samples_present,
|
||||
"AvgVcpuCount","AvgRamGB","AvgProvisionedDisk","AvgIsPresent",
|
||||
"PoolTinPct","PoolBronzePct","PoolSilverPct","PoolGoldPct",
|
||||
"Tin","Bronze","Silver","Gold","IsTemplate","PoweredOn","SrmPlaceholder"
|
||||
FROM %s
|
||||
`, snap.TableName)
|
||||
|
||||
rows, err := dbConn.QueryxContext(ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
result := make(map[monthlyAggKey]*monthlyAggVal, 256)
|
||||
for rows.Next() {
|
||||
var (
|
||||
inventoryId sql.NullInt64
|
||||
name, vcenter, vmId, vmUuid string
|
||||
eventKey, cloudId sql.NullString
|
||||
resourcePool, datacenter, cluster, folder sql.NullString
|
||||
isTemplate, poweredOn, srmPlaceholder sql.NullString
|
||||
disk, avgVcpu, avgRam, avgDisk sql.NullFloat64
|
||||
avgIsPresent sql.NullFloat64
|
||||
poolTin, poolBronze, poolSilver, poolGold sql.NullFloat64
|
||||
tinPct, bronzePct, silverPct, goldPct sql.NullFloat64
|
||||
vcpu, ram sql.NullInt64
|
||||
creation, deletion sql.NullInt64
|
||||
samplesPresent sql.NullInt64
|
||||
)
|
||||
|
||||
if err := rows.Scan(
|
||||
&inventoryId,
|
||||
&name, &vcenter, &vmId, &vmUuid, &eventKey, &cloudId, &resourcePool, &datacenter, &cluster, &folder,
|
||||
&disk, &vcpu, &ram, &creation, &deletion, &samplesPresent,
|
||||
&avgVcpu, &avgRam, &avgDisk, &avgIsPresent,
|
||||
&poolTin, &poolBronze, &poolSilver, &poolGold,
|
||||
&tinPct, &bronzePct, &silverPct, &goldPct,
|
||||
&isTemplate, &poweredOn, &srmPlaceholder,
|
||||
); err != nil {
|
||||
c.Logger.Warn("failed to scan daily summary row", "table", snap.TableName, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
templateVal := strings.TrimSpace(isTemplate.String)
|
||||
if strings.EqualFold(templateVal, "true") || templateVal == "1" {
|
||||
continue
|
||||
}
|
||||
|
||||
key := monthlyAggKey{Vcenter: vcenter, VmId: vmId, VmUuid: vmUuid, Name: name}
|
||||
agg := &monthlyAggVal{
|
||||
key: key,
|
||||
inventoryId: inventoryId.Int64,
|
||||
eventKey: eventKey.String,
|
||||
cloudId: cloudId.String,
|
||||
resourcePool: resourcePool.String,
|
||||
datacenter: datacenter.String,
|
||||
cluster: cluster.String,
|
||||
folder: folder.String,
|
||||
isTemplate: isTemplate.String,
|
||||
poweredOn: poweredOn.String,
|
||||
srmPlaceholder: srmPlaceholder.String,
|
||||
provisioned: disk.Float64,
|
||||
vcpuCount: vcpu.Int64,
|
||||
ramGB: ram.Int64,
|
||||
creation: creation.Int64,
|
||||
deletion: deletion.Int64,
|
||||
lastSnapshot: snap.SnapshotTime,
|
||||
samplesPresent: samplesPresent.Int64,
|
||||
}
|
||||
|
||||
totalSamplesDay := float64(samplesPresent.Int64)
|
||||
if avgIsPresent.Valid && avgIsPresent.Float64 > 0 {
|
||||
totalSamplesDay = float64(samplesPresent.Int64) / avgIsPresent.Float64
|
||||
}
|
||||
agg.totalSamples = totalSamplesDay
|
||||
if avgVcpu.Valid {
|
||||
agg.sumVcpu = avgVcpu.Float64 * totalSamplesDay
|
||||
}
|
||||
if avgRam.Valid {
|
||||
agg.sumRam = avgRam.Float64 * totalSamplesDay
|
||||
}
|
||||
if avgDisk.Valid {
|
||||
agg.sumDisk = avgDisk.Float64 * totalSamplesDay
|
||||
}
|
||||
if poolTin.Valid {
|
||||
agg.tinWeighted = (poolTin.Float64 / 100.0) * totalSamplesDay
|
||||
}
|
||||
if poolBronze.Valid {
|
||||
agg.bronzeWeighted = (poolBronze.Float64 / 100.0) * totalSamplesDay
|
||||
}
|
||||
if poolSilver.Valid {
|
||||
agg.silverWeighted = (poolSilver.Float64 / 100.0) * totalSamplesDay
|
||||
}
|
||||
if poolGold.Valid {
|
||||
agg.goldWeighted = (poolGold.Float64 / 100.0) * totalSamplesDay
|
||||
}
|
||||
|
||||
result[key] = agg
|
||||
}
|
||||
return result, rows.Err()
|
||||
}
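The totalSamplesDay division above reconstructs how many hourly snapshots the day had from the two stored per-VM values; a quick sketch with made-up numbers:

samplesPresent := 20.0                           // VM present in 20 hourly snapshots that day
avgIsPresent := 20.0 / 24.0                      // stored AvgIsPresent for the day
totalSamplesDay := samplesPresent / avgIsPresent // 24: the day's snapshot count, used to re-weight the daily averages
_ = totalSamplesDay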
|
||||
|
||||
// scanDailyRollup aggregates monthly data from vm_daily_rollup cache.
|
||||
func (c *CronTask) scanDailyRollup(ctx context.Context, start, end time.Time) (map[monthlyAggKey]*monthlyAggVal, error) {
|
||||
dbConn := c.Database.DB()
|
||||
if !db.TableExists(ctx, dbConn, "vm_daily_rollup") {
|
||||
return map[monthlyAggKey]*monthlyAggVal{}, nil
|
||||
}
|
||||
query := `
|
||||
SELECT
|
||||
"Date","Vcenter","VmId","VmUuid","Name","CreationTime","DeletionTime",
|
||||
"SamplesPresent","TotalSamples","SumVcpu","SumRam","SumDisk",
|
||||
"TinHits","BronzeHits","SilverHits","GoldHits",
|
||||
"LastResourcePool","LastDatacenter","LastCluster","LastFolder",
|
||||
"LastProvisionedDisk","LastVcpuCount","LastRamGB","IsTemplate","PoweredOn","SrmPlaceholder"
|
||||
FROM vm_daily_rollup
|
||||
WHERE "Date" >= ? AND "Date" < ?
|
||||
`
|
||||
bind := dbConn.Rebind(query)
|
||||
rows, err := dbConn.QueryxContext(ctx, bind, start.Unix(), end.Unix())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
agg := make(map[monthlyAggKey]*monthlyAggVal, 512)
|
||||
for rows.Next() {
|
||||
var (
|
||||
date sql.NullInt64
|
||||
vcenter, vmId, vmUuid, name string
|
||||
creation, deletion sql.NullInt64
|
||||
samplesPresent, totalSamples sql.NullInt64
|
||||
sumVcpu, sumRam, sumDisk sql.NullFloat64
|
||||
tinHits, bronzeHits, silverHits, goldHits sql.NullInt64
|
||||
lastPool, lastDc, lastCluster, lastFolder sql.NullString
|
||||
lastDisk, lastVcpu, lastRam sql.NullFloat64
|
||||
isTemplate, poweredOn, srmPlaceholder sql.NullString
|
||||
)
|
||||
if err := rows.Scan(
|
||||
&date, &vcenter, &vmId, &vmUuid, &name, &creation, &deletion,
|
||||
&samplesPresent, &totalSamples, &sumVcpu, &sumRam, &sumDisk,
|
||||
&tinHits, &bronzeHits, &silverHits, &goldHits,
|
||||
&lastPool, &lastDc, &lastCluster, &lastFolder,
|
||||
&lastDisk, &lastVcpu, &lastRam, &isTemplate, &poweredOn, &srmPlaceholder,
|
||||
); err != nil {
|
||||
continue
|
||||
}
|
||||
templateVal := strings.TrimSpace(isTemplate.String)
|
||||
if strings.EqualFold(templateVal, "true") || templateVal == "1" {
|
||||
continue
|
||||
}
|
||||
key := monthlyAggKey{Vcenter: vcenter, VmId: vmId, VmUuid: vmUuid, Name: name}
|
||||
val := &monthlyAggVal{
|
||||
key: key,
|
||||
resourcePool: lastPool.String,
|
||||
datacenter: lastDc.String,
|
||||
cluster: lastCluster.String,
|
||||
folder: lastFolder.String,
|
||||
isTemplate: isTemplate.String,
|
||||
poweredOn: poweredOn.String,
|
||||
srmPlaceholder: srmPlaceholder.String,
|
||||
provisioned: lastDisk.Float64,
|
||||
vcpuCount: int64(lastVcpu.Float64),
|
||||
ramGB: int64(lastRam.Float64),
|
||||
creation: creation.Int64,
|
||||
deletion: deletion.Int64,
|
||||
lastSnapshot: time.Unix(date.Int64, 0),
|
||||
samplesPresent: samplesPresent.Int64,
|
||||
totalSamples: float64(totalSamples.Int64),
|
||||
sumVcpu: sumVcpu.Float64,
|
||||
sumRam: sumRam.Float64,
|
||||
sumDisk: sumDisk.Float64,
|
||||
tinWeighted: float64(tinHits.Int64),
|
||||
bronzeWeighted: float64(bronzeHits.Int64),
|
||||
silverWeighted: float64(silverHits.Int64),
|
||||
goldWeighted: float64(goldHits.Int64),
|
||||
}
|
||||
if existing, ok := agg[key]; ok {
|
||||
mergeMonthlyAgg(existing, val)
|
||||
} else {
|
||||
agg[key] = val
|
||||
}
|
||||
}
|
||||
return agg, rows.Err()
|
||||
}
|
||||
|
||||
func (c *CronTask) insertMonthlyAggregates(ctx context.Context, summaryTable string, aggMap map[monthlyAggKey]*monthlyAggVal) error {
|
||||
dbConn := c.Database.DB()
|
||||
columns := []string{
|
||||
"InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime",
|
||||
"ResourcePool", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "VcpuCount",
|
||||
"RamGB", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SamplesPresent",
|
||||
"AvgVcpuCount", "AvgRamGB", "AvgProvisionedDisk", "AvgIsPresent",
|
||||
"PoolTinPct", "PoolBronzePct", "PoolSilverPct", "PoolGoldPct",
|
||||
"Tin", "Bronze", "Silver", "Gold",
|
||||
}
|
||||
placeholders := make([]string, len(columns))
|
||||
for i := range columns {
|
||||
placeholders[i] = "?"
|
||||
}
|
||||
stmtText := fmt.Sprintf(`INSERT INTO %s (%s) VALUES (%s)`, summaryTable, strings.Join(columns, ","), strings.Join(placeholders, ","))
|
||||
stmtText = dbConn.Rebind(stmtText)
|
||||
|
||||
tx, err := dbConn.BeginTxx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stmt, err := tx.PreparexContext(ctx, stmtText)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
for _, v := range aggMap {
|
||||
inventoryVal := sql.NullInt64{}
|
||||
if v.inventoryId != 0 {
|
||||
inventoryVal = sql.NullInt64{Int64: v.inventoryId, Valid: true}
|
||||
}
|
||||
avgVcpu := sql.NullFloat64{}
|
||||
avgRam := sql.NullFloat64{}
|
||||
avgDisk := sql.NullFloat64{}
|
||||
avgIsPresent := sql.NullFloat64{}
|
||||
tinPct := sql.NullFloat64{}
|
||||
bronzePct := sql.NullFloat64{}
|
||||
silverPct := sql.NullFloat64{}
|
||||
goldPct := sql.NullFloat64{}
|
||||
|
||||
if v.totalSamples > 0 {
|
||||
avgVcpu = sql.NullFloat64{Float64: v.sumVcpu / v.totalSamples, Valid: true}
|
||||
avgRam = sql.NullFloat64{Float64: v.sumRam / v.totalSamples, Valid: true}
|
||||
avgDisk = sql.NullFloat64{Float64: v.sumDisk / v.totalSamples, Valid: true}
|
||||
avgIsPresent = sql.NullFloat64{Float64: float64(v.samplesPresent) / v.totalSamples, Valid: true}
|
||||
tinPct = sql.NullFloat64{Float64: 100.0 * v.tinWeighted / v.totalSamples, Valid: true}
|
||||
bronzePct = sql.NullFloat64{Float64: 100.0 * v.bronzeWeighted / v.totalSamples, Valid: true}
|
||||
silverPct = sql.NullFloat64{Float64: 100.0 * v.silverWeighted / v.totalSamples, Valid: true}
|
||||
goldPct = sql.NullFloat64{Float64: 100.0 * v.goldWeighted / v.totalSamples, Valid: true}
|
||||
}
|
||||
|
||||
if _, err := stmt.ExecContext(ctx,
|
||||
inventoryVal,
|
||||
v.key.Name, v.key.Vcenter, v.key.VmId, v.eventKey, v.cloudId, v.creation, v.deletion,
|
||||
v.resourcePool, v.datacenter, v.cluster, v.folder, v.provisioned, v.vcpuCount, v.ramGB,
|
||||
v.isTemplate, v.poweredOn, v.srmPlaceholder, v.key.VmUuid, v.samplesPresent,
|
||||
avgVcpu, avgRam, avgDisk, avgIsPresent,
|
||||
tinPct, bronzePct, silverPct, goldPct,
|
||||
tinPct, bronzePct, silverPct, goldPct,
|
||||
); err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
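The percentages written by the loop above follow this arithmetic; a sketch with made-up numbers for a VM present in 300 of a month's 720 hourly samples, 150 of them in the Tin pool (field names as declared elsewhere in this package):

v := &monthlyAggVal{samplesPresent: 300, totalSamples: 720, tinWeighted: 150}
avgIsPresent := float64(v.samplesPresent) / v.totalSamples // ≈ 0.417 → AvgIsPresent
tinPct := 100.0 * v.tinWeighted / v.totalSamples           // ≈ 20.8 → PoolTinPct
_, _ = avgIsPresent, tinPct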
|
||||
@@ -4,38 +4,44 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
"vctp/db"
|
||||
"vctp/db/queries"
|
||||
"vctp/internal/vcenter"
|
||||
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
)
|
||||
|
||||
// Handler handles requests.
|
||||
type CronTask struct {
|
||||
Logger *slog.Logger
|
||||
Database db.Database
|
||||
}
|
||||
|
||||
// use gocron to check events in the Events table
|
||||
func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
|
||||
startedAt := time.Now()
|
||||
defer func() {
|
||||
logger.Info("Event processing job finished", "duration", time.Since(startedAt))
|
||||
}()
|
||||
var (
|
||||
//unixTimestamp int64
|
||||
numVcpus int32
|
||||
numRam int32
|
||||
totalDiskGB float64
|
||||
srmPlaceholder int
|
||||
srmPlaceholder string
|
||||
foundVm bool
|
||||
isTemplate string
|
||||
poweredOn string
|
||||
folderPath string
|
||||
rpName string
|
||||
vmUuid string
|
||||
)
|
||||
|
||||
logger.Debug("Started Events processing", "time", time.Now())
|
||||
dateCmp := time.Now().AddDate(0, 0, -1).Unix()
|
||||
logger.Debug("Started Events processing", "time", time.Now(), "since", dateCmp)
|
||||
|
||||
// Query events table
|
||||
events, err := c.Database.Queries().ListUnprocessedEvents(ctx)
|
||||
events, err := c.Database.Queries().ListUnprocessedEvents(ctx,
|
||||
sql.NullInt64{Int64: dateCmp, Valid: dateCmp > 0})
|
||||
if err != nil {
|
||||
logger.Error("Unable to query for unprocessed events", "error", err)
|
||||
return nil // TODO - what to do with this error?
|
||||
} else {
|
||||
logger.Debug("Successfully queried for unprocessed events", "count", len(events))
|
||||
}
|
||||
|
||||
for _, evt := range events {
|
||||
@@ -44,8 +50,8 @@ func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
|
||||
// TODO - get a list of unique vcenters, then process each event in batches
|
||||
// to avoid doing unnecessary login/logout of vcenter
|
||||
|
||||
c.Logger.Debug("connecting to vcenter")
|
||||
vc := vcenter.New(c.Logger)
|
||||
//c.Logger.Debug("connecting to vcenter")
|
||||
vc := vcenter.New(c.Logger, c.VcCreds)
|
||||
vc.Login(evt.Source)
|
||||
|
||||
//datacenter = evt.DatacenterName.String
|
||||
@@ -53,53 +59,121 @@ func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
|
||||
|
||||
if err != nil {
|
||||
c.Logger.Error("Can't locate vm in vCenter", "vmID", evt.VmId.String, "error", err)
|
||||
continue
|
||||
} else if vmObject == nil {
|
||||
c.Logger.Debug("didn't find VM", "vm_id", evt.VmId.String)
|
||||
numRam = 0
|
||||
numVcpus = 0
|
||||
totalDiskGB = 0
|
||||
} else {
|
||||
c.Logger.Debug("found VM")
|
||||
srmPlaceholder = 0 // Default assumption
|
||||
//prettyPrint(vmObject)
|
||||
|
||||
// calculate VM properties we want to store
|
||||
if vmObject.Vm.Config != nil {
|
||||
numRam = vmObject.Vm.Config.Hardware.MemoryMB
|
||||
//numVcpus = vmObject.Vm.Config.Hardware.NumCPU * vmObject.Vm.Config.Hardware.NumCoresPerSocket
|
||||
numVcpus = vmObject.Vm.Config.Hardware.NumCPU
|
||||
// TODO - if VM name ends with -tmp or -phVm then we mark this record as processed and stop trying to find a VM that doesn't exist anymore
|
||||
|
||||
// Calculate the total disk allocated in GB
|
||||
for _, device := range vmObject.Vm.Config.Hardware.Device {
|
||||
if disk, ok := device.(*types.VirtualDisk); ok {
|
||||
totalDiskGB += float64(disk.CapacityInBytes / 1024 / 1024 / 1024) // Convert from bytes to GB
|
||||
}
|
||||
if strings.HasSuffix(evt.VmName.String, "-phVm") || strings.HasSuffix(evt.VmName.String, "-tmp") {
|
||||
c.Logger.Info("VM name indicates temporary VM, marking as processed", "vm_name", evt.VmName.String)
|
||||
|
||||
err = c.Database.Queries().UpdateEventsProcessed(ctx, evt.Eid)
|
||||
if err != nil {
|
||||
c.Logger.Error("Unable to mark this event as processed", "event_id", evt.Eid, "error", err)
|
||||
} else {
|
||||
//c.Logger.Debug("Marked event as processed", "event_id", evt.Eid)
|
||||
}
|
||||
|
||||
// Determine if the VM is a normal VM or an SRM placeholder
|
||||
if vmObject.Vm.Config.ManagedBy != nil && vmObject.Vm.Config.ManagedBy.Type == "com.vmware.vcDr" {
|
||||
c.Logger.Debug("VM ManagedBy indicates managed by SRM")
|
||||
srmPlaceholder = 1
|
||||
}
|
||||
|
||||
foundVm = true
|
||||
} else {
|
||||
c.Logger.Error("Empty VM config")
|
||||
}
|
||||
|
||||
/*
|
||||
numRam = 0
|
||||
numVcpus = 0
|
||||
totalDiskGB = 0
|
||||
isTemplate = "FALSE"
|
||||
folderPath = ""
|
||||
vmUuid = ""
|
||||
*/
|
||||
continue
|
||||
}
|
||||
err = vc.Logout()
|
||||
if err != nil {
|
||||
c.Logger.Error("unable to logout of vcenter", "error", err)
|
||||
|
||||
if strings.HasPrefix(vmObject.Name, "vCLS-") {
|
||||
c.Logger.Info("Skipping internal vCLS VM event", "vm_name", vmObject.Name)
|
||||
if err := c.Database.Queries().UpdateEventsProcessed(ctx, evt.Eid); err != nil {
|
||||
c.Logger.Error("Unable to mark vCLS event as processed", "event_id", evt.Eid, "error", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
//c.Logger.Debug("found VM")
|
||||
srmPlaceholder = "FALSE" // Default assumption
|
||||
//prettyPrint(vmObject)
|
||||
|
||||
// calculate VM properties we want to store
|
||||
if vmObject.Config != nil {
|
||||
numRam = vmObject.Config.Hardware.MemoryMB
|
||||
numVcpus = vmObject.Config.Hardware.NumCPU
|
||||
vmUuid = vmObject.Config.Uuid
|
||||
|
||||
var totalDiskBytes int64
|
||||
|
||||
// Calculate the total disk allocated in GB
|
||||
for _, device := range vmObject.Config.Hardware.Device {
|
||||
if disk, ok := device.(*types.VirtualDisk); ok {
|
||||
|
||||
// Print the filename of the backing device
|
||||
if _, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
|
||||
//c.Logger.Debug("Adding disk", "size_bytes", disk.CapacityInBytes, "backing_file", backing.FileName)
|
||||
} else {
|
||||
//c.Logger.Debug("Adding disk, unknown backing type", "size_bytes", disk.CapacityInBytes)
|
||||
}
|
||||
|
||||
totalDiskBytes += disk.CapacityInBytes
|
||||
//totalDiskGB += float64(disk.CapacityInBytes / 1024 / 1024 / 1024) // Convert from bytes to GB
|
||||
}
|
||||
}
|
||||
totalDiskGB = float64(totalDiskBytes / 1024 / 1024 / 1024)
|
||||
c.Logger.Debug("Converted total disk size", "bytes", totalDiskBytes, "GB", totalDiskGB)
|
||||
|
||||
// Determine if the VM is a normal VM or an SRM placeholder
|
||||
if vmObject.Config.ManagedBy != nil && vmObject.Config.ManagedBy.ExtensionKey == "com.vmware.vcDr" {
|
||||
if vmObject.Config.ManagedBy.Type == "placeholderVm" {
|
||||
c.Logger.Debug("VM is a placeholder")
|
||||
srmPlaceholder = "TRUE"
|
||||
} else {
|
||||
c.Logger.Debug("VM is managed by SRM but not a placeholder", "details", vmObject.Config.ManagedBy)
|
||||
}
|
||||
}
|
||||
|
||||
if vmObject.Config.Template {
|
||||
isTemplate = "TRUE"
|
||||
} else {
|
||||
isTemplate = "FALSE"
|
||||
}
|
||||
|
||||
// Retrieve the full folder path of the VM
|
||||
folderPath, err = vc.GetVMFolderPath(*vmObject)
|
||||
if err != nil {
|
||||
c.Logger.Error("failed to get vm folder path", "error", err)
|
||||
folderPath = ""
|
||||
} else {
|
||||
c.Logger.Debug("Found vm folder path", "folder_path", folderPath)
|
||||
}
|
||||
|
||||
// Retrieve the resource pool of the VM
|
||||
rpName, _ = vc.GetVmResourcePool(*vmObject)
|
||||
|
||||
foundVm = true
|
||||
} else {
|
||||
c.Logger.Error("Empty VM config")
|
||||
}
|
||||
|
||||
//c.Logger.Debug("VM has runtime data", "power_state", vmObject.Runtime.PowerState)
|
||||
if vmObject.Runtime.PowerState == "poweredOff" {
|
||||
poweredOn = "FALSE"
|
||||
} else {
|
||||
poweredOn = "TRUE"
|
||||
}
|
||||
|
||||
_ = vc.Logout(ctx)
|
||||
|
||||
if foundVm {
|
||||
c.Logger.Debug("Simulate adding to Inventory", "vm_name", evt.VmName.String, "vcpus", numVcpus, "ram", numRam, "dc", evt.DatacenterId.String)
|
||||
c.Logger.Debug("Adding to Inventory table", "vm_name", evt.VmName.String, "vcpus", numVcpus, "ram", numRam, "dc", evt.DatacenterId.String)
|
||||
|
||||
params := queries.CreateInventoryParams{
|
||||
Name: vmObject.Vm.Name,
|
||||
Name: vmObject.Name,
|
||||
Vcenter: evt.Source,
|
||||
EventId: sql.NullString{String: evt.CloudId, Valid: evt.CloudId != ""},
|
||||
CloudId: sql.NullString{String: evt.CloudId, Valid: evt.CloudId != ""},
|
||||
EventKey: sql.NullString{String: evt.EventKey.String, Valid: evt.EventKey.Valid},
|
||||
VmId: sql.NullString{String: evt.VmId.String, Valid: evt.VmId.Valid},
|
||||
Datacenter: sql.NullString{String: evt.DatacenterName.String, Valid: evt.DatacenterName.Valid},
|
||||
@@ -108,26 +182,29 @@ func (c *CronTask) RunVmCheck(ctx context.Context, logger *slog.Logger) error {
|
||||
InitialVcpus: sql.NullInt64{Int64: int64(numVcpus), Valid: numVcpus > 0},
|
||||
InitialRam: sql.NullInt64{Int64: int64(numRam), Valid: numRam > 0},
|
||||
ProvisionedDisk: sql.NullFloat64{Float64: totalDiskGB, Valid: totalDiskGB > 0},
|
||||
Folder: sql.NullString{String: vmObject.FolderPath, Valid: vmObject.FolderPath != ""},
|
||||
ResourcePool: sql.NullString{String: vmObject.ResourcePool, Valid: vmObject.ResourcePool != ""},
|
||||
SrmPlaceholder: sql.NullInt64{Int64: int64(srmPlaceholder), Valid: true},
|
||||
Folder: sql.NullString{String: folderPath, Valid: folderPath != ""},
|
||||
ResourcePool: sql.NullString{String: rpName, Valid: rpName != ""},
|
||||
VmUuid: sql.NullString{String: vmUuid, Valid: vmUuid != ""},
|
||||
SrmPlaceholder: srmPlaceholder,
|
||||
IsTemplate: isTemplate,
|
||||
PoweredOn: poweredOn,
|
||||
}
|
||||
|
||||
c.Logger.Debug("database params", "params", params)
|
||||
//c.Logger.Debug("database params", "params", params)
|
||||
|
||||
// Insert the new inventory record into the database
|
||||
result, err := c.Database.Queries().CreateInventory(ctx, params)
|
||||
_, err := c.Database.Queries().CreateInventory(ctx, params)
|
||||
if err != nil {
|
||||
c.Logger.Error("unable to perform database insert", "error", err)
|
||||
} else {
|
||||
c.Logger.Debug("created database record", "insert_result", result)
|
||||
//c.Logger.Debug("created database record", "insert_result", result)
|
||||
|
||||
// mark this event as processed
|
||||
err = c.Database.Queries().UpdateEventsProcessed(ctx, evt.Eid)
|
||||
if err != nil {
|
||||
c.Logger.Error("Unable to mark this event as processed", "event_id", evt.Eid, "error", err)
|
||||
} else {
|
||||
c.Logger.Debug("Marked event as processed", "event_id", evt.Eid)
|
||||
//c.Logger.Debug("Marked event as processed", "event_id", evt.Eid)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
internal/tasks/types.go (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
package tasks
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"vctp/db"
|
||||
"vctp/internal/settings"
|
||||
"vctp/internal/vcenter"
|
||||
)
|
||||
|
||||
// CronTask stores runtime information to be used by tasks.
|
||||
type CronTask struct {
|
||||
Logger *slog.Logger
|
||||
Database db.Database
|
||||
Settings *settings.Settings
|
||||
VcCreds *vcenter.VcenterLogin
|
||||
FirstHourlySnapshotCheck bool
|
||||
}
|
||||
|
||||
// InventorySnapshotRow represents a single VM snapshot row.
|
||||
type InventorySnapshotRow struct {
|
||||
InventoryId sql.NullInt64
|
||||
Name string
|
||||
Vcenter string
|
||||
VmId sql.NullString
|
||||
EventKey sql.NullString
|
||||
CloudId sql.NullString
|
||||
CreationTime sql.NullInt64
|
||||
DeletionTime sql.NullInt64
|
||||
ResourcePool sql.NullString
|
||||
Datacenter sql.NullString
|
||||
Cluster sql.NullString
|
||||
Folder sql.NullString
|
||||
ProvisionedDisk sql.NullFloat64
|
||||
VcpuCount sql.NullInt64
|
||||
RamGB sql.NullInt64
|
||||
IsTemplate string
|
||||
PoweredOn string
|
||||
SrmPlaceholder string
|
||||
VmUuid sql.NullString
|
||||
SnapshotTime int64
|
||||
}
|
||||
|
||||
// snapshotTotals aliases DB snapshot totals for convenience.
|
||||
type snapshotTotals = db.SnapshotTotals
|
||||
|
||||
type dailyAggKey struct {
|
||||
Vcenter string
|
||||
VmId string
|
||||
VmUuid string
|
||||
Name string
|
||||
}
|
||||
|
||||
type dailyAggVal struct {
|
||||
key dailyAggKey
|
||||
resourcePool string
|
||||
datacenter string
|
||||
cluster string
|
||||
folder string
|
||||
isTemplate string
|
||||
poweredOn string
|
||||
srmPlaceholder string
|
||||
creation int64
|
||||
firstSeen int64
|
||||
lastSeen int64
|
||||
lastDisk float64
|
||||
lastVcpu int64
|
||||
lastRam int64
|
||||
sumVcpu int64
|
||||
sumRam int64
|
||||
sumDisk float64
|
||||
samples int64
|
||||
tinHits int64
|
||||
bronzeHits int64
|
||||
silverHits int64
|
||||
goldHits int64
|
||||
seen map[int64]struct{}
|
||||
deletion int64
|
||||
}
|
||||
|
||||
type monthlyAggKey struct {
|
||||
Vcenter string
|
||||
VmId string
|
||||
VmUuid string
|
||||
Name string
|
||||
}
|
||||
|
||||
type monthlyAggVal struct {
|
||||
key monthlyAggKey
|
||||
inventoryId int64
|
||||
eventKey string
|
||||
cloudId string
|
||||
resourcePool string
|
||||
datacenter string
|
||||
cluster string
|
||||
folder string
|
||||
isTemplate string
|
||||
poweredOn string
|
||||
srmPlaceholder string
|
||||
creation int64
|
||||
deletion int64
|
||||
lastSnapshot time.Time
|
||||
provisioned float64
|
||||
vcpuCount int64
|
||||
ramGB int64
|
||||
samplesPresent int64
|
||||
totalSamples float64
|
||||
sumVcpu float64
|
||||
sumRam float64
|
||||
sumDisk float64
|
||||
tinWeighted float64
|
||||
bronzeWeighted float64
|
||||
silverWeighted float64
|
||||
goldWeighted float64
|
||||
}
|
||||
|
||||
// CronTracker manages re-entry protection and status recording for cron jobs.
|
||||
type CronTracker struct {
|
||||
db db.Database
|
||||
bindType int
|
||||
}
|
||||
@@ -1,11 +1,14 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const rsaBits = 4096
|
||||
@@ -18,6 +21,10 @@ func GetFilePath(path string) string {
|
||||
|
||||
// check if filename exists
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
if filepath.IsAbs(path) {
|
||||
slog.Info("File not found, using absolute path", "filename", path)
|
||||
return path
|
||||
}
|
||||
slog.Info("File not found, searching in same directory as binary", "filename", path)
|
||||
// if not, check that it exists in the same directory as the currently executing binary
|
||||
ex, err2 := os.Executable()
|
||||
@@ -53,3 +60,40 @@ func FileExists(filename string) bool {
|
||||
}
|
||||
return !info.IsDir()
|
||||
}
|
||||
|
||||
func SleepWithContext(ctx context.Context, d time.Duration) {
|
||||
timer := time.NewTimer(d)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
case <-timer.C:
|
||||
}
|
||||
}
|
||||
|
||||
// EnvInt parses an environment variable into an int; returns (value, true) when set and valid.
|
||||
func EnvInt(key string) (int, bool) {
|
||||
val := os.Getenv(key)
|
||||
if val == "" {
|
||||
return 0, false
|
||||
}
|
||||
parsed, err := strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return parsed, true
|
||||
}
|
||||
|
||||
// DurationFromEnv parses an environment variable representing seconds into a duration, defaulting when unset/invalid.
|
||||
func DurationFromEnv(key string, fallback time.Duration) time.Duration {
|
||||
val := os.Getenv(key)
|
||||
if val == "" {
|
||||
return fallback
|
||||
}
|
||||
seconds, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil || seconds <= 0 {
|
||||
return fallback
|
||||
}
|
||||
return time.Duration(seconds) * time.Second
|
||||
}
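
A minimal usage sketch for the three helpers above (illustrative only; the environment variable names are placeholders, not ones the project defines):

func exampleUtilsUsage(ctx context.Context) {
	// Poll interval taken from the environment (seconds), defaulting to 60s.
	interval := DurationFromEnv("EXAMPLE_POLL_SECONDS", 60*time.Second)

	// Optional integer knob; ok is false when unset or not a number.
	if pages, ok := EnvInt("EXAMPLE_MAX_PAGES"); ok {
		slog.Info("using page limit", "pages", pages)
	}

	// Sleep that returns early if the context is cancelled.
	SleepWithContext(ctx, interval)
}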
|
||||
|
||||
@@ -3,13 +3,14 @@ package vcenter
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/govmomi"
|
||||
"github.com/vmware/govmomi/event"
|
||||
"github.com/vmware/govmomi/find"
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/view"
|
||||
@@ -19,44 +20,71 @@ import (
|
||||
)
|
||||
|
||||
type Vcenter struct {
|
||||
Logger *slog.Logger
|
||||
ctx context.Context
|
||||
client *govmomi.Client
|
||||
Logger *slog.Logger
|
||||
Vurl string
|
||||
ctx context.Context
|
||||
client *govmomi.Client
|
||||
credentials *VcenterLogin
|
||||
}
|
||||
|
||||
type VcenterLogin struct {
|
||||
Username string
|
||||
Password string
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
type VmProperties struct {
|
||||
Vm mo.VirtualMachine
|
||||
ResourcePool string
|
||||
FolderPath string
|
||||
//Datacenter string
|
||||
}
|
||||
|
||||
var clientUserAgent = "vCTP"
|
||||
|
||||
// SetUserAgent customizes the User-Agent used when talking to vCenter.
|
||||
func SetUserAgent(ua string) {
|
||||
if strings.TrimSpace(ua) != "" {
|
||||
clientUserAgent = ua
|
||||
}
|
||||
}
|
||||
|
||||
type HostLookup struct {
|
||||
Cluster string
|
||||
Datacenter string
|
||||
}
|
||||
|
||||
type FolderLookup map[string]string
|
||||
|
||||
// New creates a new Vcenter with the given logger and credentials
|
||||
func New(logger *slog.Logger) *Vcenter {
|
||||
func New(logger *slog.Logger, creds *VcenterLogin) *Vcenter {
|
||||
|
||||
//ctx, cancel := context.WithCancel(context.Background())
|
||||
//defer cancel()
|
||||
|
||||
return &Vcenter{
|
||||
Logger: logger,
|
||||
ctx: context.Background(),
|
||||
Logger: logger,
|
||||
ctx: context.Background(),
|
||||
credentials: creds,
|
||||
}
|
||||
}
|
||||
|
||||
func (v *Vcenter) Login(vUrl string) error {
|
||||
var insecure bool
|
||||
|
||||
insecureString := os.Getenv("VCENTER_INSECURE")
|
||||
username := os.Getenv("VCENTER_USERNAME")
|
||||
password := os.Getenv("VCENTER_PASSWORD")
|
||||
|
||||
if v == nil {
|
||||
return fmt.Errorf("vcenter is nil")
|
||||
}
|
||||
if strings.TrimSpace(vUrl) == "" {
|
||||
return fmt.Errorf("vcenter URL is empty")
|
||||
}
|
||||
if v.credentials == nil {
|
||||
return fmt.Errorf("vcenter credentials are nil")
|
||||
}
|
||||
// Connect to vCenter
|
||||
u, err := soap.ParseURL(vUrl)
|
||||
if err != nil {
|
||||
log.Fatalf("Error parsing vCenter URL: %s", err)
|
||||
return fmt.Errorf("error parsing vCenter URL: %w", err)
|
||||
}
|
||||
v.Vurl = vUrl
|
||||
|
||||
u.User = url.UserPassword(username, password)
|
||||
u.User = url.UserPassword(v.credentials.Username, v.credentials.Password)
|
||||
|
||||
/*
|
||||
c, err := govmomi.NewClient(ctx, u, insecure)
|
||||
@@ -65,41 +93,614 @@ func (v *Vcenter) Login(vUrl string) error {
|
||||
}
|
||||
*/
|
||||
|
||||
if insecureString == "true" {
|
||||
insecure = true
|
||||
}
|
||||
|
||||
c, err := govmomi.NewClient(v.ctx, u, insecure)
|
||||
c, err := govmomi.NewClient(v.ctx, u, v.credentials.Insecure)
|
||||
if err != nil {
|
||||
v.Logger.Error("Unable to connect to vCenter", "error", err)
|
||||
return fmt.Errorf("unable to connect to vCenter : %s", err)
|
||||
}
|
||||
if clientUserAgent != "" {
|
||||
c.Client.UserAgent = clientUserAgent
|
||||
}
|
||||
|
||||
//defer c.Logout(v.ctx)
|
||||
|
||||
v.client = c
|
||||
|
||||
v.Logger.Debug("successfully connected to vCenter", "url", vUrl, "username", username)
|
||||
v.Logger.Debug("successfully connected to vCenter", "url", vUrl, "username", v.credentials.Username)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *Vcenter) Logout() error {
|
||||
//v.Logger.Debug("vcenter logging out")
|
||||
|
||||
if v.ctx == nil {
|
||||
func (v *Vcenter) Logout(ctx context.Context) error {
|
||||
if ctx == nil {
|
||||
ctx = v.ctx
|
||||
}
|
||||
if ctx == nil {
|
||||
v.Logger.Warn("Nil context, unable to logout")
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.client.Valid() {
|
||||
//v.Logger.Debug("vcenter client is valid. Logging out")
|
||||
return v.client.Logout(v.ctx)
|
||||
} else {
|
||||
v.Logger.Debug("vcenter client is not valid")
|
||||
return v.client.Logout(ctx)
|
||||
}
|
||||
v.Logger.Debug("vcenter client is not valid")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *Vcenter) GetAllVmReferences() ([]*object.VirtualMachine, error) {
|
||||
var results []*object.VirtualMachine
|
||||
finder := find.NewFinder(v.client.Client, true)
|
||||
|
||||
m := view.NewManager(v.client.Client)
|
||||
|
||||
vms, err := m.CreateContainerView(v.ctx, v.client.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer vms.Destroy(v.ctx)
|
||||
|
||||
// List all datacenters
|
||||
datacenters, err := finder.DatacenterList(v.ctx, "*")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list datacenters: %w", err)
|
||||
}
|
||||
|
||||
for _, dc := range datacenters {
|
||||
v.Logger.Debug("Getting VMs in", "datacenter", dc.Name())
|
||||
// Set the current datacenter
|
||||
finder.SetDatacenter(dc)
|
||||
|
||||
// Get the list of all virtual machines in the current datacenter
|
||||
vms, err := finder.VirtualMachineList(v.ctx, "*")
|
||||
if err != nil {
|
||||
v.Logger.Error("Failed to list VMs in", "datacenter", dc.Name(), "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, vm := range vms {
|
||||
//vmRef := vm.Reference()
|
||||
//v.Logger.Debug("result", "vm", vm, "MoRef", vmRef, "path", vm.InventoryPath)
|
||||
results = append(results, vm)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
v.Logger.Debug("Found VM references", "count", len(results))
|
||||
|
||||
return results, err
|
||||
}
|
||||
|
||||
// GetAllVMsWithProps returns all VMs with the properties needed for snapshotting in a single property-collector call.
|
||||
func (v *Vcenter) GetAllVMsWithProps() ([]mo.VirtualMachine, error) {
|
||||
m := view.NewManager(v.client.Client)
|
||||
cv, err := m.CreateContainerView(v.ctx, v.client.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create VM container view: %w", err)
|
||||
}
|
||||
defer cv.Destroy(v.ctx)
|
||||
|
||||
var vms []mo.VirtualMachine
|
||||
props := []string{
|
||||
"name",
|
||||
"parent",
|
||||
"config.uuid",
|
||||
"config.createDate",
|
||||
"config.hardware",
|
||||
"config.managedBy",
|
||||
"config.template",
|
||||
"runtime.powerState",
|
||||
"runtime.host",
|
||||
"resourcePool",
|
||||
}
|
||||
if err := cv.Retrieve(v.ctx, []string{"VirtualMachine"}, props, &vms); err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve VMs: %w", err)
|
||||
}
|
||||
return vms, nil
|
||||
}
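
A minimal sketch of consuming the result above (hypothetical function name): only the properties listed in props are populated by the property collector, so nested pointers such as Config must still be nil-checked.

func exampleWalkVMs(v *Vcenter) error {
	vms, err := v.GetAllVMsWithProps()
	if err != nil {
		return err
	}
	for _, vm := range vms {
		var cpus, ramMB int32
		if vm.Config != nil {
			cpus = vm.Config.Hardware.NumCPU
			ramMB = vm.Config.Hardware.MemoryMB
		}
		v.Logger.Debug("snapshot candidate",
			"name", vm.Name,
			"power_state", vm.Runtime.PowerState,
			"vcpus", cpus,
			"ram_mb", ramMB,
		)
	}
	return nil
}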
|
||||
|
||||
// FindVmDeletionEvents returns a map of MoRef (VmId) to the deletion event time within the given window.
|
||||
func (v *Vcenter) FindVmDeletionEvents(ctx context.Context, begin, end time.Time) (map[string]time.Time, error) {
|
||||
return v.findVmDeletionEvents(ctx, begin, end, nil)
|
||||
}
|
||||
|
||||
// FindVmDeletionEventsForCandidates returns deletion event times for the provided VM IDs only.
|
||||
func (v *Vcenter) FindVmDeletionEventsForCandidates(ctx context.Context, begin, end time.Time, candidates []string) (map[string]time.Time, error) {
|
||||
if len(candidates) == 0 {
|
||||
return map[string]time.Time{}, nil
|
||||
}
|
||||
candidateSet := make(map[string]struct{}, len(candidates))
|
||||
for _, id := range candidates {
|
||||
if id == "" {
|
||||
continue
|
||||
}
|
||||
candidateSet[id] = struct{}{}
|
||||
}
|
||||
if len(candidateSet) == 0 {
|
||||
return map[string]time.Time{}, nil
|
||||
}
|
||||
return v.findVmDeletionEvents(ctx, begin, end, candidateSet)
|
||||
}
|
||||
|
||||
func (v *Vcenter) findVmDeletionEvents(ctx context.Context, begin, end time.Time, candidateSet map[string]struct{}) (map[string]time.Time, error) {
|
||||
result := make(map[string]time.Time)
|
||||
if v.client == nil || !v.client.Valid() {
|
||||
return result, fmt.Errorf("vcenter client is not valid")
|
||||
}
|
||||
// vCenter events are stored in UTC; normalize the query window.
|
||||
beginUTC := begin.UTC()
|
||||
endUTC := end.UTC()
|
||||
mgr := event.NewManager(v.client.Client)
|
||||
|
||||
type deletionHit struct {
|
||||
ts time.Time
|
||||
priority int
|
||||
}
|
||||
const (
|
||||
deletionPriorityRemoved = iota
|
||||
deletionPriorityVmEvent
|
||||
deletionPriorityTask
|
||||
)
|
||||
hits := make(map[string]deletionHit)
|
||||
foundCandidates := 0
|
||||
recordDeletion := func(vmID string, ts time.Time, priority int) {
|
||||
if vmID == "" {
|
||||
return
|
||||
}
|
||||
if candidateSet != nil {
|
||||
if _, ok := candidateSet[vmID]; !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
if prev, ok := hits[vmID]; !ok {
|
||||
hits[vmID] = deletionHit{ts: ts, priority: priority}
|
||||
if candidateSet != nil {
|
||||
foundCandidates++
|
||||
}
|
||||
} else if priority < prev.priority || (priority == prev.priority && ts.Before(prev.ts)) {
|
||||
hits[vmID] = deletionHit{ts: ts, priority: priority}
|
||||
}
|
||||
}
|
||||
|
||||
isDeletionMessage := func(msg string) bool {
|
||||
msg = strings.ToLower(msg)
|
||||
return strings.Contains(msg, "destroy") ||
|
||||
strings.Contains(msg, "deleted") ||
|
||||
strings.Contains(msg, "unregister") ||
|
||||
strings.Contains(msg, "removed from inventory")
|
||||
}
|
||||
|
||||
isVmDeletionTask := func(info types.TaskInfo, msg string) bool {
|
||||
id := strings.ToLower(strings.TrimSpace(info.DescriptionId))
|
||||
if id != "" {
|
||||
if strings.Contains(id, "virtualmachine") &&
|
||||
(strings.Contains(id, "destroy") || strings.Contains(id, "delete") || strings.Contains(id, "unregister")) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
name := strings.ToLower(strings.TrimSpace(info.Name))
|
||||
if name != "" {
|
||||
if (strings.Contains(name, "destroy") || strings.Contains(name, "delete") || strings.Contains(name, "unregister")) &&
|
||||
(strings.Contains(name, "virtualmachine") || strings.Contains(name, "virtual machine")) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if msg != "" && isDeletionMessage(msg) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
processEvents := func(evts []types.BaseEvent) {
|
||||
for _, ev := range evts {
|
||||
switch e := ev.(type) {
|
||||
case *types.VmRemovedEvent:
|
||||
if e.Vm != nil {
|
||||
vmID := e.Vm.Vm.Value
|
||||
recordDeletion(vmID, e.CreatedTime, deletionPriorityRemoved)
|
||||
}
|
||||
case *types.TaskEvent:
|
||||
// Fallback for destroy task events.
|
||||
if e.Info.Entity != nil {
|
||||
vmID := e.Info.Entity.Value
|
||||
if vmID != "" && isVmDeletionTask(e.Info, e.GetEvent().FullFormattedMessage) {
|
||||
recordDeletion(vmID, e.CreatedTime, deletionPriorityTask)
|
||||
}
|
||||
}
|
||||
case *types.VmEvent:
|
||||
if e.Vm != nil {
|
||||
vmID := e.Vm.Vm.Value
|
||||
if vmID != "" && isDeletionMessage(e.GetEvent().FullFormattedMessage) {
|
||||
recordDeletion(vmID, e.CreatedTime, deletionPriorityVmEvent)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
eventPageSize = int32(1000)
|
||||
maxEventPages = 25
|
||||
)
|
||||
readCollector := func(label string, collector *event.HistoryCollector) error {
|
||||
pageCount := 0
|
||||
for {
|
||||
events, err := collector.ReadNextEvents(ctx, eventPageSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(events) == 0 {
|
||||
break
|
||||
}
|
||||
processEvents(events)
|
||||
if candidateSet != nil && foundCandidates >= len(candidateSet) {
|
||||
break
|
||||
}
|
||||
pageCount++
|
||||
if pageCount >= maxEventPages {
|
||||
if v.Logger != nil {
|
||||
v.Logger.Warn("vcenter deletion events truncated", "vcenter", v.Vurl, "label", label, "pages", pageCount, "page_size", eventPageSize, "window_start_utc", beginUTC, "window_end_utc", endUTC)
|
||||
}
|
||||
break
|
||||
}
|
||||
if len(events) < int(eventPageSize) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// First attempt: specific deletion event types.
|
||||
disableFullMessage := false
|
||||
filter := types.EventFilterSpec{
|
||||
Time: &types.EventFilterSpecByTime{
|
||||
BeginTime: &beginUTC,
|
||||
EndTime: &endUTC,
|
||||
},
|
||||
DisableFullMessage: &disableFullMessage,
|
||||
EventTypeId: []string{
|
||||
"VmRemovedEvent",
|
||||
"TaskEvent",
|
||||
},
|
||||
}
|
||||
collector, err := mgr.CreateCollectorForEvents(ctx, filter)
|
||||
if err != nil {
|
||||
return result, fmt.Errorf("failed to create event collector: %w", err)
|
||||
}
|
||||
defer collector.Destroy(ctx)
|
||||
|
||||
if err := readCollector("primary", collector); err != nil {
|
||||
return result, fmt.Errorf("failed to read events: %w", err)
|
||||
}
|
||||
|
||||
// If nothing found, widen the filter to all event types in the window as a fallback.
|
||||
if len(hits) == 0 {
|
||||
fallbackFilter := types.EventFilterSpec{
|
||||
Time: &types.EventFilterSpecByTime{
|
||||
BeginTime: &beginUTC,
|
||||
EndTime: &endUTC,
|
||||
},
|
||||
DisableFullMessage: &disableFullMessage,
|
||||
}
|
||||
fc, err := mgr.CreateCollectorForEvents(ctx, fallbackFilter)
|
||||
if err == nil {
|
||||
defer fc.Destroy(ctx)
|
||||
if readErr := readCollector("fallback", fc); readErr != nil && v.Logger != nil {
|
||||
v.Logger.Warn("vcenter fallback event read failed", "vcenter", v.Vurl, "error", readErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for vmID, hit := range hits {
|
||||
result[vmID] = hit.ts
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
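
A minimal usage sketch for the candidate-restricted scan above (illustrative only; the window and MoRef values are placeholders). Restricting the scan lets readCollector stop as soon as every candidate has a recorded deletion.

func exampleFindDeletions(ctx context.Context, v *Vcenter) (map[string]time.Time, error) {
	end := time.Now()
	begin := end.Add(-24 * time.Hour)
	// MoRefs of VMs that disappeared from the latest inventory snapshot.
	candidates := []string{"vm-1001", "vm-1002"} // placeholders
	return v.FindVmDeletionEventsForCandidates(ctx, begin, end, candidates)
}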
|
||||
|
||||
func (v *Vcenter) BuildHostLookup() (map[string]HostLookup, error) {
|
||||
finder := find.NewFinder(v.client.Client, true)
|
||||
datacenters, err := finder.DatacenterList(v.ctx, "*")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list datacenters: %w", err)
|
||||
}
|
||||
|
||||
lookup := make(map[string]HostLookup)
|
||||
clusterCache := make(map[string]string)
|
||||
|
||||
for _, dc := range datacenters {
|
||||
finder.SetDatacenter(dc)
|
||||
hosts, err := finder.HostSystemList(v.ctx, "*")
|
||||
if err != nil {
|
||||
v.Logger.Warn("failed to list hosts for datacenter", "datacenter", dc.Name(), "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, host := range hosts {
|
||||
ref := host.Reference()
|
||||
var moHost mo.HostSystem
|
||||
if err := v.client.RetrieveOne(v.ctx, ref, []string{"parent"}, &moHost); err != nil {
|
||||
v.Logger.Warn("failed to retrieve host info", "host", host.Name(), "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
clusterName := ""
|
||||
if moHost.Parent != nil {
|
||||
if cached, ok := clusterCache[moHost.Parent.Value]; ok {
|
||||
clusterName = cached
|
||||
} else {
|
||||
var moCompute mo.ComputeResource
|
||||
if err := v.client.RetrieveOne(v.ctx, *moHost.Parent, []string{"name"}, &moCompute); err == nil {
|
||||
clusterName = moCompute.Name
|
||||
clusterCache[moHost.Parent.Value] = clusterName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lookup[ref.Value] = HostLookup{
|
||||
Cluster: clusterName,
|
||||
Datacenter: dc.Name(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lookup, nil
|
||||
}
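
A minimal sketch of resolving a VM's placement from the host lookup built above (hypothetical helper; assumes runtime.host was retrieved for the VM):

func exampleResolvePlacement(vm mo.VirtualMachine, hosts map[string]HostLookup) (cluster, datacenter string) {
	if vm.Runtime.Host == nil {
		return "", ""
	}
	if hl, ok := hosts[vm.Runtime.Host.Value]; ok {
		return hl.Cluster, hl.Datacenter
	}
	return "", ""
}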
|
||||
|
||||
func (v *Vcenter) BuildFolderPathLookup() (FolderLookup, error) {
|
||||
m := view.NewManager(v.client.Client)
|
||||
folders, err := m.CreateContainerView(v.ctx, v.client.ServiceContent.RootFolder, []string{"Folder"}, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer folders.Destroy(v.ctx)
|
||||
|
||||
var results []mo.Folder
|
||||
if err := folders.Retrieve(v.ctx, []string{"Folder"}, []string{"name", "parent"}, &results); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nameByID := make(map[string]string, len(results))
|
||||
parentByID := make(map[string]*types.ManagedObjectReference, len(results))
|
||||
for _, folder := range results {
|
||||
nameByID[folder.Reference().Value] = folder.Name
|
||||
parentByID[folder.Reference().Value] = folder.Parent
|
||||
}
|
||||
|
||||
paths := make(FolderLookup, len(results))
|
||||
var buildPath func(id string) string
|
||||
buildPath = func(id string) string {
|
||||
if pathValue, ok := paths[id]; ok {
|
||||
return pathValue
|
||||
}
|
||||
name, ok := nameByID[id]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
parent := parentByID[id]
|
||||
if parent == nil || parent.Type == "Datacenter" {
|
||||
paths[id] = path.Join("/", name)
|
||||
return paths[id]
|
||||
}
|
||||
if parent.Type != "Folder" {
|
||||
paths[id] = path.Join("/", name)
|
||||
return paths[id]
|
||||
}
|
||||
parentPath := buildPath(parent.Value)
|
||||
if parentPath == "" {
|
||||
paths[id] = path.Join("/", name)
|
||||
return paths[id]
|
||||
}
|
||||
paths[id] = path.Join(parentPath, name)
|
||||
return paths[id]
|
||||
}
|
||||
|
||||
for id := range nameByID {
|
||||
_ = buildPath(id)
|
||||
}
|
||||
|
||||
return paths, nil
|
||||
}
|
||||
|
||||
func (v *Vcenter) GetVMFolderPathFromLookup(vm mo.VirtualMachine, lookup FolderLookup) (string, bool) {
|
||||
if vm.Parent == nil || lookup == nil {
|
||||
return "", false
|
||||
}
|
||||
pathValue, ok := lookup[vm.Parent.Value]
|
||||
return pathValue, ok
|
||||
}
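
A minimal sketch combining the cached lookup with the slower per-VM traversal as a fallback (hypothetical helper, not part of this change):

func exampleFolderPath(v *Vcenter, vm mo.VirtualMachine, folders FolderLookup) string {
	if p, ok := v.GetVMFolderPathFromLookup(vm, folders); ok {
		return p
	}
	p, err := v.GetVMFolderPath(vm)
	if err != nil {
		return ""
	}
	return p
}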
|
||||
|
||||
func (v *Vcenter) ConvertObjToMoVM(vmObj *object.VirtualMachine) (*mo.VirtualMachine, error) {
|
||||
// Use the InventoryPath to extract the datacenter name and VM path
|
||||
inventoryPath := vmObj.InventoryPath
|
||||
parts := strings.SplitN(inventoryPath, "/", 3)
|
||||
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("invalid InventoryPath: %s", inventoryPath)
|
||||
}
|
||||
|
||||
// The first part of the path is the datacenter name
|
||||
datacenterName := parts[1]
|
||||
|
||||
// Finder to search for datacenter and VM
|
||||
finder := find.NewFinder(v.client.Client, true)
|
||||
|
||||
// Find the specific datacenter by name
|
||||
datacenter, err := finder.Datacenter(v.ctx, fmt.Sprintf("/%s", datacenterName))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find datacenter %s: %w", datacenterName, err)
|
||||
}
|
||||
|
||||
// Set the found datacenter in the finder
|
||||
finder.SetDatacenter(datacenter)
|
||||
|
||||
// Now retrieve the VM using its ManagedObjectReference
|
||||
vmRef := vmObj.Reference()
|
||||
|
||||
// Retrieve the full mo.VirtualMachine object for the reference
|
||||
var moVM mo.VirtualMachine
|
||||
err = v.client.RetrieveOne(v.ctx, vmRef, nil, &moVM)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve VM %s in datacenter %s: %w", vmObj.Name(), datacenterName, err)
|
||||
}
|
||||
|
||||
// Return the found mo.VirtualMachine object
|
||||
//v.Logger.Debug("Found VM in datacenter", "vm_name", moVM.Name, "dc_name", datacenterName)
|
||||
return &moVM, nil
|
||||
}
|
||||
|
||||
func (v *Vcenter) ConvertObjToMoHost(hostObj *object.HostSystem) (*mo.HostSystem, error) {
|
||||
// Use the InventoryPath to extract the datacenter name and Host path
|
||||
inventoryPath := hostObj.InventoryPath
|
||||
parts := strings.SplitN(inventoryPath, "/", 3)
|
||||
v.Logger.Debug("inventory path", "parts", parts)
|
||||
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("invalid InventoryPath: %s", inventoryPath)
|
||||
}
|
||||
|
||||
// The first part of the path is the datacenter name
|
||||
datacenterName := parts[1]
|
||||
|
||||
// Finder to search for datacenter and VM
|
||||
finder := find.NewFinder(v.client.Client, true)
|
||||
|
||||
// Find the specific datacenter by name
|
||||
datacenter, err := finder.Datacenter(v.ctx, fmt.Sprintf("/%s", datacenterName))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find datacenter %s: %w", datacenterName, err)
|
||||
}
|
||||
|
||||
// Set the found datacenter in the finder
|
||||
finder.SetDatacenter(datacenter)
|
||||
|
||||
// Now retrieve the VM using its ManagedObjectReference
|
||||
hostRef := hostObj.Reference()
|
||||
|
||||
// Retrieve the full mo.HostSystem object for the reference
|
||||
var moHost mo.HostSystem
|
||||
err = v.client.RetrieveOne(v.ctx, hostRef, nil, &moHost)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve Host %s in datacenter %s: %w", hostObj.Name(), datacenterName, err)
|
||||
}
|
||||
|
||||
// Return the found mo.HostSystem object
|
||||
v.Logger.Debug("Found Host in datacenter", "host_name", moHost.Name, "dc_name", datacenterName)
|
||||
return &moHost, nil
|
||||
}
|
||||
|
||||
func (v *Vcenter) GetHostSystemObject(hostRef types.ManagedObjectReference) (*mo.HostSystem, error) {
|
||||
finder := find.NewFinder(v.client.Client, true)
|
||||
|
||||
// List all datacenters
|
||||
datacenters, err := finder.DatacenterList(v.ctx, "*")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list datacenters: %w", err)
|
||||
}
|
||||
|
||||
for _, dc := range datacenters {
|
||||
v.Logger.Debug("Checking dc for host", "name", dc.Name(), "hostRef", hostRef.String())
|
||||
// Set the current datacenter
|
||||
finder.SetDatacenter(dc)
|
||||
|
||||
var hs mo.HostSystem
|
||||
err := v.client.RetrieveOne(v.ctx, hostRef, nil, &hs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
v.Logger.Debug("Found hostsystem", "name", hs.Name)
|
||||
return &hs, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Function to find the cluster or compute resource from a host reference
|
||||
func (v *Vcenter) GetClusterFromHost(hostRef *types.ManagedObjectReference) (string, error) {
|
||||
if hostRef == nil {
|
||||
v.Logger.Warn("nil hostRef passed to GetClusterFromHost")
|
||||
return "", nil
|
||||
}
|
||||
// Get the host object
|
||||
host, err := v.GetHostSystemObject(*hostRef)
|
||||
if err != nil {
|
||||
v.Logger.Error("cant get host", "error", err)
|
||||
return "", err
|
||||
}
|
||||
if host == nil {
|
||||
v.Logger.Warn("host lookup returned nil", "host_ref", hostRef)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
v.Logger.Debug("host parent", "parent", host.Parent)
|
||||
|
||||
if host.Parent != nil && host.Parent.Type == "ClusterComputeResource" {
|
||||
// Retrieve properties of the compute resource
|
||||
var moCompute mo.ComputeResource
|
||||
err = v.client.RetrieveOne(v.ctx, *host.Parent, nil, &moCompute)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to retrieve compute resource: %w", err)
|
||||
}
|
||||
v.Logger.Debug("VM is on host in cluster/compute resource", "name", moCompute.Name)
|
||||
return moCompute.Name, nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Function to determine the datacenter a VM belongs to
|
||||
func (v *Vcenter) GetDatacenterForVM(vm mo.VirtualMachine) (string, error) {
|
||||
// Start with the VM's parent reference
|
||||
ref := vm.Reference()
|
||||
|
||||
// Traverse the inventory hierarchy upwards to find the datacenter
|
||||
for {
|
||||
// Get the parent reference of the current object
|
||||
parentRef, err := v.getParent(ref)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get parent object: %w", err)
|
||||
}
|
||||
|
||||
// If we get a nil parent reference, it means we've hit the root without finding the datacenter
|
||||
if parentRef == nil {
|
||||
return "", fmt.Errorf("failed to find datacenter for VM")
|
||||
}
|
||||
|
||||
// Check if the parent is a Datacenter
|
||||
switch parentRef.Type {
|
||||
case "Datacenter":
|
||||
// If we found a Datacenter, retrieve its properties
|
||||
datacenter := object.NewDatacenter(v.client.Client, *parentRef)
|
||||
var moDC mo.Datacenter
|
||||
err = v.client.RetrieveOne(v.ctx, datacenter.Reference(), nil, &moDC)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to retrieve datacenter: %w", err)
|
||||
}
|
||||
|
||||
//log.Printf("VM is in datacenter: %s", moDC.Name)
|
||||
v.Logger.Debug("VM datacenter found", "vm_name", vm.Name, "dc_name", moDC.Name)
|
||||
return moDC.Name, nil
|
||||
|
||||
default:
|
||||
// Continue traversing upwards if not a Datacenter
|
||||
ref = *parentRef
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Helper function to get the parent ManagedObjectReference of a given object
|
||||
func (v *Vcenter) getParent(ref types.ManagedObjectReference) (*types.ManagedObjectReference, error) {
|
||||
// Retrieve the object's properties
|
||||
var obj mo.ManagedEntity
|
||||
err := v.client.RetrieveOne(v.ctx, ref, []string{"parent"}, &obj)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve parent of object: %w", err)
|
||||
}
|
||||
|
||||
// Return the parent reference
|
||||
if obj.Parent != nil {
|
||||
return obj.Parent, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (v *Vcenter) FindVMByName(vmName string) ([]mo.VirtualMachine, error) {
|
||||
@@ -177,12 +778,9 @@ func (v *Vcenter) FindVMByID(vmID string) (*VmProperties, error) {
|
||||
return nil, fmt.Errorf("VM with ID %s not found in any datacenter", vmID)
|
||||
}
|
||||
|
||||
func (v *Vcenter) FindVMByIDWithDatacenter(vmID string, dcID string) (*VmProperties, error) {
|
||||
//var dcName string
|
||||
func (v *Vcenter) FindVMByIDWithDatacenter(vmID string, dcID string) (*mo.VirtualMachine, error) {
|
||||
var err error
|
||||
resourcePool := ""
|
||||
vmFolderPath := ""
|
||||
v.Logger.Debug("searching for vm id", "vm_id", vmID, "datacenter_id", dcID)
|
||||
//v.Logger.Debug("searching for vm id", "vm_id", vmID, "datacenter_id", dcID)
|
||||
|
||||
finder := find.NewFinder(v.client.Client, true)
|
||||
|
||||
@@ -211,51 +809,60 @@ func (v *Vcenter) FindVMByIDWithDatacenter(vmID string, dcID string) (*VmPropert
|
||||
//err := v.client.RetrieveOne(v.ctx, vmRef, []string{"config", "name"}, &vm)
|
||||
err = v.client.RetrieveOne(v.ctx, vmRef, nil, &vm)
|
||||
if err == nil {
|
||||
v.Logger.Debug("Found VM")
|
||||
//v.Logger.Debug("Found VM")
|
||||
|
||||
// Retrieve the resource pool the VM is in
|
||||
if vm.ResourcePool != nil {
|
||||
rp := object.NewResourcePool(v.client.Client, *vm.ResourcePool)
|
||||
rpName, err := rp.ObjectName(v.ctx)
|
||||
if err != nil {
|
||||
v.Logger.Error("failed to get resource pool name", "error", err)
|
||||
} else {
|
||||
v.Logger.Debug("Found resource pool name", "rp_name", rpName)
|
||||
resourcePool = rpName
|
||||
}
|
||||
return &vm, nil
|
||||
|
||||
}
|
||||
|
||||
// Retrieve the full folder path of the VM
|
||||
folderPath, err := v.getVMFolderPath(vm)
|
||||
if err != nil {
|
||||
v.Logger.Error("failed to get vm folder path", "error", err)
|
||||
} else {
|
||||
v.Logger.Debug("Found vm folder path", "folder_path", folderPath)
|
||||
vmFolderPath = folderPath
|
||||
}
|
||||
|
||||
return &VmProperties{
|
||||
//Datacenter: dcName,
|
||||
Vm: vm,
|
||||
ResourcePool: resourcePool,
|
||||
FolderPath: vmFolderPath,
|
||||
}, nil
|
||||
} else if _, ok := err.(*find.NotFoundError); !ok {
|
||||
// If the error is not a NotFoundError, return it
|
||||
//return nil, fmt.Errorf("failed to retrieve VM with ID %s in datacenter %s: %w", vmID, dc.Name(), err)
|
||||
v.Logger.Debug("Couldn't find vm in datacenter", "vm_id", vmID, "datacenter_id", dcID)
|
||||
//v.Logger.Debug("Couldn't find vm in datacenter", "vm_id", vmID, "datacenter_id", dcID)
|
||||
return nil, nil
|
||||
} else {
|
||||
return nil, fmt.Errorf("failed to retrieve VM: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
//v.Logger.Info("Unable to find vm in datacenter", "vm_id", vmID, "datacenter_id", dcID)
|
||||
//return nil, nil
|
||||
// Helper function to retrieve the resource pool for the VM
|
||||
func (v *Vcenter) GetVmResourcePool(vm mo.VirtualMachine) (string, error) {
|
||||
var resourcePool string
|
||||
if vm.ResourcePool != nil {
|
||||
rp := object.NewResourcePool(v.client.Client, *vm.ResourcePool)
|
||||
rpName, err := rp.ObjectName(v.ctx)
|
||||
if err != nil {
|
||||
v.Logger.Error("failed to get resource pool name", "error", err)
|
||||
return resourcePool, err
|
||||
} else {
|
||||
//v.Logger.Debug("Found resource pool name", "rp_name", rpName)
|
||||
resourcePool = rpName
|
||||
}
|
||||
}
|
||||
return resourcePool, nil
|
||||
}
|
||||
|
||||
// BuildResourcePoolLookup creates a cache of resource pool MoRef -> name for fast lookups.
|
||||
func (v *Vcenter) BuildResourcePoolLookup() (map[string]string, error) {
|
||||
m := view.NewManager(v.client.Client)
|
||||
cv, err := m.CreateContainerView(v.ctx, v.client.ServiceContent.RootFolder, []string{"ResourcePool"}, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create resource pool view: %w", err)
|
||||
}
|
||||
defer cv.Destroy(v.ctx)
|
||||
|
||||
var pools []mo.ResourcePool
|
||||
if err := cv.Retrieve(v.ctx, []string{"ResourcePool"}, []string{"name"}, &pools); err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve resource pools: %w", err)
|
||||
}
|
||||
|
||||
lookup := make(map[string]string, len(pools))
|
||||
for _, pool := range pools {
|
||||
lookup[pool.Reference().Value] = pool.Name
|
||||
}
|
||||
return lookup, nil
|
||||
}
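
A minimal sketch of using the cache above in place of a per-VM ObjectName round trip (hypothetical helper; assumes resourcePool was retrieved for the VM):

func exampleResourcePoolName(vm mo.VirtualMachine, pools map[string]string) string {
	if vm.ResourcePool == nil {
		return ""
	}
	return pools[vm.ResourcePool.Value]
}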
|
||||
|
||||
// Helper function to retrieve the full folder path for the VM
|
||||
func (v *Vcenter) getVMFolderPath(vm mo.VirtualMachine) (string, error) {
|
||||
func (v *Vcenter) GetVMFolderPath(vm mo.VirtualMachine) (string, error) {
|
||||
//finder := find.NewFinder(v.client.Client, true)
|
||||
|
||||
v.Logger.Debug("Commencing vm folder path search")
|
||||
@@ -268,9 +875,10 @@ func (v *Vcenter) getVMFolderPath(vm mo.VirtualMachine) (string, error) {
|
||||
|
||||
// Traverse the folder hierarchy to build the full folder path
|
||||
folderPath := ""
|
||||
v.Logger.Debug("parent is", "parent", parentRef)
|
||||
//v.Logger.Debug("parent is", "parent", parentRef)
|
||||
|
||||
for parentRef.Type != "Datacenter" {
|
||||
maxHops := 128
|
||||
for parentRef != nil && parentRef.Type != "Datacenter" && maxHops > 0 {
|
||||
// Retrieve the parent object
|
||||
//parentObj, err := finder.ObjectReference(v.ctx, *parentRef)
|
||||
//if err != nil {
|
||||
@@ -293,11 +901,16 @@ func (v *Vcenter) getVMFolderPath(vm mo.VirtualMachine) (string, error) {
|
||||
//if folder, ok := parentObj.(*object.Folder); ok {
|
||||
if parentObj.Parent != nil {
|
||||
parentRef = parentObj.Parent
|
||||
v.Logger.Debug("Parent uplevel is", "ref", parentRef)
|
||||
//v.Logger.Debug("Parent uplevel is", "ref", parentRef)
|
||||
} else {
|
||||
return "", fmt.Errorf("unexpected parent type: %s", parentObj.Reference().Type)
|
||||
}
|
||||
//break
|
||||
maxHops--
|
||||
}
|
||||
|
||||
if parentRef == nil || maxHops == 0 {
|
||||
return "", fmt.Errorf("folder traversal terminated early for VM %s", vm.Name)
|
||||
}
|
||||
|
||||
return folderPath, nil
|
||||
|
||||
log/log.go (10 changed lines)
@@ -65,10 +65,9 @@ func ToLevel(level string) Level {
|
||||
}
|
||||
}
|
||||
|
||||
// GetLevel returns the log level from the environment variable.
|
||||
// GetLevel returns the default log level.
|
||||
func GetLevel() Level {
|
||||
level := os.Getenv("LOG_LEVEL")
|
||||
return ToLevel(level)
|
||||
return LevelInfo
|
||||
}
|
||||
|
||||
// Output represents the log output.
|
||||
@@ -93,8 +92,7 @@ func ToOutput(output string) Output {
|
||||
}
|
||||
}
|
||||
|
||||
// GetOutput returns the log output from the environment variable.
|
||||
// GetOutput returns the default log output.
|
||||
func GetOutput() Output {
|
||||
output := os.Getenv("LOG_OUTPUT")
|
||||
return ToOutput(output)
|
||||
return OutputText
|
||||
}
|
||||
|
||||
main.go (341 changed lines)
@@ -2,46 +2,80 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
"vctp/db"
|
||||
"vctp/internal/secrets"
|
||||
"vctp/internal/settings"
|
||||
"vctp/internal/tasks"
|
||||
utils "vctp/internal/utils"
|
||||
"vctp/internal/vcenter"
|
||||
"vctp/log"
|
||||
"vctp/server"
|
||||
"vctp/server/router"
|
||||
|
||||
"crypto/sha256"
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-co-op/gocron/v2"
|
||||
"github.com/joho/godotenv"
|
||||
)
|
||||
|
||||
var (
|
||||
bindDisableTls bool
|
||||
sha1ver string // sha1 revision used to build the program
|
||||
buildTime string // when the executable was built
|
||||
cronFrequency time.Duration
|
||||
bindDisableTls bool
|
||||
sha1ver string // sha1 revision used to build the program
|
||||
buildTime string // when the executable was built
|
||||
cronFrequency time.Duration
|
||||
cronInvFrequency time.Duration
|
||||
cronSnapshotFrequency time.Duration
|
||||
cronAggregateFrequency time.Duration
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Load data from environment file
|
||||
envFilename := utils.GetFilePath(".env")
|
||||
err := godotenv.Load(envFilename)
|
||||
if err != nil {
|
||||
panic("Error loading .env file")
|
||||
}
|
||||
const fallbackEncryptionKey = "5L1l3B5KvwOCzUHMAlCgsgUTRAYMfSpa"
|
||||
|
||||
logger := log.New(
|
||||
log.GetLevel(),
|
||||
log.GetOutput(),
|
||||
)
|
||||
func main() {
|
||||
settingsPath := flag.String("settings", "/etc/dtms/vctp.yml", "Path to settings YAML")
|
||||
runInventory := flag.Bool("run-inventory", false, "Run a single inventory snapshot across all configured vCenters and exit")
|
||||
flag.Parse()
|
||||
|
||||
bootstrapLogger := log.New(log.LevelInfo, log.OutputText)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// Load settings from yaml
|
||||
s := settings.New(bootstrapLogger, *settingsPath)
|
||||
err := s.ReadYMLSettings()
|
||||
if err != nil {
|
||||
bootstrapLogger.Error("failed to open yaml settings file", "error", err, "filename", *settingsPath)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
logger := log.New(
|
||||
log.ToLevel(strings.ToLower(strings.TrimSpace(s.Values.Settings.LogLevel))),
|
||||
log.ToOutput(strings.ToLower(strings.TrimSpace(s.Values.Settings.LogOutput))),
|
||||
)
|
||||
s.Logger = logger
|
||||
|
||||
logger.Info("vCTP starting", "build_time", buildTime, "sha1_version", sha1ver, "go_version", runtime.Version(), "settings_file", *settingsPath)
|
||||
|
||||
// Configure database
|
||||
database, err := db.New(logger, utils.GetFilePath("db.sqlite3"))
|
||||
dbDriver := strings.TrimSpace(s.Values.Settings.DatabaseDriver)
|
||||
if dbDriver == "" {
|
||||
dbDriver = "sqlite"
|
||||
}
|
||||
normalizedDriver := strings.ToLower(strings.TrimSpace(dbDriver))
|
||||
if normalizedDriver == "" || normalizedDriver == "sqlite3" {
|
||||
normalizedDriver = "sqlite"
|
||||
}
|
||||
dbURL := strings.TrimSpace(s.Values.Settings.DatabaseURL)
|
||||
if dbURL == "" && normalizedDriver == "sqlite" {
|
||||
dbURL = utils.GetFilePath("db.sqlite3")
|
||||
}
|
||||
|
||||
database, err := db.New(logger, db.Config{Driver: normalizedDriver, DSN: dbURL})
|
||||
if err != nil {
|
||||
logger.Error("Failed to create database", "error", err)
|
||||
os.Exit(1)
|
||||
@@ -49,52 +83,36 @@ func main() {
|
||||
defer database.Close()
|
||||
//defer database.DB().Close()
|
||||
|
||||
if err = db.Migrate(database); err != nil {
|
||||
if err = db.Migrate(database, normalizedDriver); err != nil {
|
||||
logger.Error("failed to migrate database", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Prepare the task scheduler
|
||||
s, err := gocron.NewScheduler()
|
||||
if err != nil {
|
||||
logger.Error("failed to create scheduler", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Pass useful information to the cron jobs
|
||||
c := &tasks.CronTask{
|
||||
Logger: logger,
|
||||
Database: database,
|
||||
}
|
||||
|
||||
// Determine bind IP
|
||||
bindIP := os.Getenv("BIND_IP")
|
||||
bindIP := strings.TrimSpace(s.Values.Settings.BindIP)
|
||||
if bindIP == "" {
|
||||
bindIP = utils.GetOutboundIP().String()
|
||||
}
|
||||
// Determine bind port
|
||||
bindPort := os.Getenv("BIND_PORT")
|
||||
if bindPort == "" {
|
||||
bindPort = "9443"
|
||||
bindPort := s.Values.Settings.BindPort
|
||||
if bindPort == 0 {
|
||||
bindPort = 9443
|
||||
}
|
||||
bindAddress := fmt.Sprint(bindIP, ":", bindPort)
|
||||
slog.Info("Will listen on address", "ip", bindIP, "port", bindPort)
|
||||
//logger.Info("Will listen on address", "ip", bindIP, "port", bindPort)
|
||||
|
||||
// Determine bind disable TLS
|
||||
bindDisableTlsEnv := os.Getenv("BIND_DISABLE_TLS")
|
||||
if bindDisableTlsEnv == "true" {
|
||||
bindDisableTls = true
|
||||
}
|
||||
bindDisableTls = s.Values.Settings.BindDisableTLS
|
||||
|
||||
// Get file names for TLS cert/key
|
||||
tlsCertFilename := os.Getenv("TLS_CERT_FILE")
|
||||
tlsCertFilename := strings.TrimSpace(s.Values.Settings.TLSCertFilename)
|
||||
if tlsCertFilename != "" {
|
||||
tlsCertFilename = utils.GetFilePath(tlsCertFilename)
|
||||
} else {
|
||||
tlsCertFilename = "./cert.pem"
|
||||
}
|
||||
|
||||
tlsKeyFilename := os.Getenv("TLS_KEY_FILE")
|
||||
tlsKeyFilename := strings.TrimSpace(s.Values.Settings.TLSKeyFilename)
|
||||
if tlsKeyFilename != "" {
|
||||
tlsKeyFilename = utils.GetFilePath(tlsKeyFilename)
|
||||
} else {
|
||||
@@ -103,54 +121,231 @@ func main() {
|
||||
|
||||
// Generate certificate if required
|
||||
if !(utils.FileExists(tlsCertFilename) && utils.FileExists(tlsKeyFilename)) {
|
||||
slog.Warn("Specified TLS certificate or private key do not exist", "certificate", tlsCertFilename, "tls-key", tlsKeyFilename)
|
||||
logger.Warn("Specified TLS certificate or private key do not exist", "certificate", tlsCertFilename, "tls-key", tlsKeyFilename)
|
||||
utils.GenerateCerts(tlsCertFilename, tlsKeyFilename)
|
||||
}
|
||||
|
||||
cronFrequencyString := os.Getenv("VCENTER_POLLING_SECONDS")
|
||||
if cronFrequencyString != "" {
|
||||
cronFrequency, err = time.ParseDuration(cronFrequencyString)
|
||||
if err != nil {
|
||||
slog.Error("Can't convert VCENTER_POLLING_SECONDS value to time duration. Defaulting to 60s", "value", cronFrequencyString, "error", err)
|
||||
cronFrequency = time.Second * 60
|
||||
}
|
||||
} else {
|
||||
cronFrequency = time.Second * 60
|
||||
// Load vcenter credentials from settings, decrypt if required
|
||||
encKey := deriveEncryptionKey(logger)
|
||||
a := secrets.New(logger, encKey)
|
||||
vcEp := strings.TrimSpace(s.Values.Settings.VcenterPassword)
|
||||
if len(vcEp) == 0 {
|
||||
logger.Error("No vcenter password configured")
|
||||
os.Exit(1)
|
||||
}
|
||||
logger.Debug("Setting VM polling cronjob frequency to", "frequency", cronFrequency)
|
||||
|
||||
// start background processing
|
||||
startsAt := time.Now().Add(time.Second * 10)
|
||||
job, err := s.NewJob(
|
||||
gocron.DurationJob(cronFrequency),
|
||||
gocron.NewTask(func() {
|
||||
c.RunVmCheck(ctx, logger)
|
||||
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
|
||||
gocron.WithStartAt(gocron.WithStartDateTime(startsAt)),
|
||||
)
|
||||
vcPass, err := a.Decrypt(vcEp)
|
||||
if err != nil {
|
||||
logger.Error("failed to start cron jobs", "error", err)
|
||||
logger.Error("failed to decrypt vcenter credentials. Assuming un-encrypted", "error", err)
|
||||
vcPass = []byte(vcEp)
|
||||
if cipherText, encErr := a.Encrypt([]byte(vcEp)); encErr != nil {
|
||||
logger.Warn("failed to encrypt vcenter credentials", "error", encErr)
|
||||
} else {
|
||||
s.Values.Settings.VcenterPassword = cipherText
|
||||
if err := s.WriteYMLSettings(); err != nil {
|
||||
logger.Warn("failed to update settings with encrypted vcenter password", "error", err)
|
||||
} else {
|
||||
logger.Info("encrypted vcenter password stored in settings file")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
creds := vcenter.VcenterLogin{
|
||||
Username: strings.TrimSpace(s.Values.Settings.VcenterUsername),
|
||||
Password: string(vcPass),
|
||||
Insecure: s.Values.Settings.VcenterInsecure,
|
||||
}
|
||||
if creds.Username == "" {
|
||||
logger.Error("No vcenter username configured")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
slog.Debug("Created cron job", "job", job)
// Set a recognizable User-Agent for vCenter sessions.
ua := "vCTP"
if sha1ver != "" {
ua = fmt.Sprintf("vCTP/%s", sha1ver)
}
vcenter.SetUserAgent(ua)

s.Start()
// Prepare the task scheduler
c, err := gocron.NewScheduler()
if err != nil {
logger.Error("failed to create scheduler", "error", err)
os.Exit(1)
}

// Pass useful information to the cron jobs
ct := &tasks.CronTask{
Logger: logger,
Database: database,
Settings: s,
VcCreds: &creds,
FirstHourlySnapshotCheck: true,
}

// One-shot mode: run a single inventory snapshot across all configured vCenters and exit.
if *runInventory {
logger.Info("Running one-shot inventory snapshot across all vCenters")
ct.RunVcenterSnapshotHourly(ctx, logger, true)
logger.Info("One-shot inventory snapshot complete; exiting")
return
}

cronSnapshotFrequency = durationFromSeconds(s.Values.Settings.VcenterInventorySnapshotSeconds, 3600)
logger.Debug("Setting VM inventory snapshot cronjob frequency to", "frequency", cronSnapshotFrequency)

cronAggregateFrequency = durationFromSeconds(s.Values.Settings.VcenterInventoryAggregateSeconds, 86400)
logger.Debug("Setting VM inventory daily aggregation cronjob frequency to", "frequency", cronAggregateFrequency)

startsAt3 := alignStart(time.Now(), cronSnapshotFrequency)
job3, err := c.NewJob(
gocron.DurationJob(cronSnapshotFrequency),
gocron.NewTask(func() {
ct.RunVcenterSnapshotHourly(ctx, logger, false)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
gocron.WithStartAt(gocron.WithStartDateTime(startsAt3)),
)
if err != nil {
logger.Error("failed to start vcenter inventory snapshot cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created vcenter inventory snapshot cron job", "job", job3.ID(), "starting_at", startsAt3)

startsAt4 := time.Now().Add(cronAggregateFrequency)
if cronAggregateFrequency == time.Hour*24 {
now := time.Now()
startsAt4 = time.Date(now.Year(), now.Month(), now.Day()+1, 0, 10, 0, 0, now.Location())
}
job4, err := c.NewJob(
gocron.DurationJob(cronAggregateFrequency),
gocron.NewTask(func() {
ct.RunVcenterDailyAggregate(ctx, logger)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
gocron.WithStartAt(gocron.WithStartDateTime(startsAt4)),
)
if err != nil {
logger.Error("failed to start vcenter inventory aggregation cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created vcenter inventory aggregation cron job", "job", job4.ID(), "starting_at", startsAt4)

monthlyCron := strings.TrimSpace(s.Values.Settings.MonthlyAggregationCron)
if monthlyCron == "" {
monthlyCron = "10 3 1 * *"
}
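// The default "10 3 1 * *" is a standard five-field cron expression (gocron.CronJob is called with seconds disabled): run at 03:10 on the first day of every month.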
logger.Debug("Setting monthly aggregation cron schedule", "cron", monthlyCron)
job5, err := c.NewJob(
gocron.CronJob(monthlyCron, false),
gocron.NewTask(func() {
ct.RunVcenterMonthlyAggregate(ctx, logger)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
)
if err != nil {
logger.Error("failed to start vcenter monthly aggregation cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created vcenter monthly aggregation cron job", "job", job5.ID())

snapshotCleanupCron := strings.TrimSpace(s.Values.Settings.SnapshotCleanupCron)
if snapshotCleanupCron == "" {
snapshotCleanupCron = "30 2 * * *"
}
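// The default "30 2 * * *" runs the snapshot cleanup daily at 02:30.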
job6, err := c.NewJob(
gocron.CronJob(snapshotCleanupCron, false),
gocron.NewTask(func() {
ct.RunSnapshotCleanup(ctx, logger)
if strings.EqualFold(s.Values.Settings.DatabaseDriver, "sqlite") {
logger.Info("Performing sqlite VACUUM after snapshot cleanup")
if _, err := ct.Database.DB().ExecContext(ctx, "VACUUM"); err != nil {
logger.Warn("VACUUM failed after snapshot cleanup", "error", err)
} else {
logger.Debug("VACUUM completed after snapshot cleanup")
}
}
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
)
if err != nil {
logger.Error("failed to start snapshot cleanup cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created snapshot cleanup cron job", "job", job6.ID())

// Retry failed hourly snapshots
retrySeconds := s.Values.Settings.HourlySnapshotRetrySeconds
if retrySeconds <= 0 {
retrySeconds = 300
}
job7, err := c.NewJob(
gocron.DurationJob(time.Duration(retrySeconds)*time.Second),
gocron.NewTask(func() {
ct.RunHourlySnapshotRetry(ctx, logger)
}), gocron.WithSingletonMode(gocron.LimitModeReschedule),
)
if err != nil {
logger.Error("failed to start hourly snapshot retry cron job", "error", err)
os.Exit(1)
}
logger.Debug("Created hourly snapshot retry cron job", "job", job7.ID(), "interval_seconds", retrySeconds)

// start cron scheduler
c.Start()

// Start server
r := router.New(logger, database, buildTime, sha1ver, runtime.Version(), &creds, a, s)
svr := server.New(
logger,
s,
c,
cancel,
bindAddress,
server.WithRouter(router.New(logger, database, buildTime, sha1ver, runtime.Version())),
server.WithRouter(r),
server.SetTls(bindDisableTls),
server.SetCertificate(tlsCertFilename),
server.SetPrivateKey(tlsKeyFilename),
)

svr.DisableTls(bindDisableTls)
svr.SetCertificate(tlsCertFilename)
svr.SetPrivateKey(tlsKeyFilename)
//logger.Debug("Server configured", "object", svr)

svr.StartAndWait()

os.Exit(0)
}

// alignStart snaps the first run to a sensible boundary (hour or 15-minute block) when possible.
func alignStart(now time.Time, freq time.Duration) time.Time {
if freq == time.Hour {
return now.Truncate(time.Hour).Add(time.Hour)
}
quarter := 15 * time.Minute
if freq%quarter == 0 {
return now.Truncate(quarter).Add(quarter)
}
return now.Add(freq)
}
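
// Illustrative alignStart results, assuming "now" is 10:07 local time:
//   alignStart(now, time.Hour)       -> 11:00 (next hour boundary)
//   alignStart(now, 30*time.Minute)  -> 10:15 (next 15-minute boundary, since 30m is a multiple of 15m)
//   alignStart(now, 7*time.Minute)   -> 10:14 (no boundary match, falls back to now + freq)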

func durationFromSeconds(value int, fallback int) time.Duration {
if value <= 0 {
return time.Second * time.Duration(fallback)
}
return time.Second * time.Duration(value)
}

func deriveEncryptionKey(logger *slog.Logger) []byte {
if runtime.GOOS == "linux" {
if data, err := os.ReadFile("/sys/class/dmi/id/product_uuid"); err == nil {
src := strings.TrimSpace(string(data))
if src != "" {
sum := sha256.Sum256([]byte(src))
logger.Debug("derived encryption key from BIOS UUID")
return sum[:]
}
}
if data, err := os.ReadFile("/etc/machine-id"); err == nil {
src := strings.TrimSpace(string(data))
if src != "" {
sum := sha256.Sum256([]byte(src))
logger.Debug("derived encryption key from machine-id")
return sum[:]
}
}
}
logger.Warn("using fallback encryption key; hardware UUID not available")
return []byte(fallbackEncryptionKey)
}

47 scripts/drone.sh Executable file
@@ -0,0 +1,47 @@
#!/usr/bin/env bash

# disable CGO for cross-compiling
export CGO_ENABLED=0

package_name=vctp
package=./
commit=$(git rev-parse HEAD)
buildtime=$(date +%Y-%m-%dT%T%z)
# Extract the version from yml
package_version=$(grep 'version:' "$package_name.yml" | awk '{print $2}' | tr -d '"' | sed 's/^v//')

host_os=$(uname -s | tr '[:upper:]' '[:lower:]')
host_arch=$(uname -m)
platforms=("linux/amd64")
if [[ "$host_os" == "darwin" && ( "$host_arch" == "x86_64" || "$host_arch" == "amd64" || "$host_arch" == "arm64" ) ]]; then
platforms=("darwin/amd64")
fi

echo Building: $package_name
echo - Version $package_version
echo - Commit $commit
echo - Build Time $buildtime
mkdir -p build
for platform in "${platforms[@]}"
do
platform_split=(${platform//\// })
GOOS=${platform_split[0]}
GOARCH=${platform_split[1]}
output_name=$package_name'-'$GOOS'-'$GOARCH
if [ $GOOS = "windows" ]; then
output_name+='.exe'
fi

starttime=$(TZ=Australia/Sydney date +%Y-%m-%dT%T%z)
echo "build commences at $starttime"
env GOOS=$GOOS GOARCH=$GOARCH go build -trimpath -ldflags="-X main.version=$package_version -X main.sha1ver=$commit -X main.buildTime=$buildtime" -o build/$output_name $package
if [ $? -ne 0 ]; then
echo 'An error has occurred! Aborting the script execution...'
exit 1
fi
#gzip build/$output_name
echo "build complete at $buildtime : $output_name"
#sha256sum build/${output_name}.gz > build/${output_name}_checksum.txt
done

ls -lah build
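Note: the -ldflags "-X" values in the build line above only take effect if the application's main package declares matching string variables. A minimal sketch of what that declaration would look like; the variable names come from the ldflags and from their use in the Go code above, but the declaration block itself is assumed and not part of this diff:

package main

// Populated at build time via -ldflags "-X main.version=... -X main.sha1ver=... -X main.buildTime=..."
var (
	version   string
	sha1ver   string
	buildTime string
)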
59 scripts/update-swagger-ui.sh Executable file
@@ -0,0 +1,59 @@
#!/usr/bin/env bash
set -euo pipefail

# Usage: ./update-swagger-ui.sh [version]
# Example: ./update-swagger-ui.sh v5.17.14
# If no version is provided, the default below is used.
VERSION="${1:-v5.31.0}"

TARGET_DIR="server/router/swagger-ui-dist"
TARBALL_URL="https://github.com/swagger-api/swagger-ui/archive/refs/tags/${VERSION}.tar.gz"

echo ">> Fetching Swagger UI ${VERSION} …"
tmpdir="$(mktemp -d)"
cleanup() { rm -rf "$tmpdir"; }
trap cleanup EXIT

# Requirements check
for cmd in curl tar; do
command -v "$cmd" >/dev/null 2>&1 || { echo "ERROR: $cmd not found"; exit 1; }
done

# Download & unpack
curl -fsSL "$TARBALL_URL" | tar -xz -C "$tmpdir"
SRC_DIR="${tmpdir}/swagger-ui-${VERSION#v}/dist"
if [[ ! -d "$SRC_DIR" ]]; then
echo "ERROR: Unpacked dist not found at $SRC_DIR"
exit 1
fi

# Replace target
rm -rf "$TARGET_DIR"
mkdir -p "$TARGET_DIR"
# Use cp -a for portability (avoids rsync dependency)
cp -a "${SRC_DIR}/." "$TARGET_DIR/"

INDEX="${TARGET_DIR}/swagger-initializer.js"
if [[ ! -f "$INDEX" ]]; then
echo "ERROR: ${INDEX} not found after copy"
exit 1
fi

echo ">> Patching swagger-initializer.js to point at /swagger.json"

if sed --version >/dev/null 2>&1; then
SED_INPLACE=(-i)
else
SED_INPLACE=(-i '')
fi

append_validator=$'/url:[[:space:]]*"[^"]*swagger\\.json"[[:space:]]*,?$/a\\\n validatorUrl: null,'

sed "${SED_INPLACE[@]}" -E \
-e 's#configUrl:[[:space:]]*["'\''"][^"'\''"]*["'\''"]#url: "/swagger.json"#' \
-e 's#url:[[:space:]]*["'\''"][^"'\''"]*["'\''"]#url: "/swagger.json"#' \
-e 's#urls:[[:space:]]*\[[^]]*\]#url: "/swagger.json"#' \
-e "$append_validator" \
"$INDEX"

echo ">> Done. Files are in ${TARGET_DIR}"
205 server/handler/dailyCreationDiagnostics.go Normal file
@@ -0,0 +1,205 @@
package handler

import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"vctp/db"
"vctp/server/models"
)

// DailyCreationDiagnostics returns missing CreationTime diagnostics for a daily summary table.
// @Summary Daily summary CreationTime diagnostics
// @Description Returns counts of daily summary rows missing CreationTime and sample rows for the given date.
// @Tags diagnostics
// @Produce json
// @Param date query string true "Daily date (YYYY-MM-DD)"
// @Success 200 {object} models.DailyCreationDiagnosticsResponse "Diagnostics result"
// @Failure 400 {object} models.ErrorResponse "Invalid request"
// @Failure 404 {object} models.ErrorResponse "Summary not found"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/diagnostics/daily-creation [get]
func (h *Handler) DailyCreationDiagnostics(w http.ResponseWriter, r *http.Request) {
dateValue := strings.TrimSpace(r.URL.Query().Get("date"))
if dateValue == "" {
writeJSONError(w, http.StatusBadRequest, "date is required")
return
}

loc := time.Now().Location()
parsed, err := time.ParseInLocation("2006-01-02", dateValue, loc)
if err != nil {
writeJSONError(w, http.StatusBadRequest, "date must be YYYY-MM-DD")
return
}

tableName := fmt.Sprintf("inventory_daily_summary_%s", parsed.Format("20060102"))
if _, err := db.SafeTableName(tableName); err != nil {
writeJSONError(w, http.StatusBadRequest, "invalid summary table name")
return
}

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

dbConn := h.Database.DB()
if !db.TableExists(ctx, dbConn, tableName) {
writeJSONError(w, http.StatusNotFound, "daily summary table not found")
return
}

var totalRows int64
countQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s`, tableName)
if err := dbConn.GetContext(ctx, &totalRows, countQuery); err != nil {
h.Logger.Warn("daily creation diagnostics count failed", "table", tableName, "error", err)
writeJSONError(w, http.StatusInternalServerError, "failed to read summary rows")
return
}

var missingTotal int64
missingQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE "CreationTime" IS NULL OR "CreationTime" = 0`, tableName)
if err := dbConn.GetContext(ctx, &missingTotal, missingQuery); err != nil {
h.Logger.Warn("daily creation diagnostics missing count failed", "table", tableName, "error", err)
writeJSONError(w, http.StatusInternalServerError, "failed to read missing creation rows")
return
}

var avgIsPresentLtOne int64
avgPresenceQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE "AvgIsPresent" IS NOT NULL AND "AvgIsPresent" < 0.999999`, tableName)
if err := dbConn.GetContext(ctx, &avgIsPresentLtOne, avgPresenceQuery); err != nil {
h.Logger.Warn("daily creation diagnostics avg-is-present count failed", "table", tableName, "error", err)
writeJSONError(w, http.StatusInternalServerError, "failed to read avg is present rows")
return
}

var missingPartialCount int64
missingPartialQuery := fmt.Sprintf(`SELECT COUNT(*) FROM %s WHERE ("CreationTime" IS NULL OR "CreationTime" = 0) AND "AvgIsPresent" IS NOT NULL AND "AvgIsPresent" < 0.999999`, tableName)
if err := dbConn.GetContext(ctx, &missingPartialCount, missingPartialQuery); err != nil {
h.Logger.Warn("daily creation diagnostics missing partial count failed", "table", tableName, "error", err)
writeJSONError(w, http.StatusInternalServerError, "failed to read missing partial rows")
return
}

missingPct := 0.0
if totalRows > 0 {
missingPct = float64(missingTotal) * 100 / float64(totalRows)
}

byVcenter := make([]models.DailyCreationMissingByVcenter, 0)
byVcenterQuery := fmt.Sprintf(`
SELECT "Vcenter", COUNT(*) AS missing_count
FROM %s
WHERE "CreationTime" IS NULL OR "CreationTime" = 0
GROUP BY "Vcenter"
ORDER BY missing_count DESC
`, tableName)
if rows, err := dbConn.QueryxContext(ctx, byVcenterQuery); err != nil {
h.Logger.Warn("daily creation diagnostics by-vcenter failed", "table", tableName, "error", err)
} else {
for rows.Next() {
var vcenter string
var count int64
if err := rows.Scan(&vcenter, &count); err != nil {
continue
}
byVcenter = append(byVcenter, models.DailyCreationMissingByVcenter{
Vcenter: vcenter,
MissingCount: count,
})
}
rows.Close()
}

const sampleLimit = 10
samples := make([]models.DailyCreationMissingSample, 0, sampleLimit)
sampleQuery := fmt.Sprintf(`
SELECT "Vcenter","VmId","VmUuid","Name","SamplesPresent","AvgIsPresent","SnapshotTime"
FROM %s
WHERE "CreationTime" IS NULL OR "CreationTime" = 0
ORDER BY "SamplesPresent" DESC
LIMIT %d
`, tableName, sampleLimit)
if rows, err := dbConn.QueryxContext(ctx, sampleQuery); err != nil {
h.Logger.Warn("daily creation diagnostics sample failed", "table", tableName, "error", err)
} else {
for rows.Next() {
var (
vcenter string
vmId, vmUuid, name sql.NullString
samplesPresent, snapshotTime sql.NullInt64
avgIsPresent sql.NullFloat64
)
if err := rows.Scan(&vcenter, &vmId, &vmUuid, &name, &samplesPresent, &avgIsPresent, &snapshotTime); err != nil {
continue
}
samples = append(samples, models.DailyCreationMissingSample{
Vcenter: vcenter,
VmId: vmId.String,
VmUuid: vmUuid.String,
Name: name.String,
SamplesPresent: samplesPresent.Int64,
AvgIsPresent: avgIsPresent.Float64,
SnapshotTime: snapshotTime.Int64,
})
}
rows.Close()
}

partialSamples := make([]models.DailyCreationMissingSample, 0, sampleLimit)
partialSampleQuery := fmt.Sprintf(`
SELECT "Vcenter","VmId","VmUuid","Name","SamplesPresent","AvgIsPresent","SnapshotTime"
FROM %s
WHERE ("CreationTime" IS NULL OR "CreationTime" = 0)
AND "AvgIsPresent" IS NOT NULL
AND "AvgIsPresent" < 0.999999
ORDER BY "SamplesPresent" DESC
LIMIT %d
`, tableName, sampleLimit)
if rows, err := dbConn.QueryxContext(ctx, partialSampleQuery); err != nil {
h.Logger.Warn("daily creation diagnostics partial sample failed", "table", tableName, "error", err)
} else {
for rows.Next() {
var (
vcenter string
vmId, vmUuid, name sql.NullString
samplesPresent, snapshotTime sql.NullInt64
avgIsPresent sql.NullFloat64
)
if err := rows.Scan(&vcenter, &vmId, &vmUuid, &name, &samplesPresent, &avgIsPresent, &snapshotTime); err != nil {
continue
}
partialSamples = append(partialSamples, models.DailyCreationMissingSample{
Vcenter: vcenter,
VmId: vmId.String,
VmUuid: vmUuid.String,
Name: name.String,
SamplesPresent: samplesPresent.Int64,
AvgIsPresent: avgIsPresent.Float64,
SnapshotTime: snapshotTime.Int64,
})
}
rows.Close()
}

response := models.DailyCreationDiagnosticsResponse{
Status: "OK",
Date: parsed.Format("2006-01-02"),
Table: tableName,
TotalRows: totalRows,
MissingCreationCount: missingTotal,
MissingCreationPct: missingPct,
AvgIsPresentLtOneCount: avgIsPresentLtOne,
MissingCreationPartialCount: missingPartialCount,
MissingByVcenter: byVcenter,
Samples: samples,
MissingCreationPartialSamples: partialSamples,
}

w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(response)
}
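The handler above fills models.DailyCreationDiagnosticsResponse. A rough sketch of those model structs, inferred only from the field names and scan targets used in this file (the real definitions live in vctp/server/models and are not part of this diff; JSON tags are omitted because they are not visible here):

// Hypothetical reconstruction for illustration only.
type DailyCreationMissingByVcenter struct {
	Vcenter      string
	MissingCount int64
}

type DailyCreationMissingSample struct {
	Vcenter        string
	VmId           string
	VmUuid         string
	Name           string
	SamplesPresent int64
	AvgIsPresent   float64
	SnapshotTime   int64
}

type DailyCreationDiagnosticsResponse struct {
	Status                        string
	Date                          string
	Table                         string
	TotalRows                     int64
	MissingCreationCount          int64
	MissingCreationPct            float64
	AvgIsPresentLtOneCount        int64
	MissingCreationPartialCount   int64
	MissingByVcenter              []DailyCreationMissingByVcenter
	Samples                       []DailyCreationMissingSample
	MissingCreationPartialSamples []DailyCreationMissingSample
}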
71 server/handler/encryptData.go Normal file
@@ -0,0 +1,71 @@
package handler

import (
"encoding/json"
"fmt"
"io"
"net/http"
)

// EncryptData encrypts a plaintext value and returns the ciphertext.
// @Summary Encrypt data
// @Description Encrypts a plaintext value and returns the ciphertext.
// @Tags crypto
// @Accept json
// @Produce json
// @Param payload body map[string]string true "Plaintext payload"
// @Success 200 {object} models.StatusMessageResponse "Ciphertext response"
// @Failure 500 {object} models.ErrorResponse "Server error"
// @Router /api/encrypt [post]
func (h *Handler) EncryptData(w http.ResponseWriter, r *http.Request) {
//ctx := context.Background()
var cipherText string

reqBody, err := io.ReadAll(r.Body)
if err != nil {
h.Logger.Error("Invalid data received", "error", err)
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Invalid data received")
return
} else {
h.Logger.Debug("received input data", "length", len(reqBody))
}

// get the json input
var input map[string]string
if err := json.Unmarshal(reqBody, &input); err != nil {
h.Logger.Error("unable to unmarshal json", "error", err)
prettyPrint(reqBody)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusInternalServerError)
json.NewEncoder(w).Encode(map[string]string{
"status": "ERROR",
"message": fmt.Sprintf("Unable to unmarshal JSON in request body: '%s'", err),
})
return
} else {
h.Logger.Debug("successfully decoded JSON")
//prettyPrint(input)
}

//cipher, err := h.Secret.Encrypt()
for k := range input {
//h.Logger.Debug("foo", "key", k, "value", input[k])
cipherText, err = h.Secret.Encrypt([]byte(input[k]))
if err != nil {
h.Logger.Error("Unable to encrypt", "error", err)
} else {
h.Logger.Debug("Encrypted plaintext", "length", len(input[k]), "ciphertext", cipherText)
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(map[string]string{
"status": "OK",
"message": cipherText,
})
return
}

}

// return the result

}
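A minimal client sketch for the endpoint above. The base address and the JSON key name are illustrative only; the handler encrypts the value of whichever single key it finds in the posted object:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Hypothetical caller for POST /api/encrypt (address assumed).
	body := bytes.NewBufferString(`{"plaintext": "my-secret"}`)
	resp, err := http.Post("http://localhost:8080/api/encrypt", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // e.g. {"status":"OK","message":"<ciphertext>"}
}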
@@ -1,12 +1,11 @@
package handler

import (
"context"
"log/slog"
"net/http"
"vctp/db"

"github.com/a-h/templ"
"vctp/internal/secrets"
"vctp/internal/settings"
"vctp/internal/vcenter"
)

// Handler handles requests.
@@ -16,13 +15,7 @@ type Handler struct {
BuildTime string
SHA1Ver string
GoVersion string
}

func (h *Handler) html(ctx context.Context, w http.ResponseWriter, status int, t templ.Component) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.WriteHeader(status)

if err := t.Render(ctx, w); err != nil {
h.Logger.Error("Failed to render component", "error", err)
}
VcCreds *vcenter.VcenterLogin
Secret *secrets.Secrets
Settings *settings.Settings
}

@@ -5,7 +5,14 @@ import (
"vctp/components/views"
)

// Home handles the home page.
// Home renders the web UI home page.
// @Summary Home page
// @Description Renders the main UI page.
// @Tags ui
// @Produce text/html
// @Success 200 {string} string "HTML page"
// @Failure 500 {string} string "Render failed"
// @Router / [get]
func (h *Handler) Home(w http.ResponseWriter, r *http.Request) {
//h.html(r.Context(), w, http.StatusOK, core.HTML("Example Site", home.Home()))

17 server/handler/metrics.go Normal file
@@ -0,0 +1,17 @@
package handler

import (
"net/http"
"vctp/internal/metrics"
)

// Metrics exposes Prometheus metrics.
// @Summary Prometheus metrics
// @Description Exposes Prometheus metrics for vctp.
// @Tags metrics
// @Produce plain
// @Success 200 "Prometheus metrics"
// @Router /metrics [get]
func (h *Handler) Metrics(w http.ResponseWriter, r *http.Request) {
metrics.Handler().ServeHTTP(w, r)
}
Some files were not shown because too many files have changed in this diff.