diff --git a/.drone.yml b/.drone.yml index e2d5e55..000517a 100644 --- a/.drone.yml +++ b/.drone.yml @@ -26,16 +26,23 @@ steps: environment: CGO_ENABLED: 0 GOMODCACHE: '/drone/src/pkg.mod' - GOCACHE: '/drone/src/pkg.build' + GOCACHE: '/drone/src/pkg.build' + GOBIN: '/drone/src/pkg.tools' volumes: - name: shared path: /shared commands: - #- cp /shared/index.html ./www/ - #- go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest - #- sqlc generate - - chmod +x .drone.sh - - ./.drone.sh + - export PATH=/drone/src/pkg.tools:$PATH + - go install github.com/a-h/templ/cmd/templ@latest + - go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest + - go install github.com/swaggo/swag/cmd/swag@latest + - sqlc generate + - templ generate -path ./components + - swag init --exclude "pkg.mod,pkg.build,pkg.tools" -o server/router/docs + - chmod +x ./scripts/*.sh + - ./scripts/update-swagger-ui.sh + - ./scripts/drone.sh + - cp ./build/cbs-linux-amd64 /shared/ - name: dell-sftp-deploy image: hypervtechnics/drone-sftp @@ -78,4 +85,4 @@ volumes: temp: {} - name: cache host: - path: /var/lib/cache \ No newline at end of file + path: /var/lib/cache diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 9d69038..0000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,92 +0,0 @@ -name: CI -on: - push: - branches: - - main - paths-ignore: - - '.github/**' - pull_request: - branches: - - main -jobs: - lint: - name: Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - - run: go mod download - - run: go install github.com/a-h/templ/cmd/templ@v0.2.771 - - run: make generate-templ - - uses: sqlc-dev/setup-sqlc@v4 - with: - sqlc-version: '1.27.0' - - run: sqlc vet - - run: sqlc generate - - name: Lint - uses: golangci/golangci-lint-action@v3 - with: - version: v1.54 - skip-pkg-cache: true - test: - name: Test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - - run: go mod download - - run: go install github.com/a-h/templ/cmd/templ@v0.2.771 - - run: make generate-templ - - uses: sqlc-dev/setup-sqlc@v4 - with: - sqlc-version: '1.27.0' - - run: sqlc generate - - name: Test - run: go test -race ./... - e2e: - name: End-to-End - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - - run: go mod download - - run: go install github.com/a-h/templ/cmd/templ@v0.2.771 - - run: templ generate -path ./components - - uses: sqlc-dev/setup-sqlc@v4 - with: - sqlc-version: '1.27.0' - - run: sqlc generate - - run: go test ./... 
-tags=e2e - docker-publish: - name: Publish Docker - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - if: github.event_name == 'push' && github.ref == 'refs/heads/main' - needs: - - lint - - test - - e2e - steps: - - uses: actions/checkout@v4 - - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - uses: docker/metadata-action@v5 - id: meta - with: - images: ghcr.io/piszmog/vctp - - uses: docker/build-push-action@v5 - with: - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index c083c8f..0000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,77 +0,0 @@ -name: Release -on: - workflow_dispatch: - inputs: - version: - description: The version to release (e.g. v1.0.0) - required: true - type: string - -jobs: - release: - name: Release - permissions: - contents: write - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: 1.22.x - - run: go mod download - - run: go install github.com/a-h/templ/cmd/templ@v0.2.771 - - name: Generate Templ Files - run: make generate-templ - - name: Generate CSS - run: | - curl -sLO https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64 - chmod +x tailwindcss-linux-x64 - mv tailwindcss-linux-x64 tailwindcss - ./tailwindcss -i ./styles/input.css -o ./dist/assets/css/output@${{ github.event.inputs.version }}.css --minify - - uses: sqlc-dev/setup-sqlc@v4 - with: - sqlc-version: '1.27.0' - - run: sqlc generate - - name: Build Application - run: go build -o ./app -ldflags="-s -w -X version.Value=${{ github.event.inputs.version }}" - - name: Create Tag - uses: piszmog/create-tag@v1 - with: - version: ${{ github.event.inputs.version }} - token: ${{ secrets.GITHUB_TOKEN }} - - name: Release - uses: softprops/action-gh-release@v2 - with: - name: ${{ github.event.inputs.version }} - tag_name: ${{ github.event.inputs.version }} - generate_release_notes: true - files: app - publish: - name: Publish Docker - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - needs: - - release - steps: - - uses: actions/checkout@v4 - - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - uses: docker/metadata-action@v5 - id: meta - with: - images: ghcr.io/piszmog/my-app - tags: | - type=raw,value=${{ github.event.inputs.version }} - - uses: docker/build-push-action@v5 - with: - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - VERSION=$${{ github.event.inputs.version }} diff --git a/README.md b/README.md index ab7786b..283019b 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ This is where `templ` files live. Anything you want to render to the user goes h ### DB This is the directory that `sqlc` generates to. Update `queries.sql` to build -your database operations. +your database operations. The schema for `sqlc` lives in `db/schema.sql`. Once `queries.sql` is updated, run `make generate-sql` to update the generated models @@ -133,6 +133,12 @@ DB_DRIVER=postgres DB_URL=postgres://user:pass@localhost:5432/vctp?sslmode=disab PostgreSQL migrations live in `db/migrations_postgres`, while SQLite migrations remain in `db/migrations`. 
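For the SQLite backend, a local configuration might look like the following sketch (the driver name and database path are illustrative assumptions, not values confirmed by this repository; the exact strings depend on how the binary registers its SQLite driver and parses `DB_URL`):

```
# hypothetical local SQLite setup — adjust to the driver actually registered by the app
DB_DRIVER=sqlite
DB_URL=vctp.db
```
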
+#### Snapshot Retention +Hourly and daily snapshot table retention can be configured with environment variables: + +- `HOURLY_SNAPSHOT_MAX_AGE_DAYS` (default: 60) +- `DAILY_SNAPSHOT_MAX_AGE_MONTHS` (default: 12) + ### Dist This is where your assets live. Any Javascript, images, or styling needs to go in the diff --git a/components/core/footer_templ.go b/components/core/footer_templ.go index 7d4f27b..e973e82 100644 --- a/components/core/footer_templ.go +++ b/components/core/footer_templ.go @@ -1,6 +1,6 @@ // Code generated by templ - DO NOT EDIT. -// templ: version: v0.2.778 +// templ: version: v0.3.977 package core //lint:file-ignore SA4006 This context is only used if a nested component is present. @@ -29,11 +29,11 @@ func Footer() templ.Component { templ_7745c5c3_Var1 = templ.NopComponent } ctx = templ.ClearChildren(ctx) - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - return templ_7745c5c3_Err + return nil }) } diff --git a/components/core/header_templ.go b/components/core/header_templ.go index 6367c81..993badd 100644 --- a/components/core/header_templ.go +++ b/components/core/header_templ.go @@ -1,6 +1,6 @@ // Code generated by templ - DO NOT EDIT. -// templ: version: v0.2.778 +// templ: version: v0.3.977 package core //lint:file-ignore SA4006 This context is only used if a nested component is present. @@ -31,24 +31,24 @@ func Header() templ.Component { templ_7745c5c3_Var1 = templ.NopComponent } ctx = templ.ClearChildren(ctx) - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("vCTP APIvCTP API") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "\" rel=\"stylesheet\">") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - return templ_7745c5c3_Err + return nil }) } diff --git a/components/views/index_templ.go b/components/views/index_templ.go index f830e0e..7e67c0d 100644 --- a/components/views/index_templ.go +++ b/components/views/index_templ.go @@ -1,6 +1,6 @@ // Code generated by templ - DO NOT EDIT. -// templ: version: v0.2.778 +// templ: version: v0.3.977 package views //lint:file-ignore SA4006 This context is only used if a nested component is present. @@ -39,7 +39,7 @@ func Index(info BuildInfo) templ.Component { templ_7745c5c3_Var1 = templ.NopComponent } ctx = templ.ClearChildren(ctx) - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } @@ -47,46 +47,46 @@ func Index(info BuildInfo) templ.Component { if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("

Build Information

Build Time: ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "

Build Information

Build Time: ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } var templ_7745c5c3_Var2 string templ_7745c5c3_Var2, templ_7745c5c3_Err = templ.JoinStringErrs(info.BuildTime) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 21, Col: 80} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/index.templ`, Line: 21, Col: 80} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var2)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("

SHA1 Version: ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

SHA1 Version: ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } var templ_7745c5c3_Var3 string templ_7745c5c3_Var3, templ_7745c5c3_Err = templ.JoinStringErrs(info.SHA1Ver) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 22, Col: 80} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/index.templ`, Line: 22, Col: 80} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var3)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("

Go Runtime Version: ") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

Go Runtime Version: ") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } var templ_7745c5c3_Var4 string templ_7745c5c3_Var4, templ_7745c5c3_Err = templ.JoinStringErrs(info.GoVersion) if templ_7745c5c3_Err != nil { - return templ.Error{Err: templ_7745c5c3_Err, FileName: `views/index.templ`, Line: 23, Col: 88} + return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/index.templ`, Line: 23, Col: 88} } _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var4)) if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("

") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 5, "

") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } @@ -94,11 +94,11 @@ func Index(info BuildInfo) templ.Component { if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString("") + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 6, "") if templ_7745c5c3_Err != nil { return templ_7745c5c3_Err } - return templ_7745c5c3_Err + return nil }) } diff --git a/components/views/snapshots.templ b/components/views/snapshots.templ new file mode 100644 index 0000000..0d8b932 --- /dev/null +++ b/components/views/snapshots.templ @@ -0,0 +1,47 @@ +package views + +import ( + "vctp/components/core" +) + +type SnapshotEntry struct { + Label string + Link string +} + +templ SnapshotHourlyList(entries []SnapshotEntry) { + @SnapshotListPage("Hourly Inventory Snapshots", "inventory snapshots captured hourly", entries) +} + +templ SnapshotDailyList(entries []SnapshotEntry) { + @SnapshotListPage("Daily Inventory Snapshots", "daily summaries of hourly inventory snapshots", entries) +} + +templ SnapshotMonthlyList(entries []SnapshotEntry) { + @SnapshotListPage("Monthly Inventory Snapshots", "monthly summary aggregated from daily snapshots", entries) +} + +templ SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) { + + + @core.Header() + +
+
+

{title}

+

{subtitle}

+ +
+
+ + @core.Footer() + +} diff --git a/components/views/snapshots_templ.go b/components/views/snapshots_templ.go new file mode 100644 index 0000000..f11a2a8 --- /dev/null +++ b/components/views/snapshots_templ.go @@ -0,0 +1,214 @@ +// Code generated by templ - DO NOT EDIT. + +// templ: version: v0.3.977 +package views + +//lint:file-ignore SA4006 This context is only used if a nested component is present. + +import "github.com/a-h/templ" +import templruntime "github.com/a-h/templ/runtime" + +import ( + "vctp/components/core" +) + +type SnapshotEntry struct { + Label string + Link string +} + +func SnapshotHourlyList(entries []SnapshotEntry) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var1 := templ.GetChildren(ctx) + if templ_7745c5c3_Var1 == nil { + templ_7745c5c3_Var1 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = SnapshotListPage("Hourly Inventory Snapshots", "inventory snapshots captured hourly", entries).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func SnapshotDailyList(entries []SnapshotEntry) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var2 := templ.GetChildren(ctx) + if templ_7745c5c3_Var2 == nil { + templ_7745c5c3_Var2 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = SnapshotListPage("Daily Inventory Snapshots", "daily summaries of hourly inventory snapshots", entries).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func SnapshotMonthlyList(entries []SnapshotEntry) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + 
templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var3 := templ.GetChildren(ctx) + if templ_7745c5c3_Var3 == nil { + templ_7745c5c3_Var3 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = SnapshotListPage("Monthly Inventory Snapshots", "monthly summary aggregated from daily snapshots", entries).Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +func SnapshotListPage(title string, subtitle string, entries []SnapshotEntry) templ.Component { + return templruntime.GeneratedTemplate(func(templ_7745c5c3_Input templruntime.GeneratedComponentInput) (templ_7745c5c3_Err error) { + templ_7745c5c3_W, ctx := templ_7745c5c3_Input.Writer, templ_7745c5c3_Input.Context + if templ_7745c5c3_CtxErr := ctx.Err(); templ_7745c5c3_CtxErr != nil { + return templ_7745c5c3_CtxErr + } + templ_7745c5c3_Buffer, templ_7745c5c3_IsBuffer := templruntime.GetBuffer(templ_7745c5c3_W) + if !templ_7745c5c3_IsBuffer { + defer func() { + templ_7745c5c3_BufErr := templruntime.ReleaseBuffer(templ_7745c5c3_Buffer) + if templ_7745c5c3_Err == nil { + templ_7745c5c3_Err = templ_7745c5c3_BufErr + } + }() + } + ctx = templ.InitializeContext(ctx) + templ_7745c5c3_Var4 := templ.GetChildren(ctx) + if templ_7745c5c3_Var4 == nil { + templ_7745c5c3_Var4 = templ.NopComponent + } + ctx = templ.ClearChildren(ctx) + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 1, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = core.Header().Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 2, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var5 string + templ_7745c5c3_Var5, templ_7745c5c3_Err = templ.JoinStringErrs(title) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/snapshots.templ`, Line: 31, Col: 42} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var5)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 3, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + var templ_7745c5c3_Var6 string + templ_7745c5c3_Var6, templ_7745c5c3_Err = templ.JoinStringErrs(subtitle) + if templ_7745c5c3_Err != nil { + return templ.Error{Err: templ_7745c5c3_Err, FileName: `components/views/snapshots.templ`, Line: 32, Col: 44} + } + _, templ_7745c5c3_Err = templ_7745c5c3_Buffer.WriteString(templ.EscapeString(templ_7745c5c3_Var6)) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 4, "

") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = core.Footer().Render(ctx, templ_7745c5c3_Buffer) + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + templ_7745c5c3_Err = templruntime.WriteString(templ_7745c5c3_Buffer, 9, "") + if templ_7745c5c3_Err != nil { + return templ_7745c5c3_Err + } + return nil + }) +} + +var _ = templruntime.GeneratedTemplate diff --git a/db/migrations/20240927002029_change_inventory.sql b/db/migrations/20240927002029_change_inventory.sql index 4638649..5e2341b 100644 --- a/db/migrations/20240927002029_change_inventory.sql +++ b/db/migrations/20240927002029_change_inventory.sql @@ -6,17 +6,17 @@ ALTER TABLE "Inventory" RENAME COLUMN SrmPlaceholder TO SrmPlaceholder_old; ALTER TABLE "Inventory" ADD COLUMN IsTemplate TEXT NOT NULL DEFAULT "FALSE"; ALTER TABLE "Inventory" ADD COLUMN PoweredOn TEXT NOT NULL DEFAULT "FALSE"; ALTER TABLE "Inventory" ADD COLUMN SrmPlaceholder TEXT NOT NULL DEFAULT "FALSE"; -UPDATE Inventory +UPDATE "Inventory" SET IsTemplate = CASE WHEN IsTemplate_old = 1 THEN 'TRUE' ELSE 'FALSE' END; -UPDATE Inventory +UPDATE "Inventory" SET PoweredOn = CASE WHEN PowerState_old = 1 THEN 'TRUE' ELSE 'FALSE' END; -UPDATE Inventory +UPDATE "Inventory" SET SrmPlaceholder = CASE WHEN SrmPlaceholder_old = 1 THEN 'TRUE' ELSE 'FALSE' @@ -35,17 +35,17 @@ ALTER TABLE "Inventory" RENAME COLUMN SrmPlaceholder TO SrmPlaceholder_old; ALTER TABLE "Inventory" ADD COLUMN IsTemplate INTEGER; ALTER TABLE "Inventory" ADD COLUMN PowerState INTEGER; ALTER TABLE "Inventory" ADD COLUMN SrmPlaceholder INTEGER; -UPDATE Inventory +UPDATE "Inventory" SET IsTemplate = CASE WHEN IsTemplate_old = 'TRUE' THEN 1 ELSE 0 END; -UPDATE Inventory +UPDATE "Inventory" SET PowerState = CASE WHEN PoweredOn_old = 'TRUE' THEN 1 ELSE 0 END; -UPDATE Inventory +UPDATE "Inventory" SET SrmPlaceholder = CASE WHEN SrmPlaceholder_old = 'TRUE' THEN 1 ELSE 0 diff --git a/db/queries/db.go b/db/queries/db.go index 1cbab90..85679b3 100644 --- a/db/queries/db.go +++ b/db/queries/db.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.27.0 +// sqlc v1.29.0 package queries diff --git a/db/queries/models.go b/db/queries/models.go index 22229ab..b39c939 100644 --- a/db/queries/models.go +++ b/db/queries/models.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. 
// versions: -// sqlc v1.27.0 +// sqlc v1.29.0 package queries @@ -8,7 +8,7 @@ import ( "database/sql" ) -type Events struct { +type Event struct { Eid int64 CloudId string Source string @@ -60,7 +60,7 @@ type InventoryHistory struct { PreviousProvisionedDisk sql.NullFloat64 } -type Updates struct { +type Update struct { Uid int64 InventoryId sql.NullInt64 UpdateTime sql.NullInt64 diff --git a/db/queries/query.sql b/db/queries/query.sql index 428d312..654c348 100644 --- a/db/queries/query.sql +++ b/db/queries/query.sql @@ -1,37 +1,37 @@ -- name: ListInventory :many -SELECT * FROM "Inventory" +SELECT * FROM inventory ORDER BY "Name"; -- name: GetReportInventory :many -SELECT * FROM "Inventory" +SELECT * FROM inventory ORDER BY "CreationTime"; -- name: GetInventoryByName :many -SELECT * FROM "Inventory" +SELECT * FROM inventory WHERE "Name" = ?; -- name: GetInventoryByVcenter :many -SELECT * FROM "Inventory" +SELECT * FROM inventory WHERE "Vcenter" = ?; -- name: GetInventoryVmId :one -SELECT * FROM "Inventory" +SELECT * FROM inventory WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName'); -- name: GetInventoryVmUuid :one -SELECT * FROM "Inventory" +SELECT * FROM inventory WHERE "VmUuid" = sqlc.arg('vmUuid') AND "Datacenter" = sqlc.arg('datacenterName'); -- name: GetInventoryVcUrl :many -SELECT * FROM "Inventory" +SELECT * FROM inventory WHERE "Vcenter" = sqlc.arg('vc'); -- name: GetInventoryEventId :one -SELECT * FROM "Inventory" +SELECT * FROM inventory WHERE "CloudId" = ? LIMIT 1; -- name: CreateInventory :one -INSERT INTO "Inventory" ( +INSERT INTO inventory ( "Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "VmType", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn" ) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? @@ -39,32 +39,32 @@ INSERT INTO "Inventory" ( RETURNING *; -- name: InventoryUpdate :exec -UPDATE "Inventory" +UPDATE inventory SET "VmUuid" = sqlc.arg('uuid'), "SrmPlaceholder" = sqlc.arg('srmPlaceholder') WHERE "Iid" = sqlc.arg('iid'); -- name: InventoryMarkDeleted :exec -UPDATE "Inventory" +UPDATE inventory SET "DeletionTime" = sqlc.arg('deletionTime') WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName'); -- name: InventoryCleanup :exec -DELETE FROM "Inventory" +DELETE FROM inventory WHERE "VmId" = sqlc.arg('vmId') AND "Datacenter" = sqlc.arg('datacenterName') RETURNING *; -- name: InventoryCleanupVcenter :exec -DELETE FROM "Inventory" +DELETE FROM inventory WHERE "Vcenter" = sqlc.arg('vc') RETURNING *; -- name: InventoryCleanupTemplates :exec -DELETE FROM "Inventory" +DELETE FROM inventory WHERE "IsTemplate" = 'TRUE' RETURNING *; -- name: CreateUpdate :one -INSERT INTO "Updates" ( +INSERT INTO updates ( "InventoryId", "Name", "EventKey", "EventId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool", "NewProvisionedDisk", "UserName", "PlaceholderChange", "RawChangeString" ) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
@@ -72,25 +72,25 @@ INSERT INTO "Updates" ( RETURNING *; -- name: GetReportUpdates :many -SELECT * FROM "Updates" +SELECT * FROM updates ORDER BY "UpdateTime"; -- name: GetVmUpdates :many -SELECT * FROM "Updates" +SELECT * FROM updates WHERE "UpdateType" = sqlc.arg('updateType') AND "InventoryId" = sqlc.arg('InventoryId'); -- name: CleanupUpdates :exec -DELETE FROM "Updates" +DELETE FROM updates WHERE "UpdateType" = sqlc.arg('updateType') AND "UpdateTime" <= sqlc.arg('updateTime') RETURNING *; -- name: CleanupUpdatesNullVm :exec -DELETE FROM "Updates" +DELETE FROM updates WHERE "InventoryId" IS NULL RETURNING *; -- name: CreateEvent :one -INSERT INTO "Events" ( +INSERT INTO events ( "CloudId", "Source", "EventTime", "ChainId", "VmId", "VmName", "EventType", "EventKey", "DatacenterId", "DatacenterName", "ComputeResourceId", "ComputeResourceName", "UserName" ) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? @@ -98,22 +98,22 @@ INSERT INTO "Events" ( RETURNING *; -- name: ListEvents :many -SELECT * FROM "Events" +SELECT * FROM events ORDER BY "EventTime"; -- name: ListUnprocessedEvents :many -SELECT * FROM "Events" +SELECT * FROM events WHERE "Processed" = 0 AND "EventTime" > sqlc.arg('eventTime') ORDER BY "EventTime"; -- name: UpdateEventsProcessed :exec -UPDATE "Events" +UPDATE events SET "Processed" = 1 WHERE "Eid" = sqlc.arg('eid'); -- name: CreateInventoryHistory :one -INSERT INTO "InventoryHistory" ( +INSERT INTO inventory_history ( "InventoryId", "ReportDate", "UpdateTime", "PreviousVcpus", "PreviousRam", "PreviousResourcePool", "PreviousProvisionedDisk" ) VALUES( ?, ?, ?, ?, ?, ?, ? diff --git a/db/queries/query.sql.go b/db/queries/query.sql.go index 104071c..6a0339b 100644 --- a/db/queries/query.sql.go +++ b/db/queries/query.sql.go @@ -1,6 +1,6 @@ // Code generated by sqlc. DO NOT EDIT. // versions: -// sqlc v1.27.0 +// sqlc v1.29.0 // source: query.sql package queries @@ -11,7 +11,7 @@ import ( ) const cleanupUpdates = `-- name: CleanupUpdates :exec -DELETE FROM "Updates" +DELETE FROM updates WHERE "UpdateType" = ?1 AND "UpdateTime" <= ?2 RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString ` @@ -27,7 +27,7 @@ func (q *Queries) CleanupUpdates(ctx context.Context, arg CleanupUpdatesParams) } const cleanupUpdatesNullVm = `-- name: CleanupUpdatesNullVm :exec -DELETE FROM "Updates" +DELETE FROM updates WHERE "InventoryId" IS NULL RETURNING Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString ` @@ -38,7 +38,7 @@ func (q *Queries) CleanupUpdatesNullVm(ctx context.Context) error { } const createEvent = `-- name: CreateEvent :one -INSERT INTO "Events" ( +INSERT INTO events ( "CloudId", "Source", "EventTime", "ChainId", "VmId", "VmName", "EventType", "EventKey", "DatacenterId", "DatacenterName", "ComputeResourceId", "ComputeResourceName", "UserName" ) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
@@ -62,7 +62,7 @@ type CreateEventParams struct { UserName sql.NullString } -func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Events, error) { +func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event, error) { row := q.db.QueryRowContext(ctx, createEvent, arg.CloudId, arg.Source, @@ -78,7 +78,7 @@ func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event arg.ComputeResourceName, arg.UserName, ) - var i Events + var i Event err := row.Scan( &i.Eid, &i.CloudId, @@ -100,7 +100,7 @@ func (q *Queries) CreateEvent(ctx context.Context, arg CreateEventParams) (Event } const createInventory = `-- name: CreateInventory :one -INSERT INTO "Inventory" ( +INSERT INTO inventory ( "Name", "Vcenter", "VmId", "VmUuid", "EventKey", "CloudId", "CreationTime", "ResourcePool", "VmType", "IsTemplate", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", "InitialRam", "SrmPlaceholder", "PoweredOn" ) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? @@ -177,7 +177,7 @@ func (q *Queries) CreateInventory(ctx context.Context, arg CreateInventoryParams } const createInventoryHistory = `-- name: CreateInventoryHistory :one -INSERT INTO "InventoryHistory" ( +INSERT INTO inventory_history ( "InventoryId", "ReportDate", "UpdateTime", "PreviousVcpus", "PreviousRam", "PreviousResourcePool", "PreviousProvisionedDisk" ) VALUES( ?, ?, ?, ?, ?, ?, ? @@ -220,7 +220,7 @@ func (q *Queries) CreateInventoryHistory(ctx context.Context, arg CreateInventor } const createUpdate = `-- name: CreateUpdate :one -INSERT INTO "Updates" ( +INSERT INTO updates ( "InventoryId", "Name", "EventKey", "EventId", "UpdateTime", "UpdateType", "NewVcpus", "NewRam", "NewResourcePool", "NewProvisionedDisk", "UserName", "PlaceholderChange", "RawChangeString" ) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? @@ -244,7 +244,7 @@ type CreateUpdateParams struct { RawChangeString []byte } -func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Updates, error) { +func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Update, error) { row := q.db.QueryRowContext(ctx, createUpdate, arg.InventoryId, arg.Name, @@ -260,7 +260,7 @@ func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Upd arg.PlaceholderChange, arg.RawChangeString, ) - var i Updates + var i Update err := row.Scan( &i.Uid, &i.InventoryId, @@ -281,7 +281,7 @@ func (q *Queries) CreateUpdate(ctx context.Context, arg CreateUpdateParams) (Upd } const getInventoryByName = `-- name: GetInventoryByName :many -SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory WHERE "Name" = ? 
` @@ -330,7 +330,7 @@ func (q *Queries) GetInventoryByName(ctx context.Context, name string) ([]Invent } const getInventoryByVcenter = `-- name: GetInventoryByVcenter :many -SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory WHERE "Vcenter" = ? ` @@ -379,7 +379,7 @@ func (q *Queries) GetInventoryByVcenter(ctx context.Context, vcenter string) ([] } const getInventoryEventId = `-- name: GetInventoryEventId :one -SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory WHERE "CloudId" = ? LIMIT 1 ` @@ -412,7 +412,7 @@ func (q *Queries) GetInventoryEventId(ctx context.Context, cloudid sql.NullStrin } const getInventoryVcUrl = `-- name: GetInventoryVcUrl :many -SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory WHERE "Vcenter" = ?1 ` @@ -461,7 +461,7 @@ func (q *Queries) GetInventoryVcUrl(ctx context.Context, vc string) ([]Inventory } const getInventoryVmId = `-- name: GetInventoryVmId :one -SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory WHERE "VmId" = ?1 AND "Datacenter" = ?2 ` @@ -499,7 +499,7 @@ func (q *Queries) GetInventoryVmId(ctx context.Context, arg GetInventoryVmIdPara } const getInventoryVmUuid = `-- name: GetInventoryVmUuid :one -SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory WHERE "VmUuid" = ?1 AND "Datacenter" = ?2 ` @@ -537,7 +537,7 @@ func (q *Queries) GetInventoryVmUuid(ctx context.Context, arg GetInventoryVmUuid } const getReportInventory = `-- name: GetReportInventory :many -SELECT Iid, Name, Vcenter, 
VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory ORDER BY "CreationTime" ` @@ -586,19 +586,19 @@ func (q *Queries) GetReportInventory(ctx context.Context) ([]Inventory, error) { } const getReportUpdates = `-- name: GetReportUpdates :many -SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM "Updates" +SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM updates ORDER BY "UpdateTime" ` -func (q *Queries) GetReportUpdates(ctx context.Context) ([]Updates, error) { +func (q *Queries) GetReportUpdates(ctx context.Context) ([]Update, error) { rows, err := q.db.QueryContext(ctx, getReportUpdates) if err != nil { return nil, err } defer rows.Close() - var items []Updates + var items []Update for rows.Next() { - var i Updates + var i Update if err := rows.Scan( &i.Uid, &i.InventoryId, @@ -629,7 +629,7 @@ func (q *Queries) GetReportUpdates(ctx context.Context) ([]Updates, error) { } const getVmUpdates = `-- name: GetVmUpdates :many -SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM "Updates" +SELECT Uid, InventoryId, UpdateTime, UpdateType, NewVcpus, NewRam, NewResourcePool, EventKey, EventId, NewProvisionedDisk, UserName, PlaceholderChange, Name, RawChangeString FROM updates WHERE "UpdateType" = ?1 AND "InventoryId" = ?2 ` @@ -638,15 +638,15 @@ type GetVmUpdatesParams struct { InventoryId sql.NullInt64 } -func (q *Queries) GetVmUpdates(ctx context.Context, arg GetVmUpdatesParams) ([]Updates, error) { +func (q *Queries) GetVmUpdates(ctx context.Context, arg GetVmUpdatesParams) ([]Update, error) { rows, err := q.db.QueryContext(ctx, getVmUpdates, arg.UpdateType, arg.InventoryId) if err != nil { return nil, err } defer rows.Close() - var items []Updates + var items []Update for rows.Next() { - var i Updates + var i Update if err := rows.Scan( &i.Uid, &i.InventoryId, @@ -677,7 +677,7 @@ func (q *Queries) GetVmUpdates(ctx context.Context, arg GetVmUpdatesParams) ([]U } const inventoryCleanup = `-- name: InventoryCleanup :exec -DELETE FROM "Inventory" +DELETE FROM inventory WHERE "VmId" = ?1 AND "Datacenter" = ?2 RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid ` @@ -693,7 +693,7 @@ func (q *Queries) InventoryCleanup(ctx context.Context, arg InventoryCleanupPara } const inventoryCleanupTemplates = `-- name: InventoryCleanupTemplates :exec -DELETE FROM "Inventory" +DELETE FROM inventory WHERE "IsTemplate" = 'TRUE' RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid ` @@ -704,7 +704,7 @@ func (q *Queries) 
InventoryCleanupTemplates(ctx context.Context) error { } const inventoryCleanupVcenter = `-- name: InventoryCleanupVcenter :exec -DELETE FROM "Inventory" +DELETE FROM inventory WHERE "Vcenter" = ?1 RETURNING Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid ` @@ -715,7 +715,7 @@ func (q *Queries) InventoryCleanupVcenter(ctx context.Context, vc string) error } const inventoryMarkDeleted = `-- name: InventoryMarkDeleted :exec -UPDATE "Inventory" +UPDATE inventory SET "DeletionTime" = ?1 WHERE "VmId" = ?2 AND "Datacenter" = ?3 ` @@ -732,7 +732,7 @@ func (q *Queries) InventoryMarkDeleted(ctx context.Context, arg InventoryMarkDel } const inventoryUpdate = `-- name: InventoryUpdate :exec -UPDATE "Inventory" +UPDATE inventory SET "VmUuid" = ?1, "SrmPlaceholder" = ?2 WHERE "Iid" = ?3 ` @@ -749,19 +749,19 @@ func (q *Queries) InventoryUpdate(ctx context.Context, arg InventoryUpdateParams } const listEvents = `-- name: ListEvents :many -SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM "Events" +SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM events ORDER BY "EventTime" ` -func (q *Queries) ListEvents(ctx context.Context) ([]Events, error) { +func (q *Queries) ListEvents(ctx context.Context) ([]Event, error) { rows, err := q.db.QueryContext(ctx, listEvents) if err != nil { return nil, err } defer rows.Close() - var items []Events + var items []Event for rows.Next() { - var i Events + var i Event if err := rows.Scan( &i.Eid, &i.CloudId, @@ -793,7 +793,7 @@ func (q *Queries) ListEvents(ctx context.Context) ([]Events, error) { } const listInventory = `-- name: ListInventory :many -SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM "Inventory" +SELECT Iid, Name, Vcenter, VmId, EventKey, CloudId, CreationTime, DeletionTime, ResourcePool, VmType, Datacenter, Cluster, Folder, ProvisionedDisk, InitialVcpus, InitialRam, IsTemplate, PoweredOn, SrmPlaceholder, VmUuid FROM inventory ORDER BY "Name" ` @@ -842,21 +842,21 @@ func (q *Queries) ListInventory(ctx context.Context) ([]Inventory, error) { } const listUnprocessedEvents = `-- name: ListUnprocessedEvents :many -SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM "Events" +SELECT Eid, CloudId, Source, EventTime, ChainId, VmId, EventKey, DatacenterName, ComputeResourceName, UserName, Processed, DatacenterId, ComputeResourceId, VmName, EventType FROM events WHERE "Processed" = 0 AND "EventTime" > ?1 ORDER BY "EventTime" ` -func (q *Queries) ListUnprocessedEvents(ctx context.Context, eventtime sql.NullInt64) ([]Events, error) { +func (q *Queries) ListUnprocessedEvents(ctx context.Context, eventtime sql.NullInt64) ([]Event, error) { rows, err := q.db.QueryContext(ctx, listUnprocessedEvents, eventtime) if err != nil { return nil, err } defer rows.Close() - var items []Events + var items []Event for rows.Next() { - var i Events + var i Event if err := 
rows.Scan( &i.Eid, &i.CloudId, @@ -888,7 +888,7 @@ func (q *Queries) ListUnprocessedEvents(ctx context.Context, eventtime sql.NullI } const updateEventsProcessed = `-- name: UpdateEventsProcessed :exec -UPDATE "Events" +UPDATE events SET "Processed" = 1 WHERE "Eid" = ?1 ` diff --git a/db/schema.sql b/db/schema.sql new file mode 100644 index 0000000..33f91ca --- /dev/null +++ b/db/schema.sql @@ -0,0 +1,68 @@ +CREATE TABLE IF NOT EXISTS inventory ( + "Iid" INTEGER PRIMARY KEY AUTOINCREMENT, + "Name" TEXT NOT NULL, + "Vcenter" TEXT NOT NULL, + "VmId" TEXT, + "EventKey" TEXT, + "CloudId" TEXT, + "CreationTime" INTEGER, + "DeletionTime" INTEGER, + "ResourcePool" TEXT, + "VmType" TEXT, + "Datacenter" TEXT, + "Cluster" TEXT, + "Folder" TEXT, + "ProvisionedDisk" REAL, + "InitialVcpus" INTEGER, + "InitialRam" INTEGER, + "IsTemplate" TEXT NOT NULL DEFAULT "FALSE", + "PoweredOn" TEXT NOT NULL DEFAULT "FALSE", + "SrmPlaceholder" TEXT NOT NULL DEFAULT "FALSE", + "VmUuid" TEXT +); + +CREATE TABLE IF NOT EXISTS updates ( + "Uid" INTEGER PRIMARY KEY AUTOINCREMENT, + "InventoryId" INTEGER, + "UpdateTime" INTEGER, + "UpdateType" TEXT NOT NULL, + "NewVcpus" INTEGER, + "NewRam" INTEGER, + "NewResourcePool" TEXT, + "EventKey" TEXT, + "EventId" TEXT, + "NewProvisionedDisk" REAL, + "UserName" TEXT, + "PlaceholderChange" TEXT, + "Name" TEXT, + "RawChangeString" BLOB +); + +CREATE TABLE IF NOT EXISTS events ( + "Eid" INTEGER PRIMARY KEY AUTOINCREMENT, + "CloudId" TEXT NOT NULL, + "Source" TEXT NOT NULL, + "EventTime" INTEGER, + "ChainId" TEXT NOT NULL, + "VmId" TEXT, + "EventKey" TEXT, + "DatacenterName" TEXT, + "ComputeResourceName" TEXT, + "UserName" TEXT, + "Processed" INTEGER NOT NULL DEFAULT 0, + "DatacenterId" TEXT, + "ComputeResourceId" TEXT, + "VmName" TEXT, + "EventType" TEXT +); + +CREATE TABLE IF NOT EXISTS inventory_history ( + "Hid" INTEGER PRIMARY KEY AUTOINCREMENT, + "InventoryId" INTEGER, + "ReportDate" INTEGER, + "UpdateTime" INTEGER, + "PreviousVcpus" INTEGER, + "PreviousRam" INTEGER, + "PreviousResourcePool" TEXT, + "PreviousProvisionedDisk" REAL +); diff --git a/internal/report/snapshots.go b/internal/report/snapshots.go new file mode 100644 index 0000000..23bc4c8 --- /dev/null +++ b/internal/report/snapshots.go @@ -0,0 +1,293 @@ +package report + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "log/slog" + "strings" + "time" + "vctp/db" + + "github.com/jmoiron/sqlx" + "github.com/xuri/excelize/v2" +) + +func ListTablesByPrefix(ctx context.Context, database db.Database, prefix string) ([]string, error) { + dbConn := database.DB() + driver := strings.ToLower(dbConn.DriverName()) + pattern := prefix + "%" + + var rows *sqlx.Rows + var err error + + switch driver { + case "sqlite": + rows, err = dbConn.QueryxContext(ctx, ` +SELECT name +FROM sqlite_master +WHERE type = 'table' + AND name LIKE ? 
+ORDER BY name DESC +`, pattern) + case "pgx", "postgres": + rows, err = dbConn.QueryxContext(ctx, ` +SELECT tablename +FROM pg_catalog.pg_tables +WHERE schemaname = 'public' + AND tablename LIKE $1 +ORDER BY tablename DESC +`, pattern) + default: + return nil, fmt.Errorf("unsupported driver for listing tables: %s", driver) + } + + if err != nil { + return nil, err + } + defer rows.Close() + + tables := make([]string, 0) + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + tables = append(tables, name) + } + return tables, rows.Err() +} + +func FormatSnapshotLabel(prefix string, tableName string) (string, bool) { + if !strings.HasPrefix(tableName, prefix) { + return "", false + } + suffix := strings.TrimPrefix(tableName, prefix) + switch prefix { + case "inventory_daily_": + if t, err := time.Parse("20060102", suffix); err == nil { + return t.Format("2006-01-02"), true + } + case "inventory_daily_summary_": + if t, err := time.Parse("20060102", suffix); err == nil { + return t.Format("2006-01-02"), true + } + case "inventory_monthly_summary_": + if t, err := time.Parse("200601", suffix); err == nil { + return t.Format("2006-01"), true + } + } + return "", false +} + +func CreateTableReport(logger *slog.Logger, Database db.Database, ctx context.Context, tableName string) ([]byte, error) { + if err := validateTableName(tableName); err != nil { + return nil, err + } + + dbConn := Database.DB() + columns, err := tableColumns(ctx, dbConn, tableName) + if err != nil { + return nil, err + } + if len(columns) == 0 { + return nil, fmt.Errorf("no columns found for table %s", tableName) + } + + query := fmt.Sprintf(`SELECT * FROM %s`, tableName) + orderBy := snapshotOrderBy(columns) + if orderBy != "" { + query = fmt.Sprintf(`%s ORDER BY "%s" DESC`, query, orderBy) + } + + rows, err := dbConn.QueryxContext(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + sheetName := "Snapshot Report" + var buffer bytes.Buffer + + xlsx := excelize.NewFile() + if err := xlsx.SetSheetName("Sheet1", sheetName); err != nil { + return nil, err + } + if err := xlsx.SetDocProps(&excelize.DocProperties{ + Creator: "vctp", + Created: time.Now().Format(time.RFC3339), + }); err != nil { + logger.Error("Error setting document properties", "error", err, "sheet_name", sheetName) + } + + for i, columnName := range columns { + cell := fmt.Sprintf("%s1", string(rune('A'+i))) + xlsx.SetCellValue(sheetName, cell, columnName) + } + + if endCell, err := excelize.CoordinatesToCellName(len(columns), 1); err == nil { + filterRange := "A1:" + endCell + if err := xlsx.AutoFilter(sheetName, filterRange, nil); err != nil { + logger.Error("Error setting autofilter", "error", err) + } + } + + headerStyle, err := xlsx.NewStyle(&excelize.Style{ + Font: &excelize.Font{ + Bold: true, + }, + }) + if err == nil { + if err := xlsx.SetRowStyle(sheetName, 1, 1, headerStyle); err != nil { + logger.Error("Error setting header style", "error", err) + } + } + + rowIndex := 2 + for rows.Next() { + values, err := scanRowValues(rows, len(columns)) + if err != nil { + return nil, err + } + for colIndex, value := range values { + cell := fmt.Sprintf("%s%d", string(rune('A'+colIndex)), rowIndex) + xlsx.SetCellValue(sheetName, cell, normalizeCellValue(value)) + } + rowIndex++ + } + if err := rows.Err(); err != nil { + return nil, err + } + + if err := xlsx.SetPanes(sheetName, &excelize.Panes{ + Freeze: true, + Split: false, + XSplit: 0, + YSplit: 1, + TopLeftCell: "A2", + ActivePane: 
"bottomLeft", + Selection: []excelize.Selection{ + {SQRef: "A2", ActiveCell: "A2", Pane: "bottomLeft"}, + }, + }); err != nil { + logger.Error("Error freezing top row", "error", err) + } + + if err := xlsx.Write(&buffer); err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +func validateTableName(name string) error { + if name == "" { + return fmt.Errorf("table name is empty") + } + for _, r := range name { + if (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' { + continue + } + return fmt.Errorf("invalid table name: %s", name) + } + return nil +} + +func tableColumns(ctx context.Context, dbConn *sqlx.DB, tableName string) ([]string, error) { + driver := strings.ToLower(dbConn.DriverName()) + switch driver { + case "sqlite": + query := fmt.Sprintf(`PRAGMA table_info("%s")`, tableName) + rows, err := dbConn.QueryxContext(ctx, query) + if err != nil { + return nil, err + } + defer rows.Close() + + columns := make([]string, 0) + for rows.Next() { + var ( + cid int + name string + colType string + notNull int + defaultVal sql.NullString + pk int + ) + if err := rows.Scan(&cid, &name, &colType, ¬Null, &defaultVal, &pk); err != nil { + return nil, err + } + columns = append(columns, name) + } + return columns, rows.Err() + case "pgx", "postgres": + rows, err := dbConn.QueryxContext(ctx, ` +SELECT column_name +FROM information_schema.columns +WHERE table_schema = 'public' + AND table_name = $1 +ORDER BY ordinal_position +`, tableName) + if err != nil { + return nil, err + } + defer rows.Close() + + columns := make([]string, 0) + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + columns = append(columns, name) + } + return columns, rows.Err() + default: + return nil, fmt.Errorf("unsupported driver for table columns: %s", driver) + } +} + +func snapshotOrderBy(columns []string) string { + normalized := make(map[string]struct{}, len(columns)) + for _, col := range columns { + normalized[strings.ToLower(col)] = struct{}{} + } + if _, ok := normalized["snapshottime"]; ok { + return "SnapshotTime" + } + if _, ok := normalized["samplespresent"]; ok { + return "SamplesPresent" + } + if _, ok := normalized["avgispresent"]; ok { + return "AvgIsPresent" + } + if _, ok := normalized["name"]; ok { + return "Name" + } + return "" +} + +func scanRowValues(rows *sqlx.Rows, columnCount int) ([]interface{}, error) { + rawValues := make([]interface{}, columnCount) + scanArgs := make([]interface{}, columnCount) + for i := range rawValues { + scanArgs[i] = &rawValues[i] + } + if err := rows.Scan(scanArgs...); err != nil { + return nil, err + } + return rawValues, nil +} + +func normalizeCellValue(value interface{}) interface{} { + switch v := value.(type) { + case nil: + return "" + case []byte: + return string(v) + case time.Time: + return v.Format(time.RFC3339) + default: + return v + } +} diff --git a/internal/tasks/inventorySnapshots.go b/internal/tasks/inventorySnapshots.go new file mode 100644 index 0000000..d8dd816 --- /dev/null +++ b/internal/tasks/inventorySnapshots.go @@ -0,0 +1,674 @@ +package tasks + +import ( + "context" + "database/sql" + "fmt" + "log/slog" + "os" + "strconv" + "strings" + "time" + "vctp/db/queries" + "vctp/internal/report" + "vctp/internal/vcenter" + + "github.com/jmoiron/sqlx" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +type inventorySnapshotRow struct { + InventoryId sql.NullInt64 + Name string + Vcenter string + VmId sql.NullString + EventKey sql.NullString + 
CloudId sql.NullString + CreationTime sql.NullInt64 + DeletionTime sql.NullInt64 + ResourcePool sql.NullString + VmType sql.NullString + Datacenter sql.NullString + Cluster sql.NullString + Folder sql.NullString + ProvisionedDisk sql.NullFloat64 + InitialVcpus sql.NullInt64 + InitialRam sql.NullInt64 + IsTemplate string + PoweredOn string + SrmPlaceholder string + VmUuid sql.NullString + SnapshotTime int64 + IsPresent string +} + +// RunVcenterSnapshotHourly records hourly inventory snapshots into a daily table. +func (c *CronTask) RunVcenterSnapshotHourly(ctx context.Context, logger *slog.Logger) error { + startTime := time.Now() + tableName, err := dailyInventoryTableName(startTime) + if err != nil { + return err + } + + dbConn := c.Database.DB() + if err := ensureDailyInventoryTable(ctx, dbConn, tableName); err != nil { + return err + } + + // reload settings in case vcenter list has changed + c.Settings.ReadYMLSettings() + + for _, url := range c.Settings.Values.Settings.VcenterAddresses { + c.Logger.Debug("connecting to vcenter for hourly snapshot", "url", url) + vc := vcenter.New(c.Logger, c.VcCreds) + vc.Login(url) + + vcVms, err := vc.GetAllVmReferences() + if err != nil { + c.Logger.Error("unable to get VMs from vcenter", "error", err, "url", url) + vc.Logout() + continue + } + + inventoryRows, err := c.Database.Queries().GetInventoryByVcenter(ctx, url) + if err != nil { + c.Logger.Error("unable to query inventory table", "error", err, "url", url) + vc.Logout() + continue + } + + inventoryByVmID := make(map[string]queries.Inventory, len(inventoryRows)) + for _, inv := range inventoryRows { + if inv.VmId.Valid { + inventoryByVmID[inv.VmId.String] = inv + } + } + + presentSnapshots := make(map[string]inventorySnapshotRow, len(vcVms)) + for _, vm := range vcVms { + if strings.HasPrefix(vm.Name(), "vCLS-") { + continue + } + + vmObj, err := vc.ConvertObjToMoVM(vm) + if err != nil { + c.Logger.Error("failed to read VM details", "vm_id", vm.Reference().Value, "error", err) + continue + } + if vmObj.Config != nil && vmObj.Config.Template { + continue + } + + var inv *queries.Inventory + if existing, ok := inventoryByVmID[vm.Reference().Value]; ok { + existingCopy := existing + inv = &existingCopy + } + + row, err := snapshotFromVM(vmObj, vc, startTime, inv) + if err != nil { + c.Logger.Error("unable to build snapshot for VM", "vm_id", vm.Reference().Value, "error", err) + continue + } + row.IsPresent = "TRUE" + presentSnapshots[vm.Reference().Value] = row + } + + for _, row := range presentSnapshots { + if err := insertDailyInventoryRow(ctx, dbConn, tableName, row); err != nil { + c.Logger.Error("failed to insert hourly snapshot", "error", err, "vm_id", row.VmId.String) + } + } + + for _, inv := range inventoryRows { + vmID := inv.VmId.String + if vmID != "" { + if _, ok := presentSnapshots[vmID]; ok { + continue + } + } + + row := snapshotFromInventory(inv, startTime) + row.IsPresent = "FALSE" + if err := insertDailyInventoryRow(ctx, dbConn, tableName, row); err != nil { + c.Logger.Error("failed to insert missing VM snapshot", "error", err, "vm_id", row.VmId.String) + } + } + + vc.Logout() + } + + c.Logger.Debug("Finished hourly vcenter snapshot") + return nil +} + +// RunVcenterDailyAggregate summarizes hourly snapshots into a daily summary table. 
+func (c *CronTask) RunVcenterDailyAggregate(ctx context.Context, logger *slog.Logger) error { + targetTime := time.Now().Add(-time.Minute) + sourceTable, err := dailyInventoryTableName(targetTime) + if err != nil { + return err + } + summaryTable, err := dailySummaryTableName(targetTime) + if err != nil { + return err + } + + dbConn := c.Database.DB() + if err := ensureDailySummaryTable(ctx, dbConn, summaryTable); err != nil { + return err + } + + insertQuery := fmt.Sprintf(` +INSERT INTO %s ( + "InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime", + "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", + "InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SamplesPresent" +) +SELECT + "InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime", + "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", + "InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", + SUM(CASE WHEN "IsPresent" = 'TRUE' THEN 1 ELSE 0 END) AS "SamplesPresent" +FROM %s +GROUP BY + "InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime", + "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", + "InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"; +`, summaryTable, sourceTable) + + if _, err := dbConn.ExecContext(ctx, insertQuery); err != nil { + c.Logger.Error("failed to aggregate daily inventory", "error", err, "source_table", sourceTable) + return err + } + + c.Logger.Debug("Finished daily inventory aggregation", "source_table", sourceTable, "summary_table", summaryTable) + return nil +} + +// RunVcenterMonthlyAggregate summarizes the previous month's daily snapshots. 
+func (c *CronTask) RunVcenterMonthlyAggregate(ctx context.Context, logger *slog.Logger) error { + now := time.Now() + firstOfThisMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, now.Location()) + targetMonth := firstOfThisMonth.AddDate(0, -1, 0) + + monthPrefix := fmt.Sprintf("inventory_daily_%s", targetMonth.Format("200601")) + dailyTables, err := report.ListTablesByPrefix(ctx, c.Database, monthPrefix) + if err != nil { + return err + } + if len(dailyTables) == 0 { + return fmt.Errorf("no daily snapshot tables found for %s", targetMonth.Format("2006-01")) + } + + monthlyTable, err := monthlySummaryTableName(targetMonth) + if err != nil { + return err + } + + dbConn := c.Database.DB() + if err := ensureMonthlySummaryTable(ctx, dbConn, monthlyTable); err != nil { + return err + } + + unionQuery := buildUnionQuery(dailyTables, []string{ + `"InventoryId"`, `"Name"`, `"Vcenter"`, `"VmId"`, `"EventKey"`, `"CloudId"`, `"CreationTime"`, + `"DeletionTime"`, `"ResourcePool"`, `"VmType"`, `"Datacenter"`, `"Cluster"`, `"Folder"`, + `"ProvisionedDisk"`, `"InitialVcpus"`, `"InitialRam"`, `"IsTemplate"`, `"PoweredOn"`, + `"SrmPlaceholder"`, `"VmUuid"`, `"IsPresent"`, + }) + if strings.TrimSpace(unionQuery) == "" { + return fmt.Errorf("no valid daily snapshot tables found for %s", targetMonth.Format("2006-01")) + } + + insertQuery := fmt.Sprintf(` +INSERT INTO %s ( + "InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime", + "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", + "InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", + "AvgVcpus", "AvgRam", "AvgIsPresent" +) +SELECT + "InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime", + "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", + "InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", + AVG(CASE WHEN "InitialVcpus" IS NOT NULL THEN "InitialVcpus" END) AS "AvgVcpus", + AVG(CASE WHEN "InitialRam" IS NOT NULL THEN "InitialRam" END) AS "AvgRam", + AVG(CASE WHEN "IsPresent" = 'TRUE' THEN 1 ELSE 0 END) AS "AvgIsPresent" +FROM ( +%s +) snapshots +GROUP BY + "InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime", + "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", + "InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid"; +`, monthlyTable, unionQuery) + + if _, err := dbConn.ExecContext(ctx, insertQuery); err != nil { + c.Logger.Error("failed to aggregate monthly inventory", "error", err, "month", targetMonth.Format("2006-01")) + return err + } + + c.Logger.Debug("Finished monthly inventory aggregation", "summary_table", monthlyTable) + return nil +} + +// RunSnapshotCleanup drops hourly and daily snapshot tables older than retention. 
+func (c *CronTask) RunSnapshotCleanup(ctx context.Context, logger *slog.Logger) error { + now := time.Now() + hourlyMaxDays := getEnvInt("HOURLY_SNAPSHOT_MAX_AGE_DAYS", 60) + dailyMaxMonths := getEnvInt("DAILY_SNAPSHOT_MAX_AGE_MONTHS", 12) + + hourlyCutoff := now.AddDate(0, 0, -hourlyMaxDays) + dailyCutoff := now.AddDate(0, -dailyMaxMonths, 0) + + dbConn := c.Database.DB() + + hourlyTables, err := report.ListTablesByPrefix(ctx, c.Database, "inventory_daily_") + if err != nil { + return err + } + + for _, table := range hourlyTables { + if strings.HasPrefix(table, "inventory_daily_summary_") { + continue + } + tableDate, ok := parseSnapshotDate(table, "inventory_daily_", "20060102") + if !ok { + continue + } + if tableDate.Before(truncateDate(hourlyCutoff)) { + if err := dropSnapshotTable(ctx, dbConn, table); err != nil { + c.Logger.Error("failed to drop hourly snapshot table", "error", err, "table", table) + } + } + } + + dailyTables, err := report.ListTablesByPrefix(ctx, c.Database, "inventory_daily_summary_") + if err != nil { + return err + } + for _, table := range dailyTables { + tableDate, ok := parseSnapshotDate(table, "inventory_daily_summary_", "20060102") + if !ok { + continue + } + if tableDate.Before(truncateDate(dailyCutoff)) { + if err := dropSnapshotTable(ctx, dbConn, table); err != nil { + c.Logger.Error("failed to drop daily snapshot table", "error", err, "table", table) + } + } + } + + c.Logger.Debug("Finished snapshot cleanup") + return nil +} + +func dailyInventoryTableName(t time.Time) (string, error) { + return safeTableName(fmt.Sprintf("inventory_daily_%s", t.Format("20060102"))) +} + +func dailySummaryTableName(t time.Time) (string, error) { + return safeTableName(fmt.Sprintf("inventory_daily_summary_%s", t.Format("20060102"))) +} + +func monthlySummaryTableName(t time.Time) (string, error) { + return safeTableName(fmt.Sprintf("inventory_monthly_summary_%s", t.Format("200601"))) +} + +func safeTableName(name string) (string, error) { + for _, r := range name { + if (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' { + continue + } + return "", fmt.Errorf("invalid table name: %s", name) + } + return name, nil +} + +func ensureDailyInventoryTable(ctx context.Context, dbConn *sqlx.DB, tableName string) error { + ddl := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( + "InventoryId" BIGINT, + "Name" TEXT NOT NULL, + "Vcenter" TEXT NOT NULL, + "VmId" TEXT, + "EventKey" TEXT, + "CloudId" TEXT, + "CreationTime" BIGINT, + "DeletionTime" BIGINT, + "ResourcePool" TEXT, + "VmType" TEXT, + "Datacenter" TEXT, + "Cluster" TEXT, + "Folder" TEXT, + "ProvisionedDisk" REAL, + "InitialVcpus" BIGINT, + "InitialRam" BIGINT, + "IsTemplate" TEXT, + "PoweredOn" TEXT, + "SrmPlaceholder" TEXT, + "VmUuid" TEXT, + "SnapshotTime" BIGINT NOT NULL, + "IsPresent" TEXT NOT NULL +);`, tableName) + + _, err := dbConn.ExecContext(ctx, ddl) + return err +} + +func ensureDailySummaryTable(ctx context.Context, dbConn *sqlx.DB, tableName string) error { + ddl := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( + "InventoryId" BIGINT, + "Name" TEXT NOT NULL, + "Vcenter" TEXT NOT NULL, + "VmId" TEXT, + "EventKey" TEXT, + "CloudId" TEXT, + "CreationTime" BIGINT, + "DeletionTime" BIGINT, + "ResourcePool" TEXT, + "VmType" TEXT, + "Datacenter" TEXT, + "Cluster" TEXT, + "Folder" TEXT, + "ProvisionedDisk" REAL, + "InitialVcpus" BIGINT, + "InitialRam" BIGINT, + "IsTemplate" TEXT, + "PoweredOn" TEXT, + "SrmPlaceholder" TEXT, + "VmUuid" TEXT, + "SamplesPresent" BIGINT NOT NULL +);`, tableName) + + _, err := 
dbConn.ExecContext(ctx, ddl) + return err +} + +func ensureMonthlySummaryTable(ctx context.Context, dbConn *sqlx.DB, tableName string) error { + ddl := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s ( + "InventoryId" BIGINT, + "Name" TEXT NOT NULL, + "Vcenter" TEXT NOT NULL, + "VmId" TEXT, + "EventKey" TEXT, + "CloudId" TEXT, + "CreationTime" BIGINT, + "DeletionTime" BIGINT, + "ResourcePool" TEXT, + "VmType" TEXT, + "Datacenter" TEXT, + "Cluster" TEXT, + "Folder" TEXT, + "ProvisionedDisk" REAL, + "InitialVcpus" BIGINT, + "InitialRam" BIGINT, + "IsTemplate" TEXT, + "PoweredOn" TEXT, + "SrmPlaceholder" TEXT, + "VmUuid" TEXT, + "AvgVcpus" REAL, + "AvgRam" REAL, + "AvgIsPresent" REAL +);`, tableName) + + _, err := dbConn.ExecContext(ctx, ddl) + return err +} + +func buildUnionQuery(tables []string, columns []string) string { + queries := make([]string, 0, len(tables)) + columnList := strings.Join(columns, ", ") + for _, table := range tables { + if _, err := safeTableName(table); err != nil { + continue + } + queries = append(queries, fmt.Sprintf("SELECT %s FROM %s", columnList, table)) + } + return strings.Join(queries, "\nUNION ALL\n") +} + +func parseSnapshotDate(table string, prefix string, layout string) (time.Time, bool) { + if !strings.HasPrefix(table, prefix) { + return time.Time{}, false + } + suffix := strings.TrimPrefix(table, prefix) + parsed, err := time.Parse(layout, suffix) + if err != nil { + return time.Time{}, false + } + return parsed, true +} + +func truncateDate(t time.Time) time.Time { + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) +} + +func dropSnapshotTable(ctx context.Context, dbConn *sqlx.DB, table string) error { + if _, err := safeTableName(table); err != nil { + return err + } + _, err := dbConn.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", table)) + return err +} + +func getEnvInt(key string, fallback int) int { + raw := strings.TrimSpace(os.Getenv(key)) + if raw == "" { + return fallback + } + value, err := strconv.Atoi(raw) + if err != nil || value < 0 { + return fallback + } + return value +} + +func snapshotFromVM(vmObject *mo.VirtualMachine, vc *vcenter.Vcenter, snapshotTime time.Time, inv *queries.Inventory) (inventorySnapshotRow, error) { + if vmObject == nil { + return inventorySnapshotRow{}, fmt.Errorf("missing VM object") + } + + row := inventorySnapshotRow{ + Name: vmObject.Name, + Vcenter: vc.Vurl, + VmId: sql.NullString{String: vmObject.Reference().Value, Valid: vmObject.Reference().Value != ""}, + SnapshotTime: snapshotTime.Unix(), + } + + if inv != nil { + row.InventoryId = sql.NullInt64{Int64: inv.Iid, Valid: inv.Iid > 0} + row.EventKey = inv.EventKey + row.CloudId = inv.CloudId + row.DeletionTime = inv.DeletionTime + row.VmType = inv.VmType + } + + if vmObject.Config != nil { + row.VmUuid = sql.NullString{String: vmObject.Config.Uuid, Valid: vmObject.Config.Uuid != ""} + if !vmObject.Config.CreateDate.IsZero() { + row.CreationTime = sql.NullInt64{Int64: vmObject.Config.CreateDate.Unix(), Valid: true} + } + row.InitialVcpus = sql.NullInt64{Int64: int64(vmObject.Config.Hardware.NumCPU), Valid: vmObject.Config.Hardware.NumCPU > 0} + row.InitialRam = sql.NullInt64{Int64: int64(vmObject.Config.Hardware.MemoryMB), Valid: vmObject.Config.Hardware.MemoryMB > 0} + + totalDiskBytes := int64(0) + for _, device := range vmObject.Config.Hardware.Device { + if disk, ok := device.(*types.VirtualDisk); ok { + totalDiskBytes += disk.CapacityInBytes + } + } + if totalDiskBytes > 0 { + row.ProvisionedDisk = sql.NullFloat64{Float64: 
float64(totalDiskBytes / 1024 / 1024 / 1024), Valid: true} + } + + if vmObject.Config.ManagedBy != nil && vmObject.Config.ManagedBy.ExtensionKey == "com.vmware.vcDr" && vmObject.Config.ManagedBy.Type == "placeholderVm" { + row.SrmPlaceholder = "TRUE" + } else { + row.SrmPlaceholder = "FALSE" + } + + if vmObject.Config.Template { + row.IsTemplate = "TRUE" + } else { + row.IsTemplate = "FALSE" + } + } + + if vmObject.Runtime.PowerState == "poweredOff" { + row.PoweredOn = "FALSE" + } else { + row.PoweredOn = "TRUE" + } + + if inv != nil { + row.ResourcePool = inv.ResourcePool + row.Datacenter = inv.Datacenter + row.Cluster = inv.Cluster + row.Folder = inv.Folder + if !row.CreationTime.Valid { + row.CreationTime = inv.CreationTime + } + if !row.ProvisionedDisk.Valid { + row.ProvisionedDisk = inv.ProvisionedDisk + } + if !row.InitialVcpus.Valid { + row.InitialVcpus = inv.InitialVcpus + } + if !row.InitialRam.Valid { + row.InitialRam = inv.InitialRam + } + if row.IsTemplate == "" { + row.IsTemplate = boolStringFromInterface(inv.IsTemplate) + } + if row.PoweredOn == "" { + row.PoweredOn = boolStringFromInterface(inv.PoweredOn) + } + if row.SrmPlaceholder == "" { + row.SrmPlaceholder = boolStringFromInterface(inv.SrmPlaceholder) + } + if !row.VmUuid.Valid { + row.VmUuid = inv.VmUuid + } + } + + if row.ResourcePool.String == "" { + if rpName, err := vc.GetVmResourcePool(*vmObject); err == nil { + row.ResourcePool = sql.NullString{String: rpName, Valid: rpName != ""} + } + } + + if row.Folder.String == "" { + if folderPath, err := vc.GetVMFolderPath(*vmObject); err == nil { + row.Folder = sql.NullString{String: folderPath, Valid: folderPath != ""} + } + } + + if row.Cluster.String == "" { + if clusterName, err := vc.GetClusterFromHost(vmObject.Runtime.Host); err == nil { + row.Cluster = sql.NullString{String: clusterName, Valid: clusterName != ""} + } + } + + if row.Datacenter.String == "" { + if dcName, err := vc.GetDatacenterForVM(*vmObject); err == nil { + row.Datacenter = sql.NullString{String: dcName, Valid: dcName != ""} + } + } + + return row, nil +} + +func snapshotFromInventory(inv queries.Inventory, snapshotTime time.Time) inventorySnapshotRow { + return inventorySnapshotRow{ + InventoryId: sql.NullInt64{Int64: inv.Iid, Valid: inv.Iid > 0}, + Name: inv.Name, + Vcenter: inv.Vcenter, + VmId: inv.VmId, + EventKey: inv.EventKey, + CloudId: inv.CloudId, + CreationTime: inv.CreationTime, + DeletionTime: inv.DeletionTime, + ResourcePool: inv.ResourcePool, + VmType: inv.VmType, + Datacenter: inv.Datacenter, + Cluster: inv.Cluster, + Folder: inv.Folder, + ProvisionedDisk: inv.ProvisionedDisk, + InitialVcpus: inv.InitialVcpus, + InitialRam: inv.InitialRam, + IsTemplate: boolStringFromInterface(inv.IsTemplate), + PoweredOn: boolStringFromInterface(inv.PoweredOn), + SrmPlaceholder: boolStringFromInterface(inv.SrmPlaceholder), + VmUuid: inv.VmUuid, + SnapshotTime: snapshotTime.Unix(), + } +} + +func insertDailyInventoryRow(ctx context.Context, dbConn *sqlx.DB, tableName string, row inventorySnapshotRow) error { + query := fmt.Sprintf(` +INSERT INTO %s ( + "InventoryId", "Name", "Vcenter", "VmId", "EventKey", "CloudId", "CreationTime", "DeletionTime", + "ResourcePool", "VmType", "Datacenter", "Cluster", "Folder", "ProvisionedDisk", "InitialVcpus", + "InitialRam", "IsTemplate", "PoweredOn", "SrmPlaceholder", "VmUuid", "SnapshotTime", "IsPresent" +) +VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?); +`, tableName) + + query = sqlx.Rebind(sqlx.BindType(dbConn.DriverName()), query) 
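+	// Rebind translates the '?' placeholders into the connected driver's
+	// bind style (e.g. $1, $2 for Postgres) before executing the insert.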
+ + _, err := dbConn.ExecContext(ctx, query, + row.InventoryId, + row.Name, + row.Vcenter, + row.VmId, + row.EventKey, + row.CloudId, + row.CreationTime, + row.DeletionTime, + row.ResourcePool, + row.VmType, + row.Datacenter, + row.Cluster, + row.Folder, + row.ProvisionedDisk, + row.InitialVcpus, + row.InitialRam, + row.IsTemplate, + row.PoweredOn, + row.SrmPlaceholder, + row.VmUuid, + row.SnapshotTime, + row.IsPresent, + ) + return err +} + +func boolStringFromInterface(value interface{}) string { + switch v := value.(type) { + case nil: + return "" + case string: + return v + case []byte: + return string(v) + case bool: + if v { + return "TRUE" + } + return "FALSE" + case int: + if v != 0 { + return "TRUE" + } + return "FALSE" + case int64: + if v != 0 { + return "TRUE" + } + return "FALSE" + default: + return fmt.Sprint(v) + } +} diff --git a/main.go b/main.go index 6da2b1f..6d82f01 100644 --- a/main.go +++ b/main.go @@ -23,12 +23,14 @@ import ( ) var ( - bindDisableTls bool - sha1ver string // sha1 revision used to build the program - buildTime string // when the executable was built - cronFrequency time.Duration - cronInvFrequency time.Duration - encryptionKey = []byte("5L1l3B5KvwOCzUHMAlCgsgUTRAYMfSpa") + bindDisableTls bool + sha1ver string // sha1 revision used to build the program + buildTime string // when the executable was built + cronFrequency time.Duration + cronInvFrequency time.Duration + cronSnapshotFrequency time.Duration + cronAggregateFrequency time.Duration + encryptionKey = []byte("5L1l3B5KvwOCzUHMAlCgsgUTRAYMfSpa") ) func main() { @@ -189,6 +191,30 @@ func main() { } logger.Debug("Setting VM inventory polling cronjob frequency to", "frequency", cronInvFrequency) + cronSnapshotFrequencyString := os.Getenv("VCENTER_INVENTORY_SNAPSHOT_SECONDS") + if cronSnapshotFrequencyString != "" { + cronSnapshotFrequency, err = time.ParseDuration(cronSnapshotFrequencyString) + if err != nil { + slog.Error("Can't convert VCENTER_INVENTORY_SNAPSHOT_SECONDS value to time duration. Defaulting to 3600", "value", cronSnapshotFrequencyString, "error", err) + cronSnapshotFrequency = time.Hour + } + } else { + cronSnapshotFrequency = time.Hour + } + logger.Debug("Setting VM inventory snapshot cronjob frequency to", "frequency", cronSnapshotFrequency) + + cronAggregateFrequencyString := os.Getenv("VCENTER_INVENTORY_AGGREGATE_SECONDS") + if cronAggregateFrequencyString != "" { + cronAggregateFrequency, err = time.ParseDuration(cronAggregateFrequencyString) + if err != nil { + slog.Error("Can't convert VCENTER_INVENTORY_AGGREGATE_SECONDS value to time duration. 
Defaulting to 86400", "value", cronAggregateFrequencyString, "error", err) + cronAggregateFrequency = time.Hour * 24 + } + } else { + cronAggregateFrequency = time.Hour * 24 + } + logger.Debug("Setting VM inventory aggregation cronjob frequency to", "frequency", cronAggregateFrequency) + // start background processing for events stored in events table startsAt := time.Now().Add(time.Second * 10) job, err := c.NewJob( @@ -219,6 +245,65 @@ func main() { } logger.Debug("Created vcenter inventory cron job", "job", job2.ID(), "starting_at", startsAt2) + startsAt3 := time.Now().Add(cronSnapshotFrequency) + if cronSnapshotFrequency == time.Hour { + startsAt3 = time.Now().Truncate(time.Hour).Add(time.Hour) + } + job3, err := c.NewJob( + gocron.DurationJob(cronSnapshotFrequency), + gocron.NewTask(func() { + ct.RunVcenterSnapshotHourly(ctx, logger) + }), gocron.WithSingletonMode(gocron.LimitModeReschedule), + gocron.WithStartAt(gocron.WithStartDateTime(startsAt3)), + ) + if err != nil { + logger.Error("failed to start vcenter inventory snapshot cron job", "error", err) + os.Exit(1) + } + logger.Debug("Created vcenter inventory snapshot cron job", "job", job3.ID(), "starting_at", startsAt3) + + startsAt4 := time.Now().Add(cronAggregateFrequency) + if cronAggregateFrequency == time.Hour*24 { + now := time.Now() + startsAt4 = time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, now.Location()) + } + job4, err := c.NewJob( + gocron.DurationJob(cronAggregateFrequency), + gocron.NewTask(func() { + ct.RunVcenterDailyAggregate(ctx, logger) + }), gocron.WithSingletonMode(gocron.LimitModeReschedule), + gocron.WithStartAt(gocron.WithStartDateTime(startsAt4)), + ) + if err != nil { + logger.Error("failed to start vcenter inventory aggregation cron job", "error", err) + os.Exit(1) + } + logger.Debug("Created vcenter inventory aggregation cron job", "job", job4.ID(), "starting_at", startsAt4) + + job5, err := c.NewJob( + gocron.CronJob("0 0 1 * *", false), + gocron.NewTask(func() { + ct.RunVcenterMonthlyAggregate(ctx, logger) + }), gocron.WithSingletonMode(gocron.LimitModeReschedule), + ) + if err != nil { + logger.Error("failed to start vcenter monthly aggregation cron job", "error", err) + os.Exit(1) + } + logger.Debug("Created vcenter monthly aggregation cron job", "job", job5.ID()) + + job6, err := c.NewJob( + gocron.CronJob("0 30 2 * *", false), + gocron.NewTask(func() { + ct.RunSnapshotCleanup(ctx, logger) + }), gocron.WithSingletonMode(gocron.LimitModeReschedule), + ) + if err != nil { + logger.Error("failed to start snapshot cleanup cron job", "error", err) + os.Exit(1) + } + logger.Debug("Created snapshot cleanup cron job", "job", job6.ID()) + // start cron scheduler c.Start() diff --git a/.drone.sh b/scripts/drone.sh similarity index 58% rename from .drone.sh rename to scripts/drone.sh index f098c0e..9fc6d94 100755 --- a/.drone.sh +++ b/scripts/drone.sh @@ -3,16 +3,19 @@ # disable CGO for cross-compiling export CGO_ENABLED=0 -commit=$(git rev-parse HEAD) -#tag=$(git describe --tags --abbrev=0) -buildtime=$(TZ=Australia/Sydney date +%Y-%m-%dT%T%z) -git_version=$(git describe --tags --always --long --dirty) package_name=vctp +commit=$(git rev-parse HEAD) +buildtime=$(date +%Y-%m-%dT%T%z) +#Extract the version from yml +package_version=$(grep 'version:' "$package_name.yml" | awk '{print $2}' | tr -d '"' | sed 's/^v//') #platforms=("linux/amd64" "darwin/amd64") platforms=("linux/amd64") -echo Building $package_name with git version: $git_version +echo Building:: +echo - Version $package_version +echo 
- Commit $commit +echo - Build Time $buildtime for platform in "${platforms[@]}" do platform_split=(${platform//\// }) @@ -25,14 +28,16 @@ do starttime=$(TZ=Australia/Sydney date +%Y-%m-%dT%T%z) echo "build commences at $starttime" - env GOOS=$GOOS GOARCH=$GOARCH go build -trimpath -ldflags="-X main.sha1ver=$commit -X main.buildTime=$buildtime" -o build/$output_name $package + env GOOS=$GOOS GOARCH=$GOARCH go build -trimpath -ldflags="-X main.version=$package_version -X main.commit=$commit -X main.buildTime=$buildtime" -o build/$output_name $package if [ $? -ne 0 ]; then echo 'An error has occurred! Aborting the script execution...' exit 1 fi - gzip build/$output_name + #gzip build/$output_name echo "build complete at $buildtime : $output_name" - sha256sum build/${output_name}.gz > build/${output_name}_checksum.txt + #sha256sum build/${output_name}.gz > build/${output_name}_checksum.txt done +#nfpm package --config $package_name.yml --packager rpm --target build/ + ls -lah build diff --git a/scripts/update-swagger-ui.sh b/scripts/update-swagger-ui.sh new file mode 100755 index 0000000..62487bd --- /dev/null +++ b/scripts/update-swagger-ui.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Usage: ./update-swagger-ui.sh [version] +# Example: ./update-swagger-ui.sh v5.17.14 +# If no version is provided, defaults below is used. +VERSION="${1:-v5.29.5}" + +TARGET_DIR="server/router/swagger-ui-dist" +TARBALL_URL="https://github.com/swagger-api/swagger-ui/archive/refs/tags/${VERSION}.tar.gz" + +echo ">> Fetching Swagger UI ${VERSION} …" +tmpdir="$(mktemp -d)" +cleanup() { rm -rf "$tmpdir"; } +trap cleanup EXIT + +# Requirements check +for cmd in curl tar; do + command -v "$cmd" >/dev/null 2>&1 || { echo "ERROR: $cmd not found"; exit 1; } +done + +# Download & unpack +curl -fsSL "$TARBALL_URL" | tar -xz -C "$tmpdir" +SRC_DIR="${tmpdir}/swagger-ui-${VERSION#v}/dist" +if [[ ! -d "$SRC_DIR" ]]; then + echo "ERROR: Unpacked dist not found at $SRC_DIR" + exit 1 +fi + +# Replace target +rm -rf "$TARGET_DIR" +mkdir -p "$TARGET_DIR" +# Use cp -a for portability (avoids rsync dependency) +cp -a "${SRC_DIR}/." "$TARGET_DIR/" + +INDEX="${TARGET_DIR}/swagger-initializer.js" +if [[ ! -f "$INDEX" ]]; then + echo "ERROR: ${INDEX} not found after copy" + exit 1 +fi + +echo ">> Patching swagger-initializer.js to point at /swagger.json" + +sed -i -E \ + -e 's#configUrl:[[:space:]]*["'\''"][^"'\''"]*["'\''"]#url: "/swagger.json"#' \ + -e 's#url:[[:space:]]*["'\''"][^"'\''"]*["'\''"]#url: "/swagger.json"#' \ + -e 's#urls:[[:space:]]*\[[^]]*\]#url: "/swagger.json"#' \ + -e '/url:[[:space:]]*"[^\"]*swagger\.json"[[:space:]]*,?$/a\ validatorUrl: null,' \ + "$INDEX" + +echo ">> Done. Files are in ${TARGET_DIR}" \ No newline at end of file diff --git a/server/handler/snapshots.go b/server/handler/snapshots.go new file mode 100644 index 0000000..a625ff0 --- /dev/null +++ b/server/handler/snapshots.go @@ -0,0 +1,124 @@ +package handler + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "vctp/components/views" + "vctp/internal/report" + + "github.com/a-h/templ" +) + +// SnapshotHourlyList renders the hourly snapshot list page. +// @Summary List hourly snapshots +// @Description Lists hourly inventory snapshot tables. 
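+// Daily summary tables (inventory_daily_summary_*) are excluded from this listing.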
+// @Tags snapshots +// @Produce text/html +// @Success 200 {string} string "HTML page" +// @Failure 500 {string} string "Server error" +// @Router /snapshots/hourly [get] +func (h *Handler) SnapshotHourlyList(w http.ResponseWriter, r *http.Request) { + h.renderSnapshotList(w, r, "inventory_daily_", "Hourly Inventory Snapshots", views.SnapshotHourlyList) +} + +// SnapshotDailyList renders the daily snapshot list page. +// @Summary List daily snapshots +// @Description Lists daily summary snapshot tables. +// @Tags snapshots +// @Produce text/html +// @Success 200 {string} string "HTML page" +// @Failure 500 {string} string "Server error" +// @Router /snapshots/daily [get] +func (h *Handler) SnapshotDailyList(w http.ResponseWriter, r *http.Request) { + h.renderSnapshotList(w, r, "inventory_daily_summary_", "Daily Inventory Snapshots", views.SnapshotDailyList) +} + +// SnapshotMonthlyList renders the monthly snapshot list page. +// @Summary List monthly snapshots +// @Description Lists monthly summary snapshot tables. +// @Tags snapshots +// @Produce text/html +// @Success 200 {string} string "HTML page" +// @Failure 500 {string} string "Server error" +// @Router /snapshots/monthly [get] +func (h *Handler) SnapshotMonthlyList(w http.ResponseWriter, r *http.Request) { + h.renderSnapshotList(w, r, "inventory_monthly_summary_", "Monthly Inventory Snapshots", views.SnapshotMonthlyList) +} + +// SnapshotReportDownload streams a snapshot table as XLSX. +// @Summary Download snapshot report +// @Description Downloads a snapshot table as an XLSX file. +// @Tags snapshots +// @Produce application/vnd.openxmlformats-officedocument.spreadsheetml.sheet +// @Param table query string true "Snapshot table name" +// @Success 200 {file} file "Snapshot XLSX report" +// @Failure 400 {object} map[string]string "Invalid request" +// @Failure 500 {object} map[string]string "Server error" +// @Router /api/report/snapshot [get] +func (h *Handler) SnapshotReportDownload(w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + tableName := r.URL.Query().Get("table") + if tableName == "" { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusBadRequest) + json.NewEncoder(w).Encode(map[string]string{ + "status": "ERROR", + "message": "Missing table parameter", + }) + return + } + + reportData, err := report.CreateTableReport(h.Logger, h.Database, ctx, tableName) + if err != nil { + h.Logger.Error("Failed to create snapshot report", "error", err, "table", tableName) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(map[string]string{ + "status": "ERROR", + "message": fmt.Sprintf("Unable to create snapshot report: '%s'", err), + }) + return + } + + filename := fmt.Sprintf("%s.xlsx", tableName) + w.Header().Set("Content-Type", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet") + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename)) + w.Header().Set("File-Name", filename) + w.Write(reportData) +} + +func (h *Handler) renderSnapshotList(w http.ResponseWriter, r *http.Request, prefix string, title string, renderer func([]views.SnapshotEntry) templ.Component) { + ctx := context.Background() + tables, err := report.ListTablesByPrefix(ctx, h.Database, prefix) + if err != nil { + h.Logger.Error("Failed to list snapshot tables", "error", err, "prefix", prefix) + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Unable to list snapshot 
tables: %s\n", err) + return + } + + entries := make([]views.SnapshotEntry, 0, len(tables)) + for _, table := range tables { + if prefix == "inventory_daily_" && strings.HasPrefix(table, "inventory_daily_summary_") { + continue + } + label := table + if parsed, ok := report.FormatSnapshotLabel(prefix, table); ok { + label = parsed + } + entries = append(entries, views.SnapshotEntry{ + Label: label, + Link: "/api/report/snapshot?table=" + url.QueryEscape(table), + }) + } + + if err := renderer(entries).Render(r.Context(), w); err != nil { + h.Logger.Error("Failed to render snapshot list", "error", err, "title", title) + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Failed to render snapshot list") + } +} diff --git a/server/router/.gitignore b/server/router/.gitignore new file mode 100644 index 0000000..4be9160 --- /dev/null +++ b/server/router/.gitignore @@ -0,0 +1 @@ +swagger-ui-dist/ \ No newline at end of file diff --git a/server/router/docs/docs.go b/server/router/docs/docs.go new file mode 100644 index 0000000..8e029de --- /dev/null +++ b/server/router/docs/docs.go @@ -0,0 +1,768 @@ +// Package docs Code generated by swaggo/swag. DO NOT EDIT +package docs + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "contact": {}, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/": { + "get": { + "description": "Renders the main UI page.", + "produces": [ + "text/html" + ], + "tags": [ + "ui" + ], + "summary": "Home page", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Render failed", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/cleanup/updates": { + "delete": { + "description": "Removes update records that are no longer associated with a VM.", + "produces": [ + "text/plain" + ], + "tags": [ + "maintenance" + ], + "summary": "Cleanup updates", + "responses": { + "200": { + "description": "Cleanup completed", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/cleanup/vcenter": { + "delete": { + "description": "Removes all inventory entries associated with a vCenter URL.", + "produces": [ + "application/json" + ], + "tags": [ + "maintenance" + ], + "summary": "Cleanup vCenter inventory", + "parameters": [ + { + "type": "string", + "description": "vCenter URL", + "name": "vc_url", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "Cleanup completed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/encrypt": { + "post": { + "description": "Encrypts a plaintext value and returns the ciphertext.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "crypto" + ], + "summary": "Encrypt data", + "parameters": [ + { + "description": "Plaintext payload", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "Ciphertext response", + "schema": { + "type": "object", + 
"additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/event/vm/create": { + "post": { + "description": "Parses a VM create CloudEvent and stores the event data.", + "consumes": [ + "application/json" + ], + "produces": [ + "text/plain" + ], + "tags": [ + "events" + ], + "summary": "Record VM create event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Create event processed", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/event/vm/delete": { + "post": { + "description": "Parses a VM delete CloudEvent and marks the VM as deleted in inventory.", + "consumes": [ + "application/json" + ], + "produces": [ + "text/plain" + ], + "tags": [ + "events" + ], + "summary": "Record VM delete event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Delete event processed", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/event/vm/modify": { + "post": { + "description": "Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Record VM modify event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Modify event processed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "202": { + "description": "No relevant changes", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/event/vm/move": { + "post": { + "description": "Parses a VM move CloudEvent and creates an update record.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Record VM move event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Move event processed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + 
"additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/import/vm": { + "post": { + "description": "Imports existing VM inventory data in bulk.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "inventory" + ], + "summary": "Import VMs", + "parameters": [ + { + "description": "Bulk import payload", + "name": "import", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.ImportReceived" + } + } + ], + "responses": { + "200": { + "description": "Import processed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/inventory/vm/delete": { + "delete": { + "description": "Removes a VM inventory entry by VM ID and datacenter name.", + "produces": [ + "application/json" + ], + "tags": [ + "inventory" + ], + "summary": "Cleanup VM inventory entry", + "parameters": [ + { + "type": "string", + "description": "VM ID", + "name": "vm_id", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "Datacenter name", + "name": "datacenter_name", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "Cleanup completed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/inventory/vm/update": { + "post": { + "description": "Queries vCenter and updates inventory records with missing details.", + "produces": [ + "text/plain" + ], + "tags": [ + "inventory" + ], + "summary": "Refresh VM details", + "responses": { + "200": { + "description": "Update completed", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/report/inventory": { + "get": { + "description": "Generates an inventory XLSX report and returns it as a file download.", + "produces": [ + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" + ], + "tags": [ + "reports" + ], + "summary": "Download inventory report", + "responses": { + "200": { + "description": "Inventory XLSX report", + "schema": { + "type": "file" + } + }, + "500": { + "description": "Report generation failed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/report/snapshot": { + "get": { + "description": "Downloads a snapshot table as an XLSX file.", + "produces": [ + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" + ], + "tags": [ + "snapshots" + ], + "summary": "Download snapshot report", + "parameters": [ + { + "type": "string", + "description": "Snapshot table name", + "name": "table", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "Snapshot XLSX report", + "schema": { + "type": "file" + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/report/updates": { + "get": { + "description": "Generates an updates XLSX report and returns it as a 
file download.", + "produces": [ + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" + ], + "tags": [ + "reports" + ], + "summary": "Download updates report", + "responses": { + "200": { + "description": "Updates XLSX report", + "schema": { + "type": "file" + } + }, + "500": { + "description": "Report generation failed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/snapshots/daily": { + "get": { + "description": "Lists daily summary snapshot tables.", + "produces": [ + "text/html" + ], + "tags": [ + "snapshots" + ], + "summary": "List daily snapshots", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/snapshots/hourly": { + "get": { + "description": "Lists hourly inventory snapshot tables.", + "produces": [ + "text/html" + ], + "tags": [ + "snapshots" + ], + "summary": "List hourly snapshots", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/snapshots/monthly": { + "get": { + "description": "Lists monthly summary snapshot tables.", + "produces": [ + "text/html" + ], + "tags": [ + "snapshots" + ], + "summary": "List monthly snapshots", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + } + }, + "definitions": { + "models.CloudEventReceived": { + "type": "object" + }, + "models.CloudEventResourcePool": { + "type": "object", + "properties": { + "Name": { + "type": "string" + }, + "ResourcePool": { + "type": "object", + "properties": { + "Type": { + "type": "string" + }, + "Value": { + "type": "string" + } + } + } + } + }, + "models.CloudEventVm": { + "type": "object", + "properties": { + "Name": { + "type": "string" + }, + "Vm": { + "type": "object", + "properties": { + "Type": { + "type": "string" + }, + "Value": { + "type": "string" + } + } + } + } + }, + "models.ImportReceived": { + "type": "object", + "properties": { + "Cluster": { + "type": "string" + }, + "CreationTime": { + "type": "integer" + }, + "Datacenter": { + "type": "string" + }, + "Folder": { + "type": "string" + }, + "InitialRam": { + "type": "integer" + }, + "InitialVcpus": { + "type": "integer" + }, + "Name": { + "type": "string" + }, + "PowerState": { + "type": "integer" + }, + "ProvisionedDisk": { + "type": "number" + }, + "ResourcePool": { + "type": "string" + }, + "Vcenter": { + "type": "string" + }, + "VmId": { + "type": "string" + } + } + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "", + Host: "", + BasePath: "", + Schemes: []string{}, + Title: "", + Description: "", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/server/router/docs/swagger.json b/server/router/docs/swagger.json new file mode 100644 index 0000000..62e3124 --- /dev/null +++ b/server/router/docs/swagger.json @@ -0,0 +1,739 @@ +{ + "swagger": "2.0", + "info": { + "contact": {} + }, + "paths": { + "/": { + "get": { + "description": "Renders the main UI page.", + "produces": [ + "text/html" + ], + "tags": [ + 
"ui" + ], + "summary": "Home page", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Render failed", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/cleanup/updates": { + "delete": { + "description": "Removes update records that are no longer associated with a VM.", + "produces": [ + "text/plain" + ], + "tags": [ + "maintenance" + ], + "summary": "Cleanup updates", + "responses": { + "200": { + "description": "Cleanup completed", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/cleanup/vcenter": { + "delete": { + "description": "Removes all inventory entries associated with a vCenter URL.", + "produces": [ + "application/json" + ], + "tags": [ + "maintenance" + ], + "summary": "Cleanup vCenter inventory", + "parameters": [ + { + "type": "string", + "description": "vCenter URL", + "name": "vc_url", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "Cleanup completed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/encrypt": { + "post": { + "description": "Encrypts a plaintext value and returns the ciphertext.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "crypto" + ], + "summary": "Encrypt data", + "parameters": [ + { + "description": "Plaintext payload", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + ], + "responses": { + "200": { + "description": "Ciphertext response", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/event/vm/create": { + "post": { + "description": "Parses a VM create CloudEvent and stores the event data.", + "consumes": [ + "application/json" + ], + "produces": [ + "text/plain" + ], + "tags": [ + "events" + ], + "summary": "Record VM create event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Create event processed", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/event/vm/delete": { + "post": { + "description": "Parses a VM delete CloudEvent and marks the VM as deleted in inventory.", + "consumes": [ + "application/json" + ], + "produces": [ + "text/plain" + ], + "tags": [ + "events" + ], + "summary": "Record VM delete event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Delete event processed", + "schema": { + "type": "string" + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": 
"string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/event/vm/modify": { + "post": { + "description": "Parses a VM modify CloudEvent and creates an update record when relevant changes are detected.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Record VM modify event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Modify event processed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "202": { + "description": "No relevant changes", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/event/vm/move": { + "post": { + "description": "Parses a VM move CloudEvent and creates an update record.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Record VM move event", + "parameters": [ + { + "description": "CloudEvent payload", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.CloudEventReceived" + } + } + ], + "responses": { + "200": { + "description": "Move event processed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/import/vm": { + "post": { + "description": "Imports existing VM inventory data in bulk.", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "inventory" + ], + "summary": "Import VMs", + "parameters": [ + { + "description": "Bulk import payload", + "name": "import", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/models.ImportReceived" + } + } + ], + "responses": { + "200": { + "description": "Import processed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/inventory/vm/delete": { + "delete": { + "description": "Removes a VM inventory entry by VM ID and datacenter name.", + "produces": [ + "application/json" + ], + "tags": [ + "inventory" + ], + "summary": "Cleanup VM inventory entry", + "parameters": [ + { + "type": "string", + "description": "VM ID", + "name": "vm_id", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "Datacenter name", + "name": "datacenter_name", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "Cleanup completed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + 
"/api/inventory/vm/update": { + "post": { + "description": "Queries vCenter and updates inventory records with missing details.", + "produces": [ + "text/plain" + ], + "tags": [ + "inventory" + ], + "summary": "Refresh VM details", + "responses": { + "200": { + "description": "Update completed", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/report/inventory": { + "get": { + "description": "Generates an inventory XLSX report and returns it as a file download.", + "produces": [ + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" + ], + "tags": [ + "reports" + ], + "summary": "Download inventory report", + "responses": { + "200": { + "description": "Inventory XLSX report", + "schema": { + "type": "file" + } + }, + "500": { + "description": "Report generation failed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/report/snapshot": { + "get": { + "description": "Downloads a snapshot table as an XLSX file.", + "produces": [ + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" + ], + "tags": [ + "snapshots" + ], + "summary": "Download snapshot report", + "parameters": [ + { + "type": "string", + "description": "Snapshot table name", + "name": "table", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "Snapshot XLSX report", + "schema": { + "type": "file" + } + }, + "400": { + "description": "Invalid request", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/api/report/updates": { + "get": { + "description": "Generates an updates XLSX report and returns it as a file download.", + "produces": [ + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" + ], + "tags": [ + "reports" + ], + "summary": "Download updates report", + "responses": { + "200": { + "description": "Updates XLSX report", + "schema": { + "type": "file" + } + }, + "500": { + "description": "Report generation failed", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/snapshots/daily": { + "get": { + "description": "Lists daily summary snapshot tables.", + "produces": [ + "text/html" + ], + "tags": [ + "snapshots" + ], + "summary": "List daily snapshots", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/snapshots/hourly": { + "get": { + "description": "Lists hourly inventory snapshot tables.", + "produces": [ + "text/html" + ], + "tags": [ + "snapshots" + ], + "summary": "List hourly snapshots", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } + }, + "/snapshots/monthly": { + "get": { + "description": "Lists monthly summary snapshot tables.", + "produces": [ + "text/html" + ], + "tags": [ + "snapshots" + ], + "summary": "List monthly snapshots", + "responses": { + "200": { + "description": "HTML page", + "schema": { + "type": "string" + } + }, + "500": { + "description": "Server error", + "schema": { + "type": "string" + } + } + } + } 
+ } + }, + "definitions": { + "models.CloudEventReceived": { + "type": "object" + }, + "models.CloudEventResourcePool": { + "type": "object", + "properties": { + "Name": { + "type": "string" + }, + "ResourcePool": { + "type": "object", + "properties": { + "Type": { + "type": "string" + }, + "Value": { + "type": "string" + } + } + } + } + }, + "models.CloudEventVm": { + "type": "object", + "properties": { + "Name": { + "type": "string" + }, + "Vm": { + "type": "object", + "properties": { + "Type": { + "type": "string" + }, + "Value": { + "type": "string" + } + } + } + } + }, + "models.ImportReceived": { + "type": "object", + "properties": { + "Cluster": { + "type": "string" + }, + "CreationTime": { + "type": "integer" + }, + "Datacenter": { + "type": "string" + }, + "Folder": { + "type": "string" + }, + "InitialRam": { + "type": "integer" + }, + "InitialVcpus": { + "type": "integer" + }, + "Name": { + "type": "string" + }, + "PowerState": { + "type": "integer" + }, + "ProvisionedDisk": { + "type": "number" + }, + "ResourcePool": { + "type": "string" + }, + "Vcenter": { + "type": "string" + }, + "VmId": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/server/router/docs/swagger.yaml b/server/router/docs/swagger.yaml new file mode 100644 index 0000000..44e1a19 --- /dev/null +++ b/server/router/docs/swagger.yaml @@ -0,0 +1,483 @@ +definitions: + models.CloudEventReceived: + type: object + models.CloudEventResourcePool: + properties: + Name: + type: string + ResourcePool: + properties: + Type: + type: string + Value: + type: string + type: object + type: object + models.CloudEventVm: + properties: + Name: + type: string + Vm: + properties: + Type: + type: string + Value: + type: string + type: object + type: object + models.ImportReceived: + properties: + Cluster: + type: string + CreationTime: + type: integer + Datacenter: + type: string + Folder: + type: string + InitialRam: + type: integer + InitialVcpus: + type: integer + Name: + type: string + PowerState: + type: integer + ProvisionedDisk: + type: number + ResourcePool: + type: string + Vcenter: + type: string + VmId: + type: string + type: object +info: + contact: {} +paths: + /: + get: + description: Renders the main UI page. + produces: + - text/html + responses: + "200": + description: HTML page + schema: + type: string + "500": + description: Render failed + schema: + type: string + summary: Home page + tags: + - ui + /api/cleanup/updates: + delete: + description: Removes update records that are no longer associated with a VM. + produces: + - text/plain + responses: + "200": + description: Cleanup completed + schema: + type: string + "500": + description: Server error + schema: + type: string + summary: Cleanup updates + tags: + - maintenance + /api/cleanup/vcenter: + delete: + description: Removes all inventory entries associated with a vCenter URL. + parameters: + - description: vCenter URL + in: query + name: vc_url + required: true + type: string + produces: + - application/json + responses: + "200": + description: Cleanup completed + schema: + additionalProperties: + type: string + type: object + "400": + description: Invalid request + schema: + additionalProperties: + type: string + type: object + summary: Cleanup vCenter inventory + tags: + - maintenance + /api/encrypt: + post: + consumes: + - application/json + description: Encrypts a plaintext value and returns the ciphertext. 
+ parameters: + - description: Plaintext payload + in: body + name: payload + required: true + schema: + additionalProperties: + type: string + type: object + produces: + - application/json + responses: + "200": + description: Ciphertext response + schema: + additionalProperties: + type: string + type: object + "500": + description: Server error + schema: + additionalProperties: + type: string + type: object + summary: Encrypt data + tags: + - crypto + /api/event/vm/create: + post: + consumes: + - application/json + description: Parses a VM create CloudEvent and stores the event data. + parameters: + - description: CloudEvent payload + in: body + name: event + required: true + schema: + $ref: '#/definitions/models.CloudEventReceived' + produces: + - text/plain + responses: + "200": + description: Create event processed + schema: + type: string + "400": + description: Invalid request + schema: + type: string + "500": + description: Server error + schema: + type: string + summary: Record VM create event + tags: + - events + /api/event/vm/delete: + post: + consumes: + - application/json + description: Parses a VM delete CloudEvent and marks the VM as deleted in inventory. + parameters: + - description: CloudEvent payload + in: body + name: event + required: true + schema: + $ref: '#/definitions/models.CloudEventReceived' + produces: + - text/plain + responses: + "200": + description: Delete event processed + schema: + type: string + "400": + description: Invalid request + schema: + type: string + "500": + description: Server error + schema: + type: string + summary: Record VM delete event + tags: + - events + /api/event/vm/modify: + post: + consumes: + - application/json + description: Parses a VM modify CloudEvent and creates an update record when + relevant changes are detected. + parameters: + - description: CloudEvent payload + in: body + name: event + required: true + schema: + $ref: '#/definitions/models.CloudEventReceived' + produces: + - application/json + responses: + "200": + description: Modify event processed + schema: + additionalProperties: + type: string + type: object + "202": + description: No relevant changes + schema: + additionalProperties: + type: string + type: object + "500": + description: Server error + schema: + additionalProperties: + type: string + type: object + summary: Record VM modify event + tags: + - events + /api/event/vm/move: + post: + consumes: + - application/json + description: Parses a VM move CloudEvent and creates an update record. + parameters: + - description: CloudEvent payload + in: body + name: event + required: true + schema: + $ref: '#/definitions/models.CloudEventReceived' + produces: + - application/json + responses: + "200": + description: Move event processed + schema: + additionalProperties: + type: string + type: object + "400": + description: Invalid request + schema: + additionalProperties: + type: string + type: object + "500": + description: Server error + schema: + additionalProperties: + type: string + type: object + summary: Record VM move event + tags: + - events + /api/import/vm: + post: + consumes: + - application/json + description: Imports existing VM inventory data in bulk. 
+ parameters: + - description: Bulk import payload + in: body + name: import + required: true + schema: + $ref: '#/definitions/models.ImportReceived' + produces: + - application/json + responses: + "200": + description: Import processed + schema: + additionalProperties: + type: string + type: object + "500": + description: Server error + schema: + additionalProperties: + type: string + type: object + summary: Import VMs + tags: + - inventory + /api/inventory/vm/delete: + delete: + description: Removes a VM inventory entry by VM ID and datacenter name. + parameters: + - description: VM ID + in: query + name: vm_id + required: true + type: string + - description: Datacenter name + in: query + name: datacenter_name + required: true + type: string + produces: + - application/json + responses: + "200": + description: Cleanup completed + schema: + additionalProperties: + type: string + type: object + "400": + description: Invalid request + schema: + additionalProperties: + type: string + type: object + summary: Cleanup VM inventory entry + tags: + - inventory + /api/inventory/vm/update: + post: + description: Queries vCenter and updates inventory records with missing details. + produces: + - text/plain + responses: + "200": + description: Update completed + schema: + type: string + "500": + description: Server error + schema: + type: string + summary: Refresh VM details + tags: + - inventory + /api/report/inventory: + get: + description: Generates an inventory XLSX report and returns it as a file download. + produces: + - application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + responses: + "200": + description: Inventory XLSX report + schema: + type: file + "500": + description: Report generation failed + schema: + additionalProperties: + type: string + type: object + summary: Download inventory report + tags: + - reports + /api/report/snapshot: + get: + description: Downloads a snapshot table as an XLSX file. + parameters: + - description: Snapshot table name + in: query + name: table + required: true + type: string + produces: + - application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + responses: + "200": + description: Snapshot XLSX report + schema: + type: file + "400": + description: Invalid request + schema: + additionalProperties: + type: string + type: object + "500": + description: Server error + schema: + additionalProperties: + type: string + type: object + summary: Download snapshot report + tags: + - snapshots + /api/report/updates: + get: + description: Generates an updates XLSX report and returns it as a file download. + produces: + - application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + responses: + "200": + description: Updates XLSX report + schema: + type: file + "500": + description: Report generation failed + schema: + additionalProperties: + type: string + type: object + summary: Download updates report + tags: + - reports + /snapshots/daily: + get: + description: Lists daily summary snapshot tables. + produces: + - text/html + responses: + "200": + description: HTML page + schema: + type: string + "500": + description: Server error + schema: + type: string + summary: List daily snapshots + tags: + - snapshots + /snapshots/hourly: + get: + description: Lists hourly inventory snapshot tables. 
+ produces: + - text/html + responses: + "200": + description: HTML page + schema: + type: string + "500": + description: Server error + schema: + type: string + summary: List hourly snapshots + tags: + - snapshots + /snapshots/monthly: + get: + description: Lists monthly summary snapshot tables. + produces: + - text/html + responses: + "200": + description: HTML page + schema: + type: string + "500": + description: Server error + schema: + type: string + summary: List monthly snapshots + tags: + - snapshots +swagger: "2.0" diff --git a/server/router/router.go b/server/router/router.go index 8a2bfdc..e976547 100644 --- a/server/router/router.go +++ b/server/router/router.go @@ -1,6 +1,7 @@ package router import ( + "io/fs" "log/slog" "net/http" "net/http/pprof" @@ -46,10 +47,31 @@ func New(logger *slog.Logger, database db.Database, buildTime string, sha1ver st mux.HandleFunc("/api/report/inventory", h.InventoryReportDownload) mux.HandleFunc("/api/report/updates", h.UpdateReportDownload) + mux.HandleFunc("/api/report/snapshot", h.SnapshotReportDownload) + + mux.HandleFunc("/snapshots/hourly", h.SnapshotHourlyList) + mux.HandleFunc("/snapshots/daily", h.SnapshotDailyList) + mux.HandleFunc("/snapshots/monthly", h.SnapshotMonthlyList) // endpoint for encrypting vcenter credential mux.HandleFunc("/api/encrypt", h.EncryptData) + // serve swagger related components from the embedded fs + swaggerSub, err := fs.Sub(swaggerUI, "swagger-ui-dist") + if err != nil { + logger.Error("failed to load swagger ui assets", "error", err) + } else { + mux.Handle("/swagger/", http.StripPrefix("/swagger/", http.FileServer(http.FS(swaggerSub)))) + } + mux.HandleFunc("/swagger", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/swagger/", http.StatusPermanentRedirect) + }) + mux.HandleFunc("/swagger.json", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(swaggerSpec) + }) + // Register pprof handlers mux.HandleFunc("/debug/pprof/", pprof.Index) mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) diff --git a/server/router/swagger_embed.go b/server/router/swagger_embed.go new file mode 100644 index 0000000..90b9d4c --- /dev/null +++ b/server/router/swagger_embed.go @@ -0,0 +1,11 @@ +package router + +import ( + "embed" +) + +//go:embed swagger-ui-dist/* +var swaggerUI embed.FS + +//go:embed docs/swagger.json +var swaggerSpec []byte diff --git a/sqlc.yml b/sqlc.yml index 8faabbc..53f961a 100644 --- a/sqlc.yml +++ b/sqlc.yml @@ -3,7 +3,8 @@ sql: - engine: sqlite queries: - db/queries/query.sql - schema: db/migrations + schema: + - db/schema.sql gen: go: package: queries