Compare commits

..

33 Commits

SHA1        Message  [CI status]  Date

ed0e5947ea  Merge branch 'main' of https://git.coadcorp.com/nathan/go-events  [drone/push: passing]  2025-04-29 16:58:14 +10:00
a9aad6643d  upgrade golang  2025-04-29 16:56:46 +10:00
f86aefdca6  Update main.go  [drone/push: passing]  2024-12-03 10:23:07 +11:00
a1e4649455  Update main.go  [drone/push: passing]  2024-12-03 09:15:19 +11:00
a615602bf8  Update main.go (extend HA host outage search duration)  [drone/push: passing]  2024-12-03 08:41:08 +11:00
8dd6146818  again  [drone/push: passing]  2024-09-16 16:03:56 +10:00
66543b15b6  update  [drone/push: build killed]  2024-09-16 16:03:22 +10:00
508ddd73f7  try avoid nil dereference  [drone/push: passing]  2024-09-16 15:57:16 +10:00
fb433b9ef2  update  [drone/push: passing]  2024-09-16 15:49:27 +10:00
71c397e5fe  code tidy up  [drone/push: passing; drone: failing]  2024-04-10 13:55:56 +10:00
4ea27bf071  try different plugin  [drone/push: passing]  2024-04-09 08:56:07 +10:00
ae5ce907c4  debug sftp  [drone/push: failing]  2024-04-09 08:46:11 +10:00
9e20f8b9a2  re-test  [drone/push: failing]  2024-04-09 08:41:10 +10:00
e47d660419  test dell sftp  [drone/push: failing]  2024-04-09 08:35:14 +10:00
47c61f0417  avoid panic when vm wasn't found in our search  [drone/push: passing]  2024-04-08 16:57:52 +10:00
1098910135  more bug fix  [drone/push: passing]  2024-04-08 16:52:15 +10:00
f059efc49f  bugfix  [drone/push: passing]  2024-04-08 16:47:51 +10:00
b9a53b240a  search all vm disconnected events  [drone/push: passing]  2024-04-08 16:35:45 +10:00
9fbe579f43  bugfix possible host selection  [drone/push: passing]  2024-04-08 16:07:24 +10:00
919ffd52cf  remove extra logging  [drone/push: passing]  2024-04-08 15:54:30 +10:00
106eb7d1bb  test  [drone/push: passing]  2024-04-08 15:50:47 +10:00
da8742ea64  debug  [drone/push: passing]  2024-04-08 15:45:09 +10:00
2f80601a40  bugfix vm not found  [drone/push: passing]  2024-04-08 15:41:11 +10:00
c2df4ea3af  logging  [drone/push: passing]  2024-03-18 12:10:20 +11:00
e355f5f6bc  improve logging  [drone/push: passing]  2024-03-18 11:25:13 +11:00
334b0b8ab6  improve logging  [drone/push: passing]  2024-03-18 11:17:47 +11:00
f20712beb0  test  [drone/push: passing]  2024-03-18 11:11:31 +11:00
328b027fdc  more testing  [drone/push: passing]  2024-03-18 10:51:58 +11:00
82fb21c710  test  [drone/push: passing]  2024-03-18 10:39:44 +11:00
fd23121a7c  filter ha unreachable to hosts that actually failed  [drone/push: passing]  2024-03-18 10:28:33 +11:00
6aa7627c96  use possible hosts for actual vm restart time search  [drone/push: passing]  2024-03-18 10:22:16 +11:00
29eab3df05  Merge branch 'main' of https://git.coadcorp.com/nathan/go-events  [drone/push: passing]  2024-03-18 10:07:18 +11:00
bdce428803  use HA unreachable events when trying to find a failure  2024-03-18 10:05:44 +11:00
4 changed files with 182 additions and 55 deletions

.drone.yml

@@ -11,18 +11,35 @@ steps:
     commands:
       - sh ./.drone.sh
-  - name: dell-deploy
-    # # https://github.com/cschlosser/drone-ftps/blob/master/README.md
-    image: cschlosser/drone-ftps
-    environment:
-      FTP_USERNAME:
-        from_secret: FTP_USERNAME
-      FTP_PASSWORD:
-        from_secret: FTP_PASSWORD
-      PLUGIN_HOSTNAME: ftp.emc.com:21
-      PLUGIN_SECURE: false
-      PLUGIN_VERIFY: false
-      PLUGIN_CHMOD: false
-      #PLUGIN_DEBUG: false
-      PLUGIN_INCLUDE: ^events$,^events_checksum.txt$
-      PLUGIN_EXCLUDE: ^\.git/$
+  #- name: dell-deploy
+  ## # https://github.com/cschlosser/drone-ftps/blob/master/README.md
+  # image: cschlosser/drone-ftps
+  # environment:
+  #   FTP_USERNAME:
+  #     from_secret: FTP_USERNAME
+  #   FTP_PASSWORD:
+  #     from_secret: FTP_PASSWORD
+  #   PLUGIN_HOSTNAME: ftp.emc.com:21
+  #   PLUGIN_SECURE: false
+  #   PLUGIN_VERIFY: false
+  #   PLUGIN_CHMOD: false
+  #   #PLUGIN_DEBUG: false
+  #   PLUGIN_INCLUDE: ^events$,^events_checksum.txt$
+  #   PLUGIN_EXCLUDE: ^\.git/$
+  # https://github.com/hypervtechnics/drone-sftp
+  - name: dell-sftp-deploy
+    image: hypervtechnics/drone-sftp
+    settings:
+      host: deft.dell.com
+      username:
+        from_secret: DELLFTP_USER
+      password:
+        from_secret: DELLFTP_PASS
+      port: 22
+      source: ./
+      filter: events*
+      clean: false
+      target: /
+      overwrite: true
+      verbose: true

go.mod (4 changed lines)

@@ -1,5 +1,5 @@
 module nathan/go-events
 
-go 1.19
+go 1.24.2
 
-require github.com/vmware/govmomi v0.30.4
+require github.com/vmware/govmomi v0.50.0

go.sum (5 changed lines)

@@ -1,3 +1,8 @@
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/vmware/govmomi v0.30.4 h1:BCKLoTmiBYRuplv3GxKEMBLtBaJm8PA56vo9bddIpYQ=
 github.com/vmware/govmomi v0.30.4/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY=
+github.com/vmware/govmomi v0.43.0 h1:7Kg3Bkdly+TrE67BYXzRq7ZrDnn7xqpKX95uEh2f9Go=
+github.com/vmware/govmomi v0.43.0/go.mod h1:IOv5nTXCPqH9qVJAlRuAGffogaLsNs8aF+e7vLgsHJU=
+github.com/vmware/govmomi v0.50.0 h1:vFOnUCBCX3m3MgTKfBp68Pz5gsHvKkO07Y2wCGYYQOM=
+github.com/vmware/govmomi v0.50.0/go.mod h1:Z5uo7z0kRhVV00E4gfbUGwUaXIKTgqngsT+t/mIDpcI=
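
Aside, not part of the diff: a minimal, self-contained Go sketch of the kind of client setup main.go's helpers rely on (govmomi.NewClient plus the event and view managers), which can serve as a quick smoke test after the go 1.19 -> 1.24.2 and govmomi v0.30.4 -> v0.50.0 bumps. The vCenter URL and credentials below are placeholders, not values from this repository.

package main

import (
        "context"
        "log"
        "net/url"

        "github.com/vmware/govmomi"
        "github.com/vmware/govmomi/event"
        "github.com/vmware/govmomi/view"
)

func main() {
        ctx := context.Background()
        u, err := url.Parse("https://vcenter.example.com/sdk") // placeholder vCenter endpoint
        if err != nil {
                log.Fatal(err)
        }
        u.User = url.UserPassword("user", "pass") // placeholder credentials
        c, err := govmomi.NewClient(ctx, u, true) // true skips TLS verification
        if err != nil {
                log.Fatal(err)
        }
        defer c.Logout(ctx)
        _ = event.NewManager(c.Client) // the event manager that collectors are created from
        _ = view.NewManager(c.Client)  // the view manager getVmInCluster uses for its container view
        log.Printf("connected to %s", c.ServiceContent.About.FullName)
}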

main.go (153 changed lines)

@@ -50,6 +50,7 @@ var (
     buildTime   string // when the executable was built
     results     []OutageResults
     hostResults []HostFailureResults
+    haUnreachableEvents []types.Event
 )
 
 // This function optionally filters events by a single MoRef, any additonal MoRefs are ignored
@@ -90,15 +91,15 @@ func getEvents(eventTypes []string, entities []types.ManagedObjectReference, beg
     }
 
     for _, e := range entities {
-        // Only log the entity we're filtering if it isn't the vcenter root
-        if e != root {
-            log.Printf("getEvents setting entity '%v' to filter\n", e)
-        }
-        filter.Entity = &types.EventFilterSpecByEntity{
-            Entity:    e,
-            Recursion: types.EventFilterSpecRecursionOptionAll,
-        }
+        if e == root {
+            log.Printf("getEvents leaving event filter spec at root\n")
+        } else {
+            // Only log the entity we're filtering if it isn't the vcenter root
+            log.Printf("getEvents setting entity '%v' to filter\n", e)
+            filter.Entity = &types.EventFilterSpecByEntity{
+                Entity:    e,
+                Recursion: types.EventFilterSpecRecursionOptionAll,
+            }
+        }
 
         collector, err := m.CreateCollectorForEvents(ctx, filter)
         if err != nil {
@@ -181,28 +182,28 @@ func getCluster(name string) mo.ClusterComputeResource {
     return mo.VirtualMachine{}
 }
 */
 
-func getVmInCluster(name string, cluster types.ManagedObjectReference) mo.VirtualMachine {
+func getVmInCluster(name string, cluster types.ManagedObjectReference) (mo.VirtualMachine, error) {
     // Create a container view so that we can search vCenter for a VM if we found any failure events
     m := view.NewManager(c.Client)
     cv, _ := m.CreateContainerView(ctx, cluster, []string{"VirtualMachine"}, true)
     var vms []mo.VirtualMachine
-    log.Printf("Searching for VM '%s'\n", name)
+    log.Printf("Searching for VM '%s' in cluster '%v'\n", name, cluster.Reference().Value)
     err := cv.Retrieve(ctx, []string{"VirtualMachine"}, []string{"summary", "name"}, &vms)
     if err != nil {
         log.Printf("Failed searching for VM %s : %s\n", name, err)
-        return mo.VirtualMachine{}
+        return mo.VirtualMachine{}, fmt.Errorf("error searching for VM %s : %s", name, err)
     } else {
         for _, vm := range vms {
             if vm.Name == name {
-                log.Printf("Found corresponding VM with MoRef '%s'\n", vm.Reference())
-                return vm
+                log.Printf("Found corresponding VM with MoRef '%s'", vm.Reference())
+                return vm, nil
             }
         }
     }
     // If we reached here then we didn't find a VM
-    return mo.VirtualMachine{}
+    return mo.VirtualMachine{}, fmt.Errorf("no VM found with name %s", name)
 }
 
 func main() {
@@ -215,6 +216,7 @@ func main() {
     begin := flag.Duration("b", time.Hour, "Begin time") // default BeginTime is 1h ago
     end := flag.Duration("e", 0, "End time")
     fuzzyMinutes := flag.Int("fuzziness", 5, "Number of minutes to offset VM restart time when searching for related Host failure event")
+    unreachableMinutes := flag.Int("unreachable", 20, "Number of minutes to search for host HA events either side of a VM failure")
     flag.Parse()
 
     // Print logs to file
@@ -269,30 +271,109 @@
     log.Printf("Found at least one host failure, proceeding with VM restart search\n")
     vmFailures := getEvents([]string{"com.vmware.vc.ha.VmRestartedByHAEvent"}, []types.ManagedObjectReference{}, *begin, *end)
 
+    log.Printf("Searching for ha status change events\n")
+    haStatusChanges := getEvents([]string{"com.vmware.vc.HA.HostStateChangedEvent"}, []types.ManagedObjectReference{}, *begin, *end)
+    log.Printf("Found %d ha status change events\n", len(haStatusChanges))
+    // filter ha status changed messages for unreachable ones
+    for _, h := range haStatusChanges {
+        unreachableMessage := strings.Contains(strings.ToLower(h.FullFormattedMessage), "changed to unreachable")
+        hostFailedMessage := strings.Contains(strings.ToLower(h.FullFormattedMessage), "changed to host failed")
+        if unreachableMessage || hostFailedMessage {
+            // make sure this host was in the host failures list
+            hostFound := false
+            var haFailedTime time.Time
+            for _, f := range hostFailures {
+                if h.Host.Name == f.Host.Name {
+                    // got it
+                    hostFound = true
+                    haFailedTime = f.CreatedTime.In(location)
+                    break
+                }
+            }
+            // make sure that this event is within 10 minutes either side of the corresponding host failed event
+            if hostFound {
+                unreachableStartComparison := h.CreatedTime.In(location).Add(time.Duration(int64(time.Minute) * -1 * int64(*unreachableMinutes)))
+                unreachableEndComparison := h.CreatedTime.In(location).Add(time.Duration(int64(time.Minute) * int64(*unreachableMinutes)))
+                if haFailedTime.Before(unreachableEndComparison) && haFailedTime.After(unreachableStartComparison) {
+                    haUnreachableEvents = append(haUnreachableEvents, h)
+                    log.Printf("Keeping host %s unreachable HA status event at %s\n", h.Host.Name, h.CreatedTime.In(location).Format(time.ANSIC))
+                } else {
+                    log.Printf("Excluding HA Unreachable for Host %s at time %s since it was before %s or after %s\n", h.Host.Name, haFailedTime.Format(time.ANSIC),
+                        unreachableEndComparison, unreachableStartComparison)
+                }
+            } else {
+                log.Printf("Host %s was not found in the list of hostfailure events, skipping this host\n", h.Host.Name)
+            }
+        }
+    }
+
     // Sort the host failure events by time
     sort.Slice(hostFailures[:], func(i, j int) bool {
         return hostFailures[i].CreatedTime.Before(hostFailures[j].CreatedTime)
     })
 
+    log.Printf("Searching for all vm disconnected events\n")
+    allVmDisconnectedEvents := getEvents([]string{"VmDisconnectedEvent"}, []types.ManagedObjectReference{}, *begin, *end)
+    log.Printf("Retrieved '%d' VmDisconnectedEvent events from '%s' to '%s'.\n", len(allVmDisconnectedEvents), begin.String(), end.String())
+    if len(allVmDisconnectedEvents) > 0 {
+        // Sort the disconnected events by time
+        sort.Slice(allVmDisconnectedEvents[:], func(i, j int) bool {
+            return allVmDisconnectedEvents[i].CreatedTime.Before(allVmDisconnectedEvents[j].CreatedTime)
+        })
+    }
+
     for i := range vmFailures {
+        var vm mo.VirtualMachine
         var outageStart, restartTime time.Time
         var failedHost string
         var possibleHosts []types.Event
+        var vmDisconnectedEvents []types.Event
+        var vmFound bool
+        var vmOS string
+        var vmPowerState string
         event := vmFailures[i]
         vmRestartTime := event.CreatedTime.In(location)
         // Sometimes host HA events can come through a few minutes after a VM restart event, so create a "fuzzy" starting time to search for host HA events
         fuzzyTime := vmRestartTime.Add(time.Duration(int64(time.Minute) * int64(*fuzzyMinutes)))
-        log.Printf("Failure event for VM '%s' restarted in cluster '%s'\n", event.Vm.Name, event.ComputeResource.Name)
-        // Get a reference to the cluster mentioned
+        if event.Vm == nil {
+            log.Printf("Can't read this event properly, skipping\n%v\n", event)
+            continue
+        }
+        log.Printf("Failure event for VM '%s' restarted in cluster '%s' at %s\n", event.Vm.Name, event.ComputeResource.Name, event.CreatedTime.In(location).Format(time.ANSIC))
+        // filter all the disconnected events to the ones belonging to this VM
+        for _, e := range allVmDisconnectedEvents {
+            if e.Vm.Name == event.Vm.Name {
+                log.Printf("Adding VM disconnected event on host %s at time %s\n", e.Host.Name, e.CreatedTime.In(location))
+                vmDisconnectedEvents = append(vmDisconnectedEvents, e)
+            }
+        }
+        log.Printf("Filtered '%d' VmDisconnectedEvent events belonging to VM '%s'\n", len(vmDisconnectedEvents), event.Vm.Name)
+        // Get a reference to the cluster mentioned in the event
         cluster := getCluster((event.ComputeResource.Name))
-        vm := getVmInCluster(event.Vm.Name, cluster.Reference())
+        vm, err = getVmInCluster(event.Vm.Name, cluster.Reference())
+        //log.Printf("VM: '%+v'\n", vm)
         //vm := getVM(event.Vm.Name)
-
-        // Use VmDisconnectedEvent to see which host this VM was on
-        vmDisconnectedEvents := getEvents([]string{"VmDisconnectedEvent"}, []types.ManagedObjectReference{vm.Reference()}, *begin, *end)
-        log.Printf("Retrieved '%d' VmDisconnectedEvent events.\n", len(vmDisconnectedEvents))
+        // If we couldn't find the vm then try using a list of all the disconnected events found
+        //if len(vm.ExtensibleManagedObject.Self.Value) == 0 {
+        if err != nil {
+            log.Printf("No VM matching string '%s' found in cluster '%s'\n", event.Vm.Name, cluster.Reference())
+            vmFound = false
+        } else {
+            // Use VmDisconnectedEvent to see which host this VM was on
+            vmFound = true
+            //vmDisconnectedEvents = getEvents([]string{"VmDisconnectedEvent"}, []types.ManagedObjectReference{vm.Reference()}, *begin, *end)
+            //log.Printf("Retrieved '%d' VmDisconnectedEvent events.\n", len(vmDisconnectedEvents))
+        }
 
         // Determine which host the VM was previoulsy running on
        if len(vmDisconnectedEvents) > 0 {
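
Aside, not part of the diff: the keep/exclude decision above, built from unreachableStartComparison and unreachableEndComparison, is a symmetric time-window test. A standalone sketch with a hypothetical withinWindow helper (not in the repository), using the new -unreachable flag's default of 20 minutes:

package main

import (
        "fmt"
        "time"
)

// withinWindow reports whether failedTime lies strictly inside +/-window of eventTime,
// mirroring the inline comparison against the start/end window values in the diff above.
func withinWindow(eventTime, failedTime time.Time, window time.Duration) bool {
        return failedTime.After(eventTime.Add(-window)) && failedTime.Before(eventTime.Add(window))
}

func main() {
        stateChange := time.Date(2024, 3, 18, 10, 0, 0, 0, time.UTC)
        hostFailed := stateChange.Add(-12 * time.Minute)
        fmt.Println(withinWindow(stateChange, hostFailed, 20*time.Minute)) // true: 12 min is inside +/-20 min
        fmt.Println(withinWindow(stateChange, hostFailed, 10*time.Minute)) // false: 12 min is outside +/-10 min
}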
@@ -323,7 +404,7 @@
         if len(possibleHosts) == 0 {
             log.Printf("No corresponding VM disconnected messages, falling back to any applicable host that experienced a HA event.\n")
             // Search for host failures
-            for _, hostEvent := range hostFailures {
+            for _, hostEvent := range haUnreachableEvents {
                 if hostEvent.CreatedTime.In(location).Before(fuzzyTime) || hostEvent.CreatedTime.In(location).Equal(fuzzyTime) {
                     possibleHosts = append(possibleHosts, hostEvent)
                 }
@@ -337,16 +418,26 @@
                 }
             }
         } else { // Didn't find any VM disconnected events
-            log.Printf("could not determine previous host for this VM. Filtering all host failures for events prior to fuzzy VM restart time '%s'\n", fuzzyTime)
+            log.Printf("could not determine previous host for this VM. Filtering all ha unreachable events prior to fuzzy VM restart time '%s'\n", fuzzyTime)
+            // TODO Use HA unreachable events to find the host
+            for _, hostEvent := range haUnreachableEvents {
+                if hostEvent.CreatedTime.In(location).Before(fuzzyTime) || hostEvent.CreatedTime.In(location).Equal(fuzzyTime) {
+                    possibleHosts = append(possibleHosts, hostEvent)
+                }
+            }
+            /*
             // Search for host failures
             for _, hostEvent := range hostFailures {
                 if hostEvent.CreatedTime.In(location).Before(fuzzyTime) || hostEvent.CreatedTime.In(location).Equal(fuzzyTime) {
                     possibleHosts = append(possibleHosts, hostEvent)
                 }
             }
+            */
         }
 
         log.Printf("Based on event times there were %d possible hosts this VM was running on\n", len(possibleHosts))
+        for _, hostEvent := range possibleHosts {
+            log.Printf("Host %s (%s)\n", hostEvent.Host.Name, hostEvent.CreatedTime.In(location).Format(time.ANSIC))
+        }
 
         if len(possibleHosts) == 0 {
             log.Printf("No ESXi outage events happened before VM %s fuzzy restart event at %s, skipping this event.\n", event.Vm.Name, fuzzyTime)
@@ -386,8 +477,8 @@
         var checkActualTime []types.Event
         // Search for any disconnected messages prior to actual restart time rather than fuzzy time
-        log.Printf("Checking host failure list based on actual VM restart time %s\n", vmRestartTime)
-        for _, hostEvent := range hostFailures {
+        log.Printf("Checking possible hosts list based on actual VM restart time %s\n", vmRestartTime)
+        for _, hostEvent := range possibleHosts {
             if hostEvent.CreatedTime.In(location).Before(vmRestartTime) || hostEvent.CreatedTime.In(location).Equal(vmRestartTime) {
                 checkActualTime = append(checkActualTime, hostEvent)
             }
@@ -399,10 +490,16 @@
             failedHost = checkActualTime[0].Host.Name
             outageStart = checkActualTime[0].CreatedTime.In(location)
             restartTime = vmRestartTime
+        } else if len(checkActualTime) > 1 {
+            lastIndex := len(checkActualTime) - 1
+            log.Printf("Found multiple hosts corresponding to actual VM restart time. Failed host was '%s', using outage start time of '%s'\n", checkActualTime[lastIndex].Host.Name, checkActualTime[lastIndex].CreatedTime.In(location))
+            failedHost = checkActualTime[lastIndex].Host.Name
+            outageStart = checkActualTime[lastIndex].CreatedTime.In(location)
+            restartTime = vmRestartTime
         } else {
             // if using the actual VM restart time doesn't narrow things down then go back to using the last host failure time before the fuzzy VM restart time
             lastIndex := len(possibleHosts) - 1
-            log.Printf("Failed host was '%s', using outage start time of '%s'\n", possibleHosts[lastIndex].Host.Name, possibleHosts[lastIndex].CreatedTime.In(location))
+            log.Printf("Last failed host before restart time was '%s'. Using outage start time of '%s'\n", possibleHosts[lastIndex].Host.Name, possibleHosts[lastIndex].CreatedTime.In(location))
             failedHost = possibleHosts[lastIndex].Host.Name
             outageStart = possibleHosts[lastIndex].CreatedTime.In(location)
             restartTime = vmRestartTime
@@ -417,6 +514,14 @@
         duration := restartTime.Sub(outageStart)
         out := time.Time{}.Add(duration)
 
+        if vmFound {
+            vmOS = vm.Summary.Guest.GuestFullName
+            vmPowerState = string(vm.Summary.Runtime.PowerState)
+        } else {
+            vmOS = ""
+            vmPowerState = ""
+        }
+
         // Create a new result
         result := OutageResults{
             VM: event.Vm.Name,
@@ -426,8 +531,8 @@
             Cluster:           event.ComputeResource.Name,
             FailedHost:        failedHost,
             NewHost:           event.Host.Name,
-            GuestOS:           vm.Summary.Guest.GuestFullName,
-            CurrentPowerState: string(vm.Summary.Runtime.PowerState),
+            GuestOS:           vmOS,
+            CurrentPowerState: vmPowerState,
             Description:       event.FullFormattedMessage,
         }
         // Append to list of all results
@@ -443,7 +548,7 @@
         })
     }
 } else {
-    log.Printf("Found %d hostfailure messages.", len(hostFailures))
+    log.Printf("Found %d hostfailure messages in last %.1f hour(s)", len(hostFailures), begin.Abs().Hours())
 }
 
 // Combine details of host outages and VM outages into one interface
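
Aside, not part of the diff: both host-selection passes in main.go (the fuzzy-time filter and the actual-restart-time check) reduce to "take the most recent event at or before a cutoff" from a time-sorted slice. A standalone sketch of that pattern with a hypothetical lastEventBefore helper, rather than code from the repository; the sample times are made up.

package main

import (
        "fmt"
        "time"

        "github.com/vmware/govmomi/vim25/types"
)

// lastEventBefore walks events (assumed sorted by CreatedTime) and returns the most
// recent one whose CreatedTime is at or before cutoff, plus whether any was found.
func lastEventBefore(events []types.Event, cutoff time.Time) (types.Event, bool) {
        var picked types.Event
        found := false
        for _, e := range events {
                if e.CreatedTime.Before(cutoff) || e.CreatedTime.Equal(cutoff) {
                        picked = e
                        found = true
                }
        }
        return picked, found
}

func main() {
        restart := time.Date(2024, 3, 18, 10, 0, 0, 0, time.UTC)
        events := []types.Event{
                {CreatedTime: restart.Add(-30 * time.Minute)},
                {CreatedTime: restart.Add(-10 * time.Minute)}, // expected pick: latest event before the restart
                {CreatedTime: restart.Add(15 * time.Minute)},
        }
        if e, ok := lastEventBefore(events, restart); ok {
                fmt.Println("outage start candidate:", e.CreatedTime)
        }
}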