diff --git a/README.md b/README.md
index 9ba2c0ca..9302521b 100644
--- a/README.md
+++ b/README.md
@@ -612,6 +612,71 @@ extraScrapeConfigs: |
 In IMDS mode, metrics can be collected as follows:
 - Use a `podMonitor` custom resource with the Prometheus Operator to collect metrics.
 
+## Issuing Lifecycle Heartbeats
+
+You can configure NTH to send lifecycle heartbeats to the Auto Scaling Group (ASG) in Queue Processor mode. This allows a much longer termination grace period (up to 48 hours) than the maximum heartbeat timeout of two hours.
+
+### How it works
+
+- When NTH receives an ASG lifecycle termination event, it starts sending heartbeats to the ASG to renew the heartbeat timeout of the ASG's termination lifecycle hook.
+- The heartbeat timeout acts as a countdown timer that starts when the termination lifecycle action begins.
+- Until the timeout expires, the termination process is held at the `Terminating:Wait` stage.
+- Previously, NTH could not issue heartbeats, so termination could be delayed by at most the maximum heartbeat timeout (7200 seconds).
+- Now, the graceful termination period can be extended up to 48 hours, bounded by the lifecycle hook's global timeout.
+
+### How to use
+
+- Specify values for `Heartbeat Interval` (required) and `Heartbeat Until` (optional).
+
+### Configurations
+
+#### `Heartbeat Interval`
+- Time period in seconds between consecutive heartbeat signals
+- Range: 30 to 3600 seconds (30 seconds to 1 hour)
+- Helm chart value (values.yaml): `heartbeatInterval`
+- CLI flag: `heartbeat-interval`
+
+#### `Heartbeat Until`
+- Duration in seconds over which heartbeat signals are sent
+- Range: 60 to 172800 seconds (1 minute to 48 hours)
+- Helm chart value (values.yaml): `heartbeatUntil`
+- CLI flag: `heartbeat-until`
+
+#### Example Case
+
+- `Heartbeat Interval`: 1000 seconds
+- `Heartbeat Until`: 4500 seconds
+- `Heartbeat Timeout` (set on the ASG lifecycle hook): 3000 seconds
+
+| Time (s) | Event       | Heartbeat Timeout remaining (s) | Heartbeat Until remaining (s) | Action |
+|----------|-------------|---------------------------------|-------------------------------|--------|
+| 0        | Start       | 3000                            | 4500                          | Termination Event Received |
+| 1000     | HB1 Issued  | 2000 -> 3000                    | 3500                          | Send Heartbeat |
+| 2000     | HB2 Issued  | 2000 -> 3000                    | 2500                          | Send Heartbeat |
+| 3000     | HB3 Issued  | 2000 -> 3000                    | 1500                          | Send Heartbeat |
+| 4000     | HB4 Issued  | 2000 -> 3000                    | 500                           | Send Heartbeat |
+| 4500     | HB Expires  | 2500                            | 0                             | Stop Heartbeats |
+| 7000     | Termination | -                               | -                             | Instance Terminates |
+
+Note: The instance can terminate earlier if its pods finish draining, since NTH then completes the lifecycle action and stops sending heartbeats.
+
+### Example Helm Command
+
+```sh
+helm upgrade --install aws-node-termination-handler \
+  --namespace kube-system \
+  --set enableSqsTerminationDraining=true \
+  --set heartbeatInterval=1000 \
+  --set heartbeatUntil=4500
+  # ... add your other configuration values
+```
+
+### Important Notes
+
+- A termination lifecycle hook on the ASG is required for this feature; the longer grace period is achieved by renewing that hook's heartbeat timeout. Without a lifecycle hook, instances terminate immediately (see the example command after this list).
+- Issuing lifecycle heartbeats is only supported in Queue Processor mode. Helm prevents setting `enableSqsTerminationDraining=false` together with the heartbeat flags; bypassing this by editing the deployment directly will cause NTH to fail at startup.
+- The heartbeat interval should be sufficiently smaller than the heartbeat timeout. There is a delay between the start of the termination lifecycle action (when the timeout timer starts) and the moment NTH issues its first heartbeat, so an interval equal to or only slightly smaller than the timeout can let the timeout expire before the first heartbeat is sent. Leave enough buffer for NTH to receive the event and finish initializing.
+- Issuing heartbeats is part of the termination handling workflow, so the number of instances NTH can handle (and therefore heartbeat for) concurrently is limited by the number of workers. In other words, heartbeats are issued for at most the number of instances specified by the `workers` flag at a time.
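+
+As a reference for the first note above, a termination lifecycle hook with a heartbeat timeout can be created on the ASG with the AWS CLI. This is only a minimal sketch: the hook name, ASG name, and timeout value below are illustrative and should match your own setup (the 3000-second timeout mirrors the example case above).
+
+```sh
+aws autoscaling put-lifecycle-hook \
+  --lifecycle-hook-name my-k8s-term-hook \
+  --auto-scaling-group-name my-k8s-asg \
+  --lifecycle-transition autoscaling:EC2_INSTANCE_TERMINATING \
+  --default-result CONTINUE \
+  --heartbeat-timeout 3000
+```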
+
 ## Communication
 * If you've run into a bug or have a new feature request, please open an [issue](https://github.com/aws/aws-node-termination-handler/issues/new).
 * You can also chat with us in the [Kubernetes Slack](https://kubernetes.slack.com) in the `#provider-aws` channel
diff --git a/config/helm/aws-node-termination-handler/templates/deployment.yaml b/config/helm/aws-node-termination-handler/templates/deployment.yaml
index 2d7a8896..7c043fec 100644
--- a/config/helm/aws-node-termination-handler/templates/deployment.yaml
+++ b/config/helm/aws-node-termination-handler/templates/deployment.yaml
@@ -168,6 +168,10 @@ spec:
             value: {{ .Values.deleteSqsMsgIfNodeNotFound | quote }}
           - name: WORKERS
             value: {{ .Values.workers | quote }}
+          - name: HEARTBEAT_INTERVAL
+            value: {{ .Values.heartbeatInterval | quote }}
+          - name: HEARTBEAT_UNTIL
+            value: {{ .Values.heartbeatUntil | quote }}
          {{- with .Values.extraEnv }}
          {{- toYaml . | nindent 12 }}
          {{- end }}
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 05fabdca..e87066cb 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -112,6 +112,9 @@
     queueURLConfigKey                      = "QUEUE_URL"
     completeLifecycleActionDelaySecondsKey = "COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS"
     deleteSqsMsgIfNodeNotFoundKey          = "DELETE_SQS_MSG_IF_NODE_NOT_FOUND"
+    // heartbeat
+    heartbeatIntervalKey = "HEARTBEAT_INTERVAL"
+    heartbeatUntilKey    = "HEARTBEAT_UNTIL"
 )
 
 // Config arguments set via CLI, environment variables, or defaults
@@ -166,6 +169,8 @@ type Config struct {
     CompleteLifecycleActionDelaySeconds int
     DeleteSqsMsgIfNodeNotFound          bool
     UseAPIServerCacheToListPods         bool
+    HeartbeatInterval                   int
+    HeartbeatUntil                      int
 }
 
 // ParseCliArgs parses cli arguments and uses environment variables as fallback values
@@ -230,6 +235,8 @@ func ParseCliArgs() (config Config, err error) {
     flag.IntVar(&config.CompleteLifecycleActionDelaySeconds, "complete-lifecycle-action-delay-seconds", getIntEnv(completeLifecycleActionDelaySecondsKey, -1), "Delay completing the Autoscaling lifecycle action after a node has been drained.")
     flag.BoolVar(&config.DeleteSqsMsgIfNodeNotFound, "delete-sqs-msg-if-node-not-found", getBoolEnv(deleteSqsMsgIfNodeNotFoundKey, false), "If true, delete SQS Messages from the SQS Queue if the targeted node(s) are not found.")
     flag.BoolVar(&config.UseAPIServerCacheToListPods, "use-apiserver-cache", getBoolEnv(useAPIServerCache, false), "If true, leverage the k8s apiserver's index on pod's spec.nodeName to list pods on a node, instead of doing an etcd quorum read.")
+    flag.IntVar(&config.HeartbeatInterval, "heartbeat-interval", getIntEnv(heartbeatIntervalKey, -1), "The time period in seconds between consecutive heartbeat signals. Valid range: 30-3600 seconds (30 seconds to 1 hour).")
+    flag.IntVar(&config.HeartbeatUntil, "heartbeat-until", getIntEnv(heartbeatUntilKey, -1), "The duration in seconds over which heartbeat signals are sent.
Valid range: 60-172800 seconds (1 minute to 48 hours).") flag.Parse() if isConfigProvided("pod-termination-grace-period", podTerminationGracePeriodConfigKey) && isConfigProvided("grace-period", gracePeriodConfigKey) { @@ -274,6 +281,26 @@ func ParseCliArgs() (config Config, err error) { panic("You must provide a node-name to the CLI or NODE_NAME environment variable.") } + // heartbeat value boundary and compability check + if config.EnableSQSTerminationDraining { + if config.HeartbeatInterval != -1 && (config.HeartbeatInterval < 30 || config.HeartbeatInterval > 3600) { + return config, fmt.Errorf("invalid heartbeat-interval passed: %d Should be between 30 and 3600 seconds", config.HeartbeatInterval) + } + if config.HeartbeatUntil != -1 && (config.HeartbeatUntil < 60 || config.HeartbeatUntil > 172800) { + return config, fmt.Errorf("invalid heartbeat-until passed: %d Should be between 60 and 172800 seconds", config.HeartbeatUntil) + } + if config.HeartbeatInterval != -1 && config.HeartbeatUntil == -1 { + config.HeartbeatUntil = 172800 + log.Info().Msgf("Since heartbeat-until is not set, defaulting to %d seconds", config.HeartbeatUntil) + } else if config.HeartbeatInterval == -1 && config.HeartbeatUntil != -1 { + return config, fmt.Errorf("invalid heartbeat configuration: heartbeat-interval is required when heartbeat-until is set") + } + } else { + if config.HeartbeatInterval != -1 || config.HeartbeatUntil != -1 { + return config, fmt.Errorf("currently using IMDS mode. Heartbeat is only supported for Queue Processor mode") + } + } + // client-go expects these to be set in env vars os.Setenv(kubernetesServiceHostConfigKey, config.KubernetesServiceHost) os.Setenv(kubernetesServicePortConfigKey, config.KubernetesServicePort) @@ -332,6 +359,8 @@ func (c Config) PrintJsonConfigArgs() { Str("ManagedTag", c.ManagedTag). Bool("use_provider_id", c.UseProviderId). Bool("use_apiserver_cache", c.UseAPIServerCacheToListPods). + Int("heartbeat_interval", c.HeartbeatInterval). + Int("heartbeat_until", c.HeartbeatUntil). Msg("aws-node-termination-handler arguments") } @@ -383,7 +412,9 @@ func (c Config) PrintHumanConfigArgs() { "\tmanaged-tag: %s,\n"+ "\tuse-provider-id: %t,\n"+ "\taws-endpoint: %s,\n"+ - "\tuse-apiserver-cache: %t,\n", + "\tuse-apiserver-cache: %t,\n"+ + "\theartbeat-interval: %d,\n"+ + "\theartbeat-until: %d\n", c.DryRun, c.NodeName, c.PodName, @@ -424,6 +455,8 @@ func (c Config) PrintHumanConfigArgs() { c.UseProviderId, c.AWSEndpoint, c.UseAPIServerCacheToListPods, + c.HeartbeatInterval, + c.HeartbeatUntil, ) } diff --git a/pkg/monitor/sqsevent/asg-lifecycle-event.go b/pkg/monitor/sqsevent/asg-lifecycle-event.go index fc034931..74d19a84 100644 --- a/pkg/monitor/sqsevent/asg-lifecycle-event.go +++ b/pkg/monitor/sqsevent/asg-lifecycle-event.go @@ -15,11 +15,14 @@ package sqsevent import ( "encoding/json" + "errors" "fmt" + "time" "github.com/aws/aws-node-termination-handler/pkg/monitor" "github.com/aws/aws-node-termination-handler/pkg/node" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/sqs" "github.com/rs/zerolog/log" @@ -95,19 +98,30 @@ func (m SQSMonitor) asgTerminationToInterruptionEvent(event *EventBridgeEvent, m Description: fmt.Sprintf("ASG Lifecycle Termination event received. 
Instance will be interrupted at %s \n", event.getTime()), } + stopHeartbeatCh := make(chan struct{}) + interruptionEvent.PostDrainTask = func(interruptionEvent monitor.InterruptionEvent, _ node.Node) error { + _, err = m.continueLifecycleAction(lifecycleDetail) if err != nil { return fmt.Errorf("continuing ASG termination lifecycle: %w", err) } log.Info().Str("lifecycleHookName", lifecycleDetail.LifecycleHookName).Str("instanceID", lifecycleDetail.EC2InstanceID).Msg("Completed ASG Lifecycle Hook") + + close(stopHeartbeatCh) return m.deleteMessage(message) } interruptionEvent.PreDrainTask = func(interruptionEvent monitor.InterruptionEvent, n node.Node) error { + nthConfig := n.GetNthConfig() + if nthConfig.HeartbeatInterval != -1 && nthConfig.HeartbeatUntil != -1 { + go m.checkHeartbeatTimeout(nthConfig.HeartbeatInterval, lifecycleDetail) + go m.SendHeartbeats(nthConfig.HeartbeatInterval, nthConfig.HeartbeatUntil, lifecycleDetail, stopHeartbeatCh) + } + err := n.TaintASGLifecycleTermination(interruptionEvent.NodeName, interruptionEvent.EventID) if err != nil { - log.Err(err).Msgf("Unable to taint node with taint %s:%s", node.ASGLifecycleTerminationTaint, interruptionEvent.EventID) + log.Err(err).Msgf("unable to taint node with taint %s:%s", node.ASGLifecycleTerminationTaint, interruptionEvent.EventID) } return nil } @@ -115,6 +129,86 @@ func (m SQSMonitor) asgTerminationToInterruptionEvent(event *EventBridgeEvent, m return &interruptionEvent, nil } +// Compare the heartbeatInterval with the heartbeat timeout and warn if (heartbeatInterval >= heartbeat timeout) +func (m SQSMonitor) checkHeartbeatTimeout(heartbeatInterval int, lifecycleDetail *LifecycleDetail) { + input := &autoscaling.DescribeLifecycleHooksInput{ + AutoScalingGroupName: aws.String(lifecycleDetail.AutoScalingGroupName), + LifecycleHookNames: []*string{aws.String(lifecycleDetail.LifecycleHookName)}, + } + + lifecyclehook, err := m.ASG.DescribeLifecycleHooks(input) + if err != nil { + log.Err(err).Msg("failed to describe lifecycle hook") + return + } + + if len(lifecyclehook.LifecycleHooks) == 0 { + log.Warn(). + Str("asgName", lifecycleDetail.AutoScalingGroupName). + Str("lifecycleHookName", lifecycleDetail.LifecycleHookName). + Msg("Tried to check heartbeat timeout, but no lifecycle hook found from ASG") + return + } + + heartbeatTimeout := int(*lifecyclehook.LifecycleHooks[0].HeartbeatTimeout) + + if heartbeatInterval >= heartbeatTimeout { + log.Warn().Msgf("Heartbeat interval (%d seconds) is equal to or greater than the heartbeat timeout (%d seconds) for the lifecycle hook %s. 
The node would likely be terminated before the heartbeat is sent", heartbeatInterval, heartbeatTimeout, *lifecyclehook.LifecycleHooks[0].LifecycleHookName) + } +} + +// Issue lifecycle heartbeats to reset the heartbeat timeout timer in ASG +func (m SQSMonitor) SendHeartbeats(heartbeatInterval int, heartbeatUntil int, lifecycleDetail *LifecycleDetail, stopCh <-chan struct{}) { + ticker := time.NewTicker(time.Duration(heartbeatInterval) * time.Second) + defer ticker.Stop() + timeout := time.After(time.Duration(heartbeatUntil) * time.Second) + + for { + select { + case <-stopCh: + return + case <-ticker.C: + err := m.recordLifecycleActionHeartbeat(lifecycleDetail) + if err != nil { + log.Err(err).Msg("invalid heartbeat target, stopping heartbeat") + return + } + case <-timeout: + log.Info().Msg("Heartbeat deadline exceeded, stopping heartbeat") + return + } + } +} + +func (m SQSMonitor) recordLifecycleActionHeartbeat(lifecycleDetail *LifecycleDetail) error { + input := &autoscaling.RecordLifecycleActionHeartbeatInput{ + AutoScalingGroupName: aws.String(lifecycleDetail.AutoScalingGroupName), + LifecycleHookName: aws.String(lifecycleDetail.LifecycleHookName), + LifecycleActionToken: aws.String(lifecycleDetail.LifecycleActionToken), + InstanceId: aws.String(lifecycleDetail.EC2InstanceID), + } + + log.Info().Str("asgName", lifecycleDetail.AutoScalingGroupName). + Str("lifecycleHookName", lifecycleDetail.LifecycleHookName). + Str("lifecycleActionToken", lifecycleDetail.LifecycleActionToken). + Str("instanceID", lifecycleDetail.EC2InstanceID). + Msg("Sending lifecycle heartbeat") + + // Stop the heartbeat if the target is invalid + _, err := m.ASG.RecordLifecycleActionHeartbeat(input) + if err != nil { + var awsErr awserr.Error + log.Warn().Err(err).Msg("Failed to send lifecycle heartbeat") + if errors.As(err, &awsErr) && awsErr.Code() == "ValidationError" { + return err + } + return nil + } + + log.Info().Msg("Successfully sent lifecycle heartbeat") + return nil +} + func (m SQSMonitor) deleteMessage(message *sqs.Message) error { errs := m.deleteMessages([]*sqs.Message{message}) if errs != nil { @@ -123,7 +217,7 @@ func (m SQSMonitor) deleteMessage(message *sqs.Message) error { return nil } -// Continues the lifecycle hook thereby indicating a successful action occured +// Continues the lifecycle hook thereby indicating a successful action occurred func (m SQSMonitor) continueLifecycleAction(lifecycleDetail *LifecycleDetail) (*autoscaling.CompleteLifecycleActionOutput, error) { return m.completeLifecycleAction(&autoscaling.CompleteLifecycleActionInput{ AutoScalingGroupName: &lifecycleDetail.AutoScalingGroupName, diff --git a/pkg/monitor/sqsevent/sqs-monitor_test.go b/pkg/monitor/sqsevent/sqs-monitor_test.go index 2b93085e..28d337d0 100644 --- a/pkg/monitor/sqsevent/sqs-monitor_test.go +++ b/pkg/monitor/sqsevent/sqs-monitor_test.go @@ -18,6 +18,7 @@ import ( "fmt" "strings" "testing" + "time" "github.com/aws/aws-node-termination-handler/pkg/monitor" "github.com/aws/aws-node-termination-handler/pkg/monitor/sqsevent" @@ -276,7 +277,6 @@ func TestMonitor_AsgDirectToSqsSuccess(t *testing.T) { default: h.Ok(t, fmt.Errorf("Expected an event to be generated")) } - } func TestMonitor_AsgDirectToSqsTestNotification(t *testing.T) { @@ -520,7 +520,6 @@ func TestMonitor_DrainTasksASGFailure(t *testing.T) { default: h.Ok(t, fmt.Errorf("Expected to get an event with a failing post drain task")) } - } func TestMonitor_Failure(t *testing.T) { @@ -908,6 +907,130 @@ func TestMonitor_InstanceNotManaged(t *testing.T) 
{ } } +func TestSendHeartbeats_EarlyClosure(t *testing.T) { + h.HeartbeatCallCount = 0 + + asgMock := h.MockedASG{ + RecordLifecycleActionHeartbeatResp: autoscaling.RecordLifecycleActionHeartbeatOutput{}, + RecordLifecycleActionHeartbeatErr: nil, + } + + sqsMonitor := sqsevent.SQSMonitor{ + ASG: asgMock, + } + + lifecycleDetail := sqsevent.LifecycleDetail{ + LifecycleHookName: "test-hook", + AutoScalingGroupName: "test-asg", + LifecycleActionToken: "test-token", + EC2InstanceID: "XXXXXXXXXXXX", + } + + stopHeartbeatCh := make(chan struct{}) + + go func() { + time.Sleep(3500 * time.Millisecond) + close(stopHeartbeatCh) + }() + + sqsMonitor.SendHeartbeats(1, 5, &lifecycleDetail, stopHeartbeatCh) + + h.Assert(t, h.HeartbeatCallCount == 3, "3 Heartbeat Expected, got %d", h.HeartbeatCallCount) +} + +func TestSendHeartbeats_NormalClosure(t *testing.T) { + h.HeartbeatCallCount = 0 + + asgMock := h.MockedASG{ + RecordLifecycleActionHeartbeatResp: autoscaling.RecordLifecycleActionHeartbeatOutput{}, + RecordLifecycleActionHeartbeatErr: nil, + } + + sqsMonitor := sqsevent.SQSMonitor{ + ASG: asgMock, + } + + lifecycleDetail := sqsevent.LifecycleDetail{ + LifecycleHookName: "test-hook", + AutoScalingGroupName: "test-asg", + LifecycleActionToken: "test-token", + EC2InstanceID: "XXXXXXXXXXXX", + } + + stopHeartbeatCh := make(chan struct{}) + + go func() { + time.Sleep(10 * time.Second) + close(stopHeartbeatCh) + }() + + sqsMonitor.SendHeartbeats(1, 5, &lifecycleDetail, stopHeartbeatCh) + + h.Assert(t, h.HeartbeatCallCount == 5, "5 Heartbeat Expected, got %d", h.HeartbeatCallCount) +} + +func TestSendHeartbeats_ErrThrottlingASG(t *testing.T) { + h.HeartbeatCallCount = 0 + + asgMock := h.MockedASG{ + RecordLifecycleActionHeartbeatResp: autoscaling.RecordLifecycleActionHeartbeatOutput{}, + RecordLifecycleActionHeartbeatErr: awserr.New("ThrottlingException", "Rate exceeded", nil), + } + + sqsMonitor := sqsevent.SQSMonitor{ + ASG: asgMock, + } + + lifecycleDetail := sqsevent.LifecycleDetail{ + LifecycleHookName: "test-hook", + AutoScalingGroupName: "test-asg", + LifecycleActionToken: "test-token", + EC2InstanceID: "XXXXXXXXXXXX", + } + + stopHeartbeatCh := make(chan struct{}) + + go func() { + time.Sleep(10 * time.Second) + close(stopHeartbeatCh) + }() + + sqsMonitor.SendHeartbeats(1, 6, &lifecycleDetail, stopHeartbeatCh) + + h.Assert(t, h.HeartbeatCallCount == 6, "6 Heartbeat Expected, got %d", h.HeartbeatCallCount) +} + +func TestSendHeartbeats_ErrInvalidTarget(t *testing.T) { + h.HeartbeatCallCount = 0 + + asgMock := h.MockedASG{ + RecordLifecycleActionHeartbeatResp: autoscaling.RecordLifecycleActionHeartbeatOutput{}, + RecordLifecycleActionHeartbeatErr: awserr.New("ValidationError", "No active Lifecycle Action found", nil), + } + + sqsMonitor := sqsevent.SQSMonitor{ + ASG: asgMock, + } + + lifecycleDetail := sqsevent.LifecycleDetail{ + LifecycleHookName: "test-hook", + AutoScalingGroupName: "test-asg", + LifecycleActionToken: "test-token", + EC2InstanceID: "XXXXXXXXXXXX", + } + + stopHeartbeatCh := make(chan struct{}) + + go func() { + time.Sleep(10 * time.Second) + close(stopHeartbeatCh) + }() + + sqsMonitor.SendHeartbeats(1, 6, &lifecycleDetail, stopHeartbeatCh) + + h.Assert(t, h.HeartbeatCallCount == 1, "1 Heartbeat Expected, got %d", h.HeartbeatCallCount) +} + // AWS Mock Helpers specific to sqs-monitor tests func getDescribeInstancesResp(privateDNSName string, withASGTag bool, withManagedTag bool) ec2.DescribeInstancesOutput { diff --git a/pkg/node/node.go b/pkg/node/node.go index 7e323d13..204c5de6 
100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -280,6 +280,10 @@ func (n Node) MarkForUncordonAfterReboot(nodeName string) error { return nil } +func (n Node) GetNthConfig() config.Config { + return n.nthConfig +} + // addLabel will add a label to the node given a label key and value // Specifying true for the skipExisting parameter will skip adding the label if it already exists func (n Node) addLabel(nodeName string, key string, value string, skipExisting bool) error { diff --git a/pkg/test/aws-mocks.go b/pkg/test/aws-mocks.go index 8d5c8ae5..b00ae365 100644 --- a/pkg/test/aws-mocks.go +++ b/pkg/test/aws-mocks.go @@ -56,12 +56,14 @@ func (m MockedEC2) DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.De // MockedASG mocks the autoscaling API type MockedASG struct { autoscalingiface.AutoScalingAPI - CompleteLifecycleActionResp autoscaling.CompleteLifecycleActionOutput - CompleteLifecycleActionErr error - DescribeAutoScalingInstancesResp autoscaling.DescribeAutoScalingInstancesOutput - DescribeAutoScalingInstancesErr error - DescribeTagsPagesResp autoscaling.DescribeTagsOutput - DescribeTagsPagesErr error + CompleteLifecycleActionResp autoscaling.CompleteLifecycleActionOutput + CompleteLifecycleActionErr error + DescribeAutoScalingInstancesResp autoscaling.DescribeAutoScalingInstancesOutput + DescribeAutoScalingInstancesErr error + DescribeTagsPagesResp autoscaling.DescribeTagsOutput + DescribeTagsPagesErr error + RecordLifecycleActionHeartbeatResp autoscaling.RecordLifecycleActionHeartbeatOutput + RecordLifecycleActionHeartbeatErr error } // CompleteLifecycleAction mocks the autoscaling.CompleteLifecycleAction API call @@ -81,3 +83,14 @@ func (m MockedASG) DescribeTagsPages(input *autoscaling.DescribeTagsInput, fn de fn(&m.DescribeTagsPagesResp, true) return m.DescribeTagsPagesErr } + +var HeartbeatCallCount int + +// RecordLifecycleActionHeartbeat mocks the autoscaling.RecordLifecycleActionHeartbeat API call +func (m MockedASG) RecordLifecycleActionHeartbeat(input *autoscaling.RecordLifecycleActionHeartbeatInput) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) { + HeartbeatCallCount++ + if m.RecordLifecycleActionHeartbeatErr != nil && HeartbeatCallCount%2 == 1 { + return &m.RecordLifecycleActionHeartbeatResp, m.RecordLifecycleActionHeartbeatErr + } + return &m.RecordLifecycleActionHeartbeatResp, nil +} diff --git a/test/e2e/asg-lifecycle-sqs-heartbeat-test b/test/e2e/asg-lifecycle-sqs-heartbeat-test new file mode 100755 index 00000000..be5202a7 --- /dev/null +++ b/test/e2e/asg-lifecycle-sqs-heartbeat-test @@ -0,0 +1,222 @@ +#!/bin/bash +set -euo pipefail + +# Available env vars: +# $TMP_DIR +# $CLUSTER_NAME +# $KUBECONFIG +# $NODE_TERMINATION_HANDLER_DOCKER_REPO +# $NODE_TERMINATION_HANDLER_DOCKER_TAG +# $WEBHOOK_DOCKER_REPO +# $WEBHOOK_DOCKER_TAG +# $AEMM_URL +# $AEMM_VERSION + + +function fail_and_exit { + echo "❌ ASG Lifecycle SQS Heartbeat Test failed $CLUSTER_NAME ❌" + exit "${1:-1}" +} + +echo "Starting ASG Lifecycle SQS Heartbeat Test for Node Termination Handler" +START_TIME=$(date -u +"%Y-%m-%dT%TZ") + +SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" + +common_helm_args=() + +localstack_helm_args=( + upgrade + --install + --namespace default + "$CLUSTER_NAME-localstack" + "$SCRIPTPATH/../../config/helm/localstack/" + --set nodeSelector."${NTH_CONTROL_LABEL}" + --set defaultRegion="${AWS_REGION}" + --wait +) + +set -x +helm "${localstack_helm_args[@]}" +set +x + +sleep 10 + +RUN_INSTANCE_CMD="awslocal ec2 run-instances --private-ip-address 
${WORKER_IP} --region ${AWS_REGION} --tag-specifications 'ResourceType=instance,Tags=[{Key=aws:autoscaling:groupName,Value=nth-integ-test},{Key=aws-node-termination-handler/managed,Value=blah}]'" +localstack_pod=$(kubectl get pods --selector app=localstack --field-selector="status.phase=Running" \ + -o go-template --template '{{range .items}}{{.metadata.name}} {{.metadata.creationTimestamp}}{{"\n"}}{{end}}' \ + | awk '$2 >= "'"${START_TIME//+0000/Z}"'" { print $1 }') +echo "🥑 Using localstack pod ${localstack_pod}" +run_instances_resp=$(kubectl exec -i "${localstack_pod}" -- bash -c "${RUN_INSTANCE_CMD}") +private_dns_name=$(echo "${run_instances_resp}" | jq -r '.Instances[] .PrivateDnsName') +instance_id=$(echo "${run_instances_resp}" | jq -r '.Instances[] .InstanceId') +echo "🥑 Started mock EC2 instance ($instance_id) w/ private DNS name: ${private_dns_name}" +set -x +CREATE_SQS_CMD="awslocal sqs create-queue --queue-name "${CLUSTER_NAME}-queue" --attributes MessageRetentionPeriod=300 --region ${AWS_REGION}" +queue_url=$(kubectl exec -i "${localstack_pod}" -- bash -c "${CREATE_SQS_CMD}" | jq -r .QueueUrl) +set +x + +echo "🥑 Created SQS Queue ${queue_url}" + +anth_helm_args=( + upgrade + --install + --namespace kube-system + "$CLUSTER_NAME-acth" + "$SCRIPTPATH/../../config/helm/aws-node-termination-handler/" + --set completeLifecycleActionDelaySeconds=120 + --set heartbeatInterval=30 + --set heartbeatUntil=100 + --set image.repository="$NODE_TERMINATION_HANDLER_DOCKER_REPO" + --set image.tag="$NODE_TERMINATION_HANDLER_DOCKER_TAG" + --set nodeSelector."${NTH_CONTROL_LABEL}" + --set tolerations[0].operator=Exists + --set awsAccessKeyID=foo + --set awsSecretAccessKey=bar + --set awsRegion="${AWS_REGION}" + --set awsEndpoint="http://localstack.default" + --set checkTagBeforeDraining=false + --set enableSqsTerminationDraining=true + --set queueURL="${queue_url}" + --wait +) +[[ -n "${NODE_TERMINATION_HANDLER_DOCKER_PULL_POLICY-}" ]] && + anth_helm_args+=(--set image.pullPolicy="$NODE_TERMINATION_HANDLER_DOCKER_PULL_POLICY") +[[ ${#common_helm_args[@]} -gt 0 ]] && + anth_helm_args+=("${common_helm_args[@]}") + +set -x +helm "${anth_helm_args[@]}" +set +x + +emtp_helm_args=( + upgrade + --install + --namespace default + "$CLUSTER_NAME-emtp" + "$SCRIPTPATH/../../config/helm/webhook-test-proxy/" + --set webhookTestProxy.image.repository="$WEBHOOK_DOCKER_REPO" + --set webhookTestProxy.image.tag="$WEBHOOK_DOCKER_TAG" + --wait +) +[[ -n "${WEBHOOK_DOCKER_PULL_POLICY-}" ]] && + emtp_helm_args+=(--set webhookTestProxy.image.pullPolicy="$WEBHOOK_DOCKER_PULL_POLICY") +[[ ${#common_helm_args[@]} -gt 0 ]] && + emtp_helm_args+=("${common_helm_args[@]}") + +set -x +helm "${emtp_helm_args[@]}" +set +x + +TAINT_CHECK_CYCLES=15 +TAINT_CHECK_SLEEP=15 + +DEPLOYED=0 + +for i in $(seq 1 $TAINT_CHECK_CYCLES); do + if [[ $(kubectl get deployments regular-pod-test -o jsonpath='{.status.unavailableReplicas}') -eq 0 ]]; then + echo "✅ Verified regular-pod-test pod was scheduled and started!" + DEPLOYED=1 + break + fi + echo "Setup Loop $i/$TAINT_CHECK_CYCLES, sleeping for $TAINT_CHECK_SLEEP seconds" + sleep $TAINT_CHECK_SLEEP +done + +if [[ $DEPLOYED -eq 0 ]]; then + echo "❌ regular-pod-test pod deployment failed" + fail_and_exit 2 +fi + +ASG_TERMINATE_EVENT=$(cat < /dev/null; then + echo "✅ Verified the worker node was cordoned!" 
+ cordoned=1 + fi + + if [[ $cordoned -eq 1 && $(kubectl get deployments regular-pod-test -o=jsonpath='{.status.unavailableReplicas}') -eq 1 ]]; then + echo "✅ Verified the regular-pod-test pod was evicted!" + echo "✅ ASG Lifecycle SQS Test Passed $CLUSTER_NAME! ✅" + exit 0 + fi + echo "Assertion Loop $i/$TAINT_CHECK_CYCLES, sleeping for $TAINT_CHECK_SLEEP seconds" + sleep $TAINT_CHECK_SLEEP +done + +if [[ $cordoned -eq 0 ]]; then + echo "❌ Worker node was not cordoned" +else + echo "❌ regular-pod-test was not evicted" +fi + +fail_and_exit 1