diff --git a/test/e2e/framework/kubernetes/create-kapinger-deployment.go b/test/e2e/framework/kubernetes/create-kapinger-deployment.go
index db2c428831c..83b4c684c99 100644
--- a/test/e2e/framework/kubernetes/create-kapinger-deployment.go
+++ b/test/e2e/framework/kubernetes/create-kapinger-deployment.go
@@ -146,7 +146,7 @@ func (c *CreateKapingerDeployment) GetKapingerDeployment() *appsv1.Deployment {
 					Containers: []v1.Container{
 						{
 							Name:  "kapinger",
-							Image: "acnpublic.azurecr.io/kapinger:20241011.3",
+							Image: "acnpublic.azurecr.io/kapinger:20241011.4",
 							Resources: v1.ResourceRequirements{
 								Requests: v1.ResourceList{
 									"memory": resource.MustParse("20Mi"),
@@ -210,17 +210,6 @@ func (c *CreateKapingerDeployment) GetKapingerDeployment() *appsv1.Deployment {
 									Value: c.BurstVolume,
 								},
 							},
-							LivenessProbe: &v1.Probe{
-								ProbeHandler: v1.ProbeHandler{
-									HTTPGet: &v1.HTTPGetAction{
-										Path: "/",
-										Port: intstr.FromInt(KapingerHTTPPort),
-									},
-								},
-								TimeoutSeconds:      10, //nolint
-								PeriodSeconds:       10, //nolint
-								InitialDelaySeconds: 3,  //nolint
-							},
 						},
 					},
 				},
diff --git a/test/e2e/framework/kubernetes/port-forward.go b/test/e2e/framework/kubernetes/port-forward.go
index a04b75e6513..08cab66bd20 100644
--- a/test/e2e/framework/kubernetes/port-forward.go
+++ b/test/e2e/framework/kubernetes/port-forward.go
@@ -110,6 +110,8 @@ func (p *PortForward) Run() error {
 
 			log.Printf("port forward validation HTTP request to \"%s\" succeeded, response: %s\n", p.pf.Address(), resp.Status)
 
+			log.Printf("starting keepalive for port forward...\n")
+			go p.pf.KeepAlive(pctx)
 			return nil
 		}
 
@@ -117,6 +119,8 @@ func (p *PortForward) Run() error {
 		return fmt.Errorf("could not start port forward within %ds: %w", defaultTimeoutSeconds, err)
 	}
 	log.Printf("successfully port forwarded to \"%s\"\n", p.pf.Address())
+	log.Printf("starting port forward keepalive...\n")
+	go p.pf.KeepAlive(pctx)
 
 	return nil
 }
diff --git a/test/e2e/framework/kubernetes/portforward.go b/test/e2e/framework/kubernetes/portforward.go
index a62728d2c31..d5376d6f8bc 100644
--- a/test/e2e/framework/kubernetes/portforward.go
+++ b/test/e2e/framework/kubernetes/portforward.go
@@ -171,6 +171,9 @@ func (p *PortForwarder) KeepAlive(ctx context.Context) {
 		case <-ctx.Done():
 			p.logger.Logf("port forwarder: keep alive cancelled: %v", ctx.Err())
 			return
+		case <-p.stopChan:
+			p.logger.Logf("port forwarder: keep alive stopped via stop channel")
+			return
 		case pfErr := <-p.errChan:
 			// as of client-go v0.26.1, if the connection is successful at first but then fails,
 			// an error is logged but only a nil error is sent to this channel. this will be fixed
diff --git a/test/e2e/framework/kubernetes/pprof.go b/test/e2e/framework/kubernetes/pprof.go
index 57ec06a65f0..a85226fbfc8 100644
--- a/test/e2e/framework/kubernetes/pprof.go
+++ b/test/e2e/framework/kubernetes/pprof.go
@@ -9,12 +9,11 @@ import (
 	"net/url"
 	"os"
 	"strconv"
-	"sync"
 	"time"
 )
 
 const (
-	defaultTimeout    = 30 * time.Second
+	defaultTimeout    = 3200 * time.Second
 	defaultRetinaPort = 10093
 	defaultSpanTime   = 10 * time.Second
 )
@@ -96,22 +95,15 @@ func (p *PullPProf) Run() error {
 			}
 		}
 
-		var wg sync.WaitGroup
 		for name, path := range durationProfiles {
-			wg.Add(1)
-			go func(name, path string) {
-				file := folder + name + ".out"
-				err = p.scraper.GetProfileWithDuration(name, path, file, defaultSpanTime)
-				if err != nil {
-					// don't return here because some data is better than no data,
-					// and other profiles might be functional
-					log.Printf("error getting %s profile: %v\n", name, err)
-				}
-				wg.Done()
-			}(name, path)
+			file := folder + name + ".out"
+			err = p.scraper.GetProfileWithDuration(name, path, file, defaultSpanTime)
+			if err != nil {
+				// don't return here because some data is better than no data,
+				// and other profiles might be functional
+				log.Printf("error getting %s profile: %v\n", name, err)
+			}
 		}
-		wg.Wait()
 
 		log.Printf("-- finished scraping profiles, saved to to %s --\n", folder)
 		log.Printf("waiting %s seconds for next scrape\n", p.ScrapeIntervalSeconds)
@@ -166,12 +158,12 @@ func (p *PprofScraper) GetProfileWithDuration(name, path, outfile string, durati
 	log.Printf("getting %s profile for %d seconds...\n", name, seconds)
 	profileURL := p.formatURLWithSeconds(seconds)
 	profileURL.Path += path
-	return p.scrape(profileURL.String(), defaultTimeout+duration, outfile)
+	return p.scrape(profileURL.String(), outfile)
 }
 
 func (p *PprofScraper) GetProfile(name, path, outfile string) error {
 	log.Printf("getting %s profile...\n", name)
-	return p.scrape(p.baseURL.String()+path, defaultTimeout, outfile)
+	return p.scrape(p.baseURL.String()+path, outfile)
 }
 
 func (p *PprofScraper) formatURLWithSeconds(seconds int) url.URL {
@@ -183,10 +175,8 @@
 	return queryURL
 }
 
-func (p *PprofScraper) scrape(scrapingURL string, timeout time.Duration, outfile string) error {
-	client := http.Client{
-		Timeout: timeout,
-	}
+func (p *PprofScraper) scrape(scrapingURL, outfile string) error {
+	client := http.Client{}
 
 	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, scrapingURL, http.NoBody)
 	if err != nil {
diff --git a/test/e2e/scenarios/longrunning/scenario.go b/test/e2e/scenarios/longrunning/scenario.go
index 8bbd7b80fac..938a95649dc 100644
--- a/test/e2e/scenarios/longrunning/scenario.go
+++ b/test/e2e/scenarios/longrunning/scenario.go
@@ -17,7 +17,7 @@ func PullPProf(kubeConfigFilePath string) *types.Scenario {
 				KapingerReplicas:   "500",
 				KubeConfigFilePath: kubeConfigFilePath,
 				BurstIntervalMs:    "10000", // 10 seconds
-				BurstVolume:        "10",    // 500 requests every 10 seconds
+				BurstVolume:        "200",   // 200 requests every 10 seconds
 			},
 		},
 		{