From 29210901a032ec17fc22076432ab354285d816a6 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Fri, 5 Jul 2024 15:13:23 +0800
Subject: [PATCH 01/17] Fix a few broken image links

---
 README.md    | 6 ++++--
 README_ZH.md | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index bdf7487b..f2c7baf8 100644
--- a/README.md
+++ b/README.md
@@ -367,7 +367,7 @@ The following companies/organizations use `ants` in production.
-
+
@@ -399,7 +399,7 @@ The following companies/organizations use `ants` in production.
-
+
@@ -418,6 +418,8 @@ The following companies/organizations use `ants` in production.
+If your company is also using `ants` in production, please help us enrich this list by opening a pull request.
+
 ### open-source software

 The open-source projects below do concurrent programming with the help of `ants`.
diff --git a/README_ZH.md b/README_ZH.md
index fa041cba..7a75135f 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -368,7 +368,7 @@ pool.Reboot()
-
+
@@ -400,7 +400,7 @@ pool.Reboot()
-
+
@@ -419,6 +419,8 @@ pool.Reboot()
+如果你的公司也在生产环境上使用 `ants`,欢迎提 PR 来丰富这份列表。
+
 ### 开源软件

 这些开源项目借助 `ants` 进行并发编程。

From 1e73dc2c7baea0dd8685b2805593738d71ce3cd8 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Thu, 11 Jul 2024 12:16:27 +0800
Subject: [PATCH 02/17] doc: update the section of use cases

---
 README.md    | 23 ++++++++++++++---------
 README_ZH.md | 25 +++++++++++++++----------
 2 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index f2c7baf8..890fc98f 100644
--- a/README.md
+++ b/README.md
@@ -322,7 +322,7 @@ The following companies/organizations use `ants` in production.
-
+
@@ -371,7 +371,7 @@ The following companies/organizations use `ants` in production.
-
+
@@ -381,37 +381,42 @@ The following companies/organizations use `ants` in production.
-
+
-
+
-
+
-
+
-
+
-
-
+
+
+
+
+
+
+
diff --git a/README_ZH.md b/README_ZH.md
index 7a75135f..bfcb4de1 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -323,7 +323,7 @@ pool.Reboot()
-
+
@@ -372,47 +372,52 @@ pool.Reboot()
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
+
+
+
+
+
+

From 6169763f4f54b913ed92861a2244ae51193a9e33 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Tue, 23 Jul 2024 07:02:53 +0800
Subject: [PATCH 03/17] Add a new use case

---
 README.md    | 10 ++++++++--
 README_ZH.md |  7 ++++++-
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 890fc98f..5fe3c449 100644
--- a/README.md
+++ b/README.md
@@ -316,7 +316,7 @@ The source code in `ants` is available under the [MIT License](/LICENSE).

 ### business companies

-The following companies/organizations use `ants` in production.
+Trusted by the following corporations/organizations.
@@ -419,11 +419,17 @@ The following companies/organizations use `ants` in production.
+
+
+ + + +
-If your company is also using `ants` in production, please help us enrich this list by opening a pull request. +If you're also using `ants` in production, please help us enrich this list by opening a pull request. ### open-source software diff --git a/README_ZH.md b/README_ZH.md index bfcb4de1..4728dbed 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -420,11 +420,16 @@ pool.Reboot() + + + + + -如果你的公司也在生产环境上使用 `ants`,欢迎提 PR 来丰富这份列表。 +如果你也正在生产环境上使用 `ants`,欢迎提 PR 来丰富这份列表。 ### 开源软件 From 4d0ebb896a98b26f0982edb83ad57076cefc5018 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Tue, 30 Jul 2024 12:11:29 +0800 Subject: [PATCH 04/17] chore: update README --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5fe3c449..f0ecf361 100644 --- a/README.md +++ b/README.md @@ -314,7 +314,7 @@ The source code in `ants` is available under the [MIT License](/LICENSE). ## 🖥 Use cases -### business companies +### business corporations Trusted by the following corporations/organizations. @@ -327,7 +327,7 @@ Trusted by the following corporations/organizations. - + @@ -344,12 +344,12 @@ Trusted by the following corporations/organizations. - + - + From 9df432d04056945b435fecb2b2ef212ac2b9bbdd Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Mon, 19 Aug 2024 11:18:58 +0800 Subject: [PATCH 05/17] Move the list of patrons elsewhere Relocated to https://andypan.me/donation/#-patrons --- README.md | 44 -------------------------------------------- README_ZH.md | 44 -------------------------------------------- 2 files changed, 88 deletions(-) diff --git a/README.md b/README.md index f0ecf361..0baef5c2 100644 --- a/README.md +++ b/README.md @@ -490,50 +490,6 @@ Become a bronze sponsor with a monthly donation of $10 and get your logo on our       -## 💵 Patrons - - - - - - - - - - - - - -
- - Patrick Othmer - - - - Jimmy - - - - ChenZhen - - - - Mai Yang - - - - 王开帅 - - - - Unger Alejandro - - - - Weng Wei - -
- ## 🔋 Sponsorship

diff --git a/README_ZH.md b/README_ZH.md index 4728dbed..6910095e 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -490,50 +490,6 @@ pool.Reboot()       -## 资助者 - - - - - - - - - - - - - -
- - Patrick Othmer - - - - Jimmy - - - - ChenZhen - - - - Mai Yang - - - - 王开帅 - - - - Unger Alejandro - - - - Weng Wei - -
- ## 🔋 赞助商

From d85919e7160ccc5e4939cf910ac6e3e6a6aa7dc2 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Tue, 24 Sep 2024 17:13:55 +0800
Subject: [PATCH 06/17] chore: update the JetBrains logo

---
 README.md    | 4 ++--
 README_ZH.md | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 0baef5c2..b3615fa7 100644
--- a/README.md
+++ b/README.md
@@ -466,9 +466,9 @@ If you have `ants` integrated into projects, feel free to open a pull request re

 ## 🔋 JetBrains OS licenses

-`ants` had been being developed with GoLand under the **free JetBrains Open Source license(s)** granted by JetBrains s.r.o., hence I would like to express my thanks here.
+`ants` has been developed with GoLand under the **free JetBrains Open Source license(s)** granted by JetBrains s.r.o., hence I would like to express my thanks here.

-
+JetBrains logo.

 ## 💰 Backers
diff --git a/README_ZH.md b/README_ZH.md
index 6910095e..5bd17bdd 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -468,7 +468,7 @@ pool.Reboot()

 `ants` 项目一直以来都是在 JetBrains 公司旗下的 GoLand 集成开发环境中进行开发,基于 **free JetBrains Open Source license(s)** 正版免费授权,在此表达我的谢意。

-
+JetBrains logo.

 ## 💰 支持

From 2a562a7c2a982d6186f1b5696a1f778527437b53 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Thu, 26 Sep 2024 20:11:01 +0800
Subject: [PATCH 07/17] actions: add actions/stale

---
 .github/workflows/stale-bot.yml | 48 +++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 .github/workflows/stale-bot.yml

diff --git a/.github/workflows/stale-bot.yml b/.github/workflows/stale-bot.yml
new file mode 100644
index 00000000..b183962f
--- /dev/null
+++ b/.github/workflows/stale-bot.yml
@@ -0,0 +1,48 @@
+name: Monitor inactive issues and PRs
+on:
+  schedule:
+    - cron: '0 0 * * *'
+  workflow_dispatch:
+
+jobs:
+  stale-issues:
+    runs-on: ubuntu-latest
+    permissions:
+      actions: write
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v9
+        with:
+          operations-per-run: 50
+          days-before-issue-stale: 30
+          days-before-issue-close: 7
+          stale-issue-label: 'stale'
+          stale-issue-message: |
+            This issue is marked as stale because it has been open for 30 days with no activity.
+
+            You should take one of the following actions:
+            - Manually close this issue if it is no longer relevant
+            - Comment if you have more information to share
+
+            This issue will be automatically closed in 7 days if no further activity occurs.
+          close-issue-message: |
+            This issue was closed because it has been inactive for 7 days since being marked as stale.
+
+            If you believe this is a false alarm, please leave a comment for it or open a new issue; you can also reopen this issue directly if you have permission.
+          days-before-pr-stale: 21
+          days-before-pr-close: 7
+          stale-pr-label: 'stale'
+          stale-pr-message: |
+            This PR is marked as stale because it has been open for 21 days with no activity.
+
+            You should take one of the following actions:
+            - Manually close this PR if it is no longer relevant
+            - Push new commits or comment if you have more information to share
+
+            This PR will be automatically closed in 7 days if no further activity occurs.
+          close-pr-message: |
+            This PR was closed because it has been inactive for 7 days since being marked as stale.
+
+            If you believe this is a false alarm, feel free to reopen this PR or create a new one.
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file

From 2d40f3041b4d9472e10ec44158778dd4c6f1f784 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Thu, 17 Oct 2024 11:16:43 +0800
Subject: [PATCH 08/17] chore: update the READMEs

---
 README.md    | 5 +++--
 README_ZH.md | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index b3615fa7..eb491127 100644
--- a/README.md
+++ b/README.md
@@ -22,10 +22,11 @@ Library `ants` implements a goroutine pool with fixed capacity, managing and rec

 - Managing and recycling a massive number of goroutines automatically
 - Purging overdue goroutines periodically
-- Abundant APIs: submitting tasks, getting the number of running goroutines, tuning the capacity of the pool dynamically, releasing the pool, rebooting the pool
+- Abundant APIs: submitting tasks, getting the number of running goroutines, tuning the capacity of the pool dynamically, releasing the pool, rebooting the pool, etc.
 - Handle panic gracefully to prevent programs from crashing
-- Efficient in memory usage and it even achieves [higher performance](#-performance-summary) than unlimited goroutines in Golang
+- Efficient in memory usage and it may even achieve ***higher performance*** than unlimited goroutines in Golang
 - Nonblocking mechanism
+- Preallocated memory (ring buffer, optional)

 ## 💡 How `ants` works
diff --git a/README_ZH.md b/README_ZH.md
index 5bd17bdd..015f60d6 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -22,10 +22,11 @@

 - 自动调度海量的 goroutines,复用 goroutines
 - 定期清理过期的 goroutines,进一步节省资源
-- 提供了大量有用的接口:任务提交、获取运行中的 goroutine 数量、动态调整 Pool 大小、释放 Pool、重启 Pool
+- 提供了大量实用的接口:任务提交、获取运行中的 goroutine 数量、动态调整 Pool 大小、释放 Pool、重启 Pool 等
 - 优雅处理 panic,防止程序崩溃
-- 资源复用,极大节省内存使用量;在大规模批量并发任务场景下比原生 goroutine 并发具有[更高的性能](#-性能小结)
+- 资源复用,极大节省内存使用量;在大规模批量并发任务场景下甚至可能比原生 goroutine 并发具有***更高的性能***
 - 非阻塞机制
+- 预分配内存 (环形队列,可选)

 ## 💡 `ants` 是如何运行的

From e7e3c844aa308529262219ea57056c3ce9ee6a3f Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Wed, 13 Nov 2024 11:12:40 +0800
Subject: [PATCH 09/17] chore: update the use cases

---
 README.md    | 3 ++-
 README_ZH.md | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index eb491127..14bb7821 100644
--- a/README.md
+++ b/README.md
@@ -439,6 +439,7 @@ The open-source projects below do concurrent programming with the help of `ants`
 - [gnet](https://github.com/panjf2000/gnet): A high-performance, lightweight, non-blocking, event-driven networking framework written in pure Go.
 - [milvus](https://github.com/milvus-io/milvus): An open-source vector database for scalable similarity search and AI applications.
 - [nps](https://github.com/ehang-io/nps): A lightweight, high-performance, powerful intranet penetration proxy server, with a powerful web management terminal.
+- [TDengine](https://github.com/taosdata/TDengine): TDengine is an open-source, high-performance, cloud-native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT.
 - [siyuan](https://github.com/siyuan-note/siyuan): SiYuan is a local-first personal knowledge management system that supports complete offline use, as well as end-to-end encrypted synchronization.
 - [osmedeus](https://github.com/j3ssie/osmedeus): A Workflow Engine for Offensive Security.
 - [jitsu](https://github.com/jitsucom/jitsu/tree/master): An open-source Segment alternative. Fully-scriptable data ingestion engine for modern data teams. Set up a real-time data pipeline in minutes, not days.
@@ -455,7 +456,7 @@ The open-source projects below do concurrent programming with the help of `ants` - [WatchAD2.0](https://github.com/Qihoo360/WatchAD2.0): WatchAD2.0 是 360 信息安全中心开发的一款针对域安全的日志分析与监控系统,它可以收集所有域控上的事件日志、网络流量,通过特征匹配、协议分析、历史行为、敏感操作和蜜罐账户等方式来检测各种已知与未知威胁,功能覆盖了大部分目前的常见内网域渗透手法。 - [vanus](https://github.com/vanus-labs/vanus): Vanus is a Serverless, event streaming system with processing capabilities. It easily connects SaaS, Cloud Services, and Databases to help users build next-gen Event-driven Applications. - [trpc-go](https://github.com/trpc-group/trpc-go): A pluggable, high-performance RPC framework written in Golang. -- [motan-go](https://github.com/weibocom/motan-go): a remote procedure call (RPC) framework for the rapid development of high-performance distributed services. +- [motan-go](https://github.com/weibocom/motan-go): Motan is a cross-language remote procedure call(RPC) framework for rapid development of high performance distributed services. motan-go is the golang implementation of Motan. #### All use cases: diff --git a/README_ZH.md b/README_ZH.md index 015f60d6..64883c05 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -439,6 +439,7 @@ pool.Reboot() - [gnet](https://github.com/panjf2000/gnet): gnet 是一个高性能、轻量级、非阻塞的事件驱动 Go 网络框架。 - [milvus](https://github.com/milvus-io/milvus): 一个高度灵活、可靠且速度极快的云原生开源向量数据库。 - [nps](https://github.com/ehang-io/nps): 一款轻量级、高性能、功能强大的内网穿透代理服务器。 +- [TDengine](https://github.com/taosdata/TDengine): TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。 - [siyuan](https://github.com/siyuan-note/siyuan): 思源笔记是一款本地优先的个人知识管理系统,支持完全离线使用,同时也支持端到端加密同步。 - [osmedeus](https://github.com/j3ssie/osmedeus): A Workflow Engine for Offensive Security. - [jitsu](https://github.com/jitsucom/jitsu/tree/master): An open-source Segment alternative. Fully-scriptable data ingestion engine for modern data teams. Set-up a real-time data pipeline in minutes, not days. @@ -455,7 +456,7 @@ pool.Reboot() - [WatchAD2.0](https://github.com/Qihoo360/WatchAD2.0): WatchAD2.0 是 360 信息安全中心开发的一款针对域安全的日志分析与监控系统,它可以收集所有域控上的事件日志、网络流量,通过特征匹配、协议分析、历史行为、敏感操作和蜜罐账户等方式来检测各种已知与未知威胁,功能覆盖了大部分目前的常见内网域渗透手法。 - [vanus](https://github.com/vanus-labs/vanus): Vanus is a Serverless, event streaming system with processing capabilities. It easily connects SaaS, Cloud Services, and Databases to help users build next-gen Event-driven Applications. - [trpc-go](https://github.com/trpc-group/trpc-go): 一个 Go 实现的可插拔的高性能 RPC 框架。 -- [motan-go](https://github.com/weibocom/motan-go): 一套高性能、易于使用的分布式远程服务调用(RPC)框架。motan-go 是 motan 的 Go 语言实现。 +- [motan-go](https://github.com/weibocom/motan-go): Motan 是一套高性能、易于使用的分布式远程服务调用 (RPC) 框架。motan-go 是 motan 的 Go 语言实现。 #### 所有案例: From 99121e2404eb94c560272de8eb43f21c6ecdd307 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Wed, 11 Dec 2024 21:09:56 +0800 Subject: [PATCH 10/17] chore: update some comments Fixes #346 --- pool.go | 5 +---- pool_func.go | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/pool.go b/pool.go index 33e46ed9..2106e83f 100644 --- a/pool.go +++ b/pool.go @@ -110,10 +110,7 @@ func (p *Pool) purgeStaleWorkers() { isDormant = n == 0 || n == len(staleWorkers) p.lock.Unlock() - // Notify obsolete workers to stop. - // This notification must be outside the p.lock, since w.task - // may be blocking and may consume a lot of time if many workers - // are located on non-local CPUs. + // Clean up the stale workers. 
for i := range staleWorkers { staleWorkers[i].finish() staleWorkers[i] = nil diff --git a/pool_func.go b/pool_func.go index 140d5fe0..29541064 100644 --- a/pool_func.go +++ b/pool_func.go @@ -67,10 +67,7 @@ func (p *PoolWithFunc) purgeStaleWorkers() { isDormant = n == 0 || n == len(staleWorkers) p.lock.Unlock() - // Notify obsolete workers to stop. - // This notification must be outside the p.lock, since w.task - // may be blocking and may consume a lot of time if many workers - // are located on non-local CPUs. + // Clean up the stale workers. for i := range staleWorkers { staleWorkers[i].finish() staleWorkers[i] = nil From 0ee85b0a1ea70c5c57922c5984929a4474284aea Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Wed, 11 Dec 2024 21:13:42 +0800 Subject: [PATCH 11/17] chore: update READMEs --- README.md | 6 ++++++ README_ZH.md | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/README.md b/README.md index 14bb7821..40f1ca8e 100644 --- a/README.md +++ b/README.md @@ -283,6 +283,12 @@ p, _ := ants.NewPool(100000, ants.WithPreAlloc(true)) pool.Release() ``` +or + +```go +pool.ReleaseTimeout(time.Second * 3) +``` + ### Reboot Pool ```go diff --git a/README_ZH.md b/README_ZH.md index 64883c05..20e03ffc 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -284,6 +284,12 @@ p, _ := ants.NewPool(100000, ants.WithPreAlloc(true)) pool.Release() ``` +或者 + +```go +pool.ReleaseTimeout(time.Second * 3) +``` + ### 重启 Pool ```go From 4acc96973c0457b6b9548515599b47058411514c Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Wed, 11 Dec 2024 21:17:59 +0800 Subject: [PATCH 12/17] chore: update GitHub actions --- .github/workflows/test.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 04343509..4c860ccd 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -42,20 +42,20 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: '^1.16' + go-version: '^1.20' cache: false - name: Setup and run golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: - version: v1.57.2 + version: v1.62.2 args: --timeout 5m -v -E gofumpt -E gocritic -E misspell -E revive -E godot test: needs: lint strategy: fail-fast: false matrix: - go: [1.13, 1.22] + go: [1.13, 1.23] os: [ubuntu-latest, macos-latest, windows-latest] include: # TODO(panjf2000): There is an uncanny issue arising when downloading From df029e6411dd08a0ef91c94ae397badf2c64d9b7 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Wed, 11 Dec 2024 21:31:38 +0800 Subject: [PATCH 13/17] opt: bump up the minimum required Go version to 1.16 --- .github/workflows/test.yml | 16 +--------------- ants.go | 4 +--- go.mod | 2 +- 3 files changed, 3 insertions(+), 19 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4c860ccd..c4989a99 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -55,22 +55,8 @@ jobs: strategy: fail-fast: false matrix: - go: [1.13, 1.23] + go: [1.16, 1.23] os: [ubuntu-latest, macos-latest, windows-latest] - include: - # TODO(panjf2000): There is an uncanny issue arising when downloading - # go modules on macOS 13 for Go1.13. So we use macOS 12 for now, - # but try to figure it out and use macOS once it's resolved. - # https://github.com/panjf2000/ants/actions/runs/9546726268/job/26310385582 - - go: 1.13 - os: macos-12 - exclude: - # Starting macOS 14 GitHub Actions runners are arm-based, - # but Go didn't support arm64 until 1.16. 
Thus, we must - # replace the macOS 14 runner with macOS 12 runner for Go 1.13. - # Ref: https://github.com/actions/runner-images/issues/9741 - - go: 1.13 - os: macos-latest name: Go ${{ matrix.go }} @ ${{ matrix.os }} runs-on: ${{ matrix.os}} steps: diff --git a/ants.go b/ants.go index 4b61ba2b..dd170a60 100644 --- a/ants.go +++ b/ants.go @@ -88,9 +88,7 @@ var ( return 1 }() - // log.Lmsgprefix is not available in go1.13, just make an identical value for it. - logLmsgprefix = 64 - defaultLogger = Logger(log.New(os.Stderr, "[ants]: ", log.LstdFlags|logLmsgprefix|log.Lmicroseconds)) + defaultLogger = Logger(log.New(os.Stderr, "[ants]: ", log.LstdFlags|log.Lmsgprefix|log.Lmicroseconds)) // Init an instance pool when importing ants. defaultAntsPool, _ = NewPool(DefaultAntsPoolSize) diff --git a/go.mod b/go.mod index af3906bd..2e75d8f6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/panjf2000/ants/v2 -go 1.13 +go 1.16 require ( github.com/stretchr/testify v1.8.2 From 4f33c6ef273c3785965a21e63f9f5accbc47b16d Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 12 Jan 2025 10:38:09 +0800 Subject: [PATCH 14/17] feat: export the internal package sync (#349) --- .github/release-drafter.yml | 13 +++++++++++-- .github/workflows/test.yml | 2 +- README.md | 1 + README_ZH.md | 1 + ants.go | 7 +++++++ {internal => pkg}/sync/spinlock.go | 0 {internal => pkg}/sync/spinlock_test.go | 2 +- pkg/sync/sync.go | 25 +++++++++++++++++++++++++ pool.go | 2 +- pool_func.go | 2 +- 10 files changed, 49 insertions(+), 6 deletions(-) rename {internal => pkg}/sync/spinlock.go (100%) rename {internal => pkg}/sync/spinlock_test.go (96%) create mode 100644 pkg/sync/sync.go diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index 78f55b96..cfea99b3 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -44,7 +44,7 @@ autolabeler: title: - /fix/i - /bug/i - - /patch/i + - /resolve/i - label: docs files: - '*.md' @@ -60,6 +60,15 @@ autolabeler: - /update/i - /remove/i - /delete/i + - label: optimization + title: + - /opt:/i + - /refactor/i + - /optimize/i + - /improve/i + - /update/i + - /remove/i + - /delete/i - label: new feature title: - /feat:/i @@ -75,7 +84,7 @@ autolabeler: - label: chores title: - /chore/i - - /\bmisc\b/i + - /misc/i - /cleanup/i - /clean up/i - label: major diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c4989a99..8a560b71 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -87,7 +87,7 @@ jobs: run: go test -v -race -coverprofile="codecov.report" -covermode=atomic - name: Upload code coverage report to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: file: ./codecov.report flags: unittests diff --git a/README.md b/README.md index 40f1ca8e..78c77e43 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,7 @@
+ diff --git a/README_ZH.md b/README_ZH.md index 20e03ffc..e422f74c 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -7,6 +7,7 @@
+ diff --git a/ants.go b/ants.go index dd170a60..fa68482c 100644 --- a/ants.go +++ b/ants.go @@ -20,6 +20,13 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. +// Package ants implements an efficient and reliable goroutine pool for Go. +// +// With ants, Go applications are able to limit the number of active goroutines, +// recycle goroutines efficiently, and reduce the memory footprint significantly. +// Package ants is extremely useful in the scenarios where a massive number of +// goroutines are created and destroyed frequently, such as highly-concurrent +// batch processing systems, HTTP servers, services of asynchronous tasks, etc. package ants import ( diff --git a/internal/sync/spinlock.go b/pkg/sync/spinlock.go similarity index 100% rename from internal/sync/spinlock.go rename to pkg/sync/spinlock.go diff --git a/internal/sync/spinlock_test.go b/pkg/sync/spinlock_test.go similarity index 96% rename from internal/sync/spinlock_test.go rename to pkg/sync/spinlock_test.go index fa20401c..3934ec8f 100644 --- a/internal/sync/spinlock_test.go +++ b/pkg/sync/spinlock_test.go @@ -15,7 +15,7 @@ import ( Benchmark result for three types of locks: goos: darwin goarch: arm64 - pkg: github.com/panjf2000/ants/v2/internal/sync + pkg: github.com/panjf2000/ants/v2/pkg/sync BenchmarkMutex-10 10452573 111.1 ns/op 0 B/op 0 allocs/op BenchmarkSpinLock-10 58953211 18.01 ns/op 0 B/op 0 allocs/op BenchmarkBackOffSpinLock-10 100000000 10.81 ns/op 0 B/op 0 allocs/op diff --git a/pkg/sync/sync.go b/pkg/sync/sync.go new file mode 100644 index 00000000..d66192f0 --- /dev/null +++ b/pkg/sync/sync.go @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2025. Andy Pan. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +// Package sync provides some handy implementations for synchronization access. +// At the moment, there is only an implementation of spin-lock. 
+package sync diff --git a/pool.go b/pool.go index 2106e83f..8361928b 100644 --- a/pool.go +++ b/pool.go @@ -28,7 +28,7 @@ import ( "sync/atomic" "time" - syncx "github.com/panjf2000/ants/v2/internal/sync" + syncx "github.com/panjf2000/ants/v2/pkg/sync" ) type poolCommon struct { diff --git a/pool_func.go b/pool_func.go index 29541064..f3d341d4 100644 --- a/pool_func.go +++ b/pool_func.go @@ -28,7 +28,7 @@ import ( "sync/atomic" "time" - syncx "github.com/panjf2000/ants/v2/internal/sync" + syncx "github.com/panjf2000/ants/v2/pkg/sync" ) // PoolWithFunc accepts the tasks and process them concurrently, From 9a1446b82323b8c9cd08634b4af9941ba82bff60 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 12 Jan 2025 13:55:38 +0800 Subject: [PATCH 15/17] opt: streamline pool implementation to reduce duplicated code (#350) Also, bump up the minimal required Go version from 1.16 to 1.18. --- .github/workflows/test.yml | 2 +- README.md | 12 +- README_ZH.md | 12 +- ants.go | 392 +++++++++++++++++++++++++++++++++++- ants_benchmark_test.go | 4 +- ants_test.go | 34 ++-- examples/main.go | 6 +- go.mod | 8 +- multipool_func.go | 4 +- options.go | 4 +- pool.go | 398 ++----------------------------------- pool_func.go | 364 ++------------------------------- worker.go | 6 +- worker_func.go | 8 +- worker_loop_queue_test.go | 1 - worker_queue.go | 3 +- worker_stack_test.go | 1 - 17 files changed, 479 insertions(+), 780 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8a560b71..c9fb974e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -55,7 +55,7 @@ jobs: strategy: fail-fast: false matrix: - go: [1.16, 1.23] + go: [1.18, 1.23] os: [ubuntu-latest, macos-latest, windows-latest] name: Go ${{ matrix.go }} @ ${{ matrix.os }} runs-on: ${{ matrix.os}} diff --git a/README.md b/README.md index 78c77e43..ccdff4ad 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@
- + @@ -78,7 +78,7 @@ import ( var sum int32 -func myFunc(i interface{}) { +func myFunc(i any) { n := i.(int32) atomic.AddInt32(&sum, n) fmt.Printf("run with %d\n", n) @@ -110,7 +110,7 @@ func main() { // Use the pool with a function, // set 10 to the capacity of goroutine pool and 1 second for expired duration. - p, _ := ants.NewPoolWithFunc(10, func(i interface{}) { + p, _ := ants.NewPoolWithFunc(10, func(i any) { myFunc(i) wg.Done() }) @@ -141,7 +141,7 @@ func main() { fmt.Printf("finish all tasks.\n") // Use the MultiPoolFunc and set the capacity of 10 goroutine pools to (runTimes/10). - mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i interface{}) { + mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i any) { myFunc(i) wg.Done() }, ants.LeastTasks) @@ -186,7 +186,7 @@ type Options struct { // PanicHandler is used to handle panics from each worker goroutine. // if nil, panics will be thrown out again from worker goroutines. - PanicHandler func(interface{}) + PanicHandler func(any) // Logger is the customized logger for logging info, if it is not set, // default standard logger from log package is used. @@ -229,7 +229,7 @@ func WithNonblocking(nonblocking bool) Option { } // WithPanicHandler sets up panic handler. -func WithPanicHandler(panicHandler func(interface{})) Option { +func WithPanicHandler(panicHandler func(any)) Option { return func(opts *Options) { opts.PanicHandler = panicHandler } diff --git a/README_ZH.md b/README_ZH.md index e422f74c..275e539c 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -7,7 +7,7 @@
- + @@ -78,7 +78,7 @@ import ( var sum int32 -func myFunc(i interface{}) { +func myFunc(i any) { n := i.(int32) atomic.AddInt32(&sum, n) fmt.Printf("run with %d\n", n) @@ -110,7 +110,7 @@ func main() { // Use the pool with a function, // set 10 to the capacity of goroutine pool and 1 second for expired duration. - p, _ := ants.NewPoolWithFunc(10, func(i interface{}) { + p, _ := ants.NewPoolWithFunc(10, func(i any) { myFunc(i) wg.Done() }) @@ -141,7 +141,7 @@ func main() { fmt.Printf("finish all tasks.\n") // Use the MultiPoolFunc and set the capacity of 10 goroutine pools to (runTimes/10). - mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i interface{}) { + mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i any) { myFunc(i) wg.Done() }, ants.LeastTasks) @@ -186,7 +186,7 @@ type Options struct { // PanicHandler is used to handle panics from each worker goroutine. // if nil, panics will be thrown out again from worker goroutines. - PanicHandler func(interface{}) + PanicHandler func(any) // Logger is the customized logger for logging info, if it is not set, // default standard logger from log package is used. @@ -229,7 +229,7 @@ func WithNonblocking(nonblocking bool) Option { } // WithPanicHandler sets up panic handler. -func WithPanicHandler(panicHandler func(interface{})) Option { +func WithPanicHandler(panicHandler func(any)) Option { return func(opts *Options) { opts.PanicHandler = panicHandler } diff --git a/ants.go b/ants.go index fa68482c..d67ab5c7 100644 --- a/ants.go +++ b/ants.go @@ -30,12 +30,17 @@ package ants import ( + "context" "errors" "log" "math" "os" "runtime" + "sync" + "sync/atomic" "time" + + syncx "github.com/panjf2000/ants/v2/pkg/sync" ) const ( @@ -101,14 +106,6 @@ var ( defaultAntsPool, _ = NewPool(DefaultAntsPoolSize) ) -const nowTimeUpdateInterval = 500 * time.Millisecond - -// Logger is used for logging formatted messages. -type Logger interface { - // Printf must have the same semantics as log.Printf. - Printf(format string, args ...interface{}) -} - // Submit submits a task to pool. func Submit(task func()) error { return defaultAntsPool.Submit(task) @@ -143,3 +140,382 @@ func ReleaseTimeout(timeout time.Duration) error { func Reboot() { defaultAntsPool.Reboot() } + +// Logger is used for logging formatted messages. +type Logger interface { + // Printf must have the same semantics as log.Printf. + Printf(format string, args ...any) +} + +// poolCommon contains all common fields for other sophisticated pools. +type poolCommon struct { + // capacity of the pool, a negative value means that the capacity of pool is limitless, an infinite pool is used to + // avoid potential issue of endless blocking caused by nested usage of a pool: submitting a task to pool + // which submits a new task to the same pool. + capacity int32 + + // running is the number of the currently running goroutines. + running int32 + + // lock for protecting the worker queue. + lock sync.Locker + + // workers is a slice that store the available workers. + workers workerQueue + + // state is used to notice the pool to closed itself. + state int32 + + // cond for waiting to get an idle worker. + cond *sync.Cond + + // done is used to indicate that all workers are done. + allDone chan struct{} + // once is used to make sure the pool is closed just once. + once *sync.Once + + // workerCache speeds up the obtainment of a usable worker in function:retrieveWorker. 
+ workerCache sync.Pool + + // waiting is the number of goroutines already been blocked on pool.Submit(), protected by pool.lock + waiting int32 + + purgeDone int32 + purgeCtx context.Context + stopPurge context.CancelFunc + + ticktockDone int32 + ticktockCtx context.Context + stopTicktock context.CancelFunc + + now atomic.Value + + options *Options +} + +func newPool(size int, options ...Option) (*poolCommon, error) { + if size <= 0 { + size = -1 + } + + opts := loadOptions(options...) + + if !opts.DisablePurge { + if expiry := opts.ExpiryDuration; expiry < 0 { + return nil, ErrInvalidPoolExpiry + } else if expiry == 0 { + opts.ExpiryDuration = DefaultCleanIntervalTime + } + } + + if opts.Logger == nil { + opts.Logger = defaultLogger + } + + p := &poolCommon{ + capacity: int32(size), + allDone: make(chan struct{}), + lock: syncx.NewSpinLock(), + once: &sync.Once{}, + options: opts, + } + if p.options.PreAlloc { + if size == -1 { + return nil, ErrInvalidPreAllocSize + } + p.workers = newWorkerQueue(queueTypeLoopQueue, size) + } else { + p.workers = newWorkerQueue(queueTypeStack, 0) + } + + p.cond = sync.NewCond(p.lock) + + p.goPurge() + p.goTicktock() + + return p, nil +} + +// purgeStaleWorkers clears stale workers periodically, it runs in an individual goroutine, as a scavenger. +func (p *poolCommon) purgeStaleWorkers() { + ticker := time.NewTicker(p.options.ExpiryDuration) + + defer func() { + ticker.Stop() + atomic.StoreInt32(&p.purgeDone, 1) + }() + + purgeCtx := p.purgeCtx // copy to the local variable to avoid race from Reboot() + for { + select { + case <-purgeCtx.Done(): + return + case <-ticker.C: + } + + if p.IsClosed() { + break + } + + var isDormant bool + p.lock.Lock() + staleWorkers := p.workers.refresh(p.options.ExpiryDuration) + n := p.Running() + isDormant = n == 0 || n == len(staleWorkers) + p.lock.Unlock() + + // Clean up the stale workers. + for i := range staleWorkers { + staleWorkers[i].finish() + staleWorkers[i] = nil + } + + // There might be a situation where all workers have been cleaned up (no worker is running), + // while some invokers still are stuck in p.cond.Wait(), then we need to awake those invokers. + if isDormant && p.Waiting() > 0 { + p.cond.Broadcast() + } + } +} + +const nowTimeUpdateInterval = 500 * time.Millisecond + +// ticktock is a goroutine that updates the current time in the pool regularly. +func (p *poolCommon) ticktock() { + ticker := time.NewTicker(nowTimeUpdateInterval) + defer func() { + ticker.Stop() + atomic.StoreInt32(&p.ticktockDone, 1) + }() + + ticktockCtx := p.ticktockCtx // copy to the local variable to avoid race from Reboot() + for { + select { + case <-ticktockCtx.Done(): + return + case <-ticker.C: + } + + if p.IsClosed() { + break + } + + p.now.Store(time.Now()) + } +} + +func (p *poolCommon) goPurge() { + if p.options.DisablePurge { + return + } + + // Start a goroutine to clean up expired workers periodically. + p.purgeCtx, p.stopPurge = context.WithCancel(context.Background()) + go p.purgeStaleWorkers() +} + +func (p *poolCommon) goTicktock() { + p.now.Store(time.Now()) + p.ticktockCtx, p.stopTicktock = context.WithCancel(context.Background()) + go p.ticktock() +} + +func (p *poolCommon) nowTime() time.Time { + return p.now.Load().(time.Time) +} + +// Running returns the number of workers currently running. +func (p *poolCommon) Running() int { + return int(atomic.LoadInt32(&p.running)) +} + +// Free returns the number of available workers, -1 indicates this pool is unlimited. 
+func (p *poolCommon) Free() int {
+	c := p.Cap()
+	if c < 0 {
+		return -1
+	}
+	return c - p.Running()
+}
+
+// Waiting returns the number of tasks waiting to be executed.
+func (p *poolCommon) Waiting() int {
+	return int(atomic.LoadInt32(&p.waiting))
+}
+
+// Cap returns the capacity of this pool.
+func (p *poolCommon) Cap() int {
+	return int(atomic.LoadInt32(&p.capacity))
+}
+
+// Tune changes the capacity of this pool, note that it has no effect on an infinite or pre-allocated pool.
+func (p *poolCommon) Tune(size int) {
+	capacity := p.Cap()
+	if capacity == -1 || size <= 0 || size == capacity || p.options.PreAlloc {
+		return
+	}
+	atomic.StoreInt32(&p.capacity, int32(size))
+	if size > capacity {
+		if size-capacity == 1 {
+			p.cond.Signal()
+			return
+		}
+		p.cond.Broadcast()
+	}
+}
+
+// IsClosed indicates whether the pool is closed.
+func (p *poolCommon) IsClosed() bool {
+	return atomic.LoadInt32(&p.state) == CLOSED
+}
+
+// Release closes this pool and releases the worker queue.
+func (p *poolCommon) Release() {
+	if !atomic.CompareAndSwapInt32(&p.state, OPENED, CLOSED) {
+		return
+	}
+
+	if p.stopPurge != nil {
+		p.stopPurge()
+		p.stopPurge = nil
+	}
+	if p.stopTicktock != nil {
+		p.stopTicktock()
+		p.stopTicktock = nil
+	}
+
+	p.lock.Lock()
+	p.workers.reset()
+	p.lock.Unlock()
+	// There might be some callers waiting in retrieveWorker(), so we need to wake them up to prevent
+	// those callers blocking infinitely.
+	p.cond.Broadcast()
+}
+
+// ReleaseTimeout is like Release but with a timeout, it waits for all workers to exit before timing out.
+func (p *poolCommon) ReleaseTimeout(timeout time.Duration) error {
+	if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil {
+		return ErrPoolClosed
+	}
+
+	p.Release()
+
+	var purgeCh <-chan struct{}
+	if !p.options.DisablePurge {
+		purgeCh = p.purgeCtx.Done()
+	} else {
+		purgeCh = p.allDone
+	}
+
+	if p.Running() == 0 {
+		p.once.Do(func() {
+			close(p.allDone)
+		})
+	}
+
+	timer := time.NewTimer(timeout)
+	defer timer.Stop()
+	for {
+		select {
+		case <-timer.C:
+			return ErrTimeout
+		case <-p.allDone:
+			<-purgeCh
+			<-p.ticktockCtx.Done()
+			if p.Running() == 0 &&
+				(p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) &&
+				atomic.LoadInt32(&p.ticktockDone) == 1 {
+				return nil
+			}
+		}
+	}
+}
+
+// Reboot reboots a closed pool, it does nothing if the pool is not closed.
+// If you intend to reboot a closed pool, use ReleaseTimeout() instead of
+// Release() to ensure that all workers are stopped and resources are released
+// before rebooting, otherwise you may run into a data race.
+func (p *poolCommon) Reboot() {
+	if atomic.CompareAndSwapInt32(&p.state, CLOSED, OPENED) {
+		atomic.StoreInt32(&p.purgeDone, 0)
+		p.goPurge()
+		atomic.StoreInt32(&p.ticktockDone, 0)
+		p.goTicktock()
+		p.allDone = make(chan struct{})
+		p.once = &sync.Once{}
+	}
+}
+
+func (p *poolCommon) addRunning(delta int) int {
+	return int(atomic.AddInt32(&p.running, int32(delta)))
+}
+
+func (p *poolCommon) addWaiting(delta int) {
+	atomic.AddInt32(&p.waiting, int32(delta))
+}
+
+// retrieveWorker returns an available worker to run the tasks.
+func (p *poolCommon) retrieveWorker() (w worker, err error) {
+	p.lock.Lock()
+
+retry:
+	// First try to fetch the worker from the queue.
+	if w = p.workers.detach(); w != nil {
+		p.lock.Unlock()
+		return
+	}
+
+	// If the worker queue is empty, and we don't run out of the pool capacity,
+	// then just spawn a new worker goroutine.
+ if capacity := p.Cap(); capacity == -1 || capacity > p.Running() { + p.lock.Unlock() + w = p.workerCache.Get().(worker) + w.run() + return + } + + // Bail out early if it's in nonblocking mode or the number of pending callers reaches the maximum limit value. + if p.options.Nonblocking || (p.options.MaxBlockingTasks != 0 && p.Waiting() >= p.options.MaxBlockingTasks) { + p.lock.Unlock() + return nil, ErrPoolOverload + } + + // Otherwise, we'll have to keep them blocked and wait for at least one worker to be put back into pool. + p.addWaiting(1) + p.cond.Wait() // block and wait for an available worker + p.addWaiting(-1) + + if p.IsClosed() { + p.lock.Unlock() + return nil, ErrPoolClosed + } + + goto retry +} + +// revertWorker puts a worker back into free pool, recycling the goroutines. +func (p *poolCommon) revertWorker(worker worker) bool { + if capacity := p.Cap(); (capacity > 0 && p.Running() > capacity) || p.IsClosed() { + p.cond.Broadcast() + return false + } + + worker.setLastUsedTime(p.nowTime()) + + p.lock.Lock() + // To avoid memory leaks, add a double check in the lock scope. + // Issue: https://github.com/panjf2000/ants/issues/113 + if p.IsClosed() { + p.lock.Unlock() + return false + } + if err := p.workers.insert(worker); err != nil { + p.lock.Unlock() + return false + } + // Notify the invoker stuck in 'retrieveWorker()' of there is an available worker in the worker queue. + p.cond.Signal() + p.lock.Unlock() + + return true +} diff --git a/ants_benchmark_test.go b/ants_benchmark_test.go index f39b0e67..1dcc8dde 100644 --- a/ants_benchmark_test.go +++ b/ants_benchmark_test.go @@ -43,7 +43,7 @@ func demoFunc() { time.Sleep(time.Duration(BenchParam) * time.Millisecond) } -func demoPoolFunc(args interface{}) { +func demoPoolFunc(args any) { n := args.(int) time.Sleep(time.Duration(n) * time.Millisecond) } @@ -58,7 +58,7 @@ func longRunningFunc() { var stopLongRunningPoolFunc int32 -func longRunningPoolFunc(arg interface{}) { +func longRunningPoolFunc(arg any) { if ch, ok := arg.(chan struct{}); ok { <-ch return diff --git a/ants_test.go b/ants_test.go index 52972437..7909ea2a 100644 --- a/ants_test.go +++ b/ants_test.go @@ -93,7 +93,7 @@ func TestAntsPoolWaitToGetWorkerPreMalloc(t *testing.T) { // TestAntsPoolWithFuncWaitToGetWorker is used to test waiting to get worker. 
func TestAntsPoolWithFuncWaitToGetWorker(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFunc(AntsSize, func(i interface{}) { + p, _ := NewPoolWithFunc(AntsSize, func(i any) { demoPoolFunc(i) wg.Done() }) @@ -113,7 +113,7 @@ func TestAntsPoolWithFuncWaitToGetWorker(t *testing.T) { func TestAntsPoolWithFuncWaitToGetWorkerPreMalloc(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFunc(AntsSize, func(i interface{}) { + p, _ := NewPoolWithFunc(AntsSize, func(i any) { demoPoolFunc(i) wg.Done() }, WithPreAlloc(true)) @@ -227,7 +227,7 @@ func TestAntsPool(t *testing.T) { func TestPanicHandler(t *testing.T) { var panicCounter int64 var wg sync.WaitGroup - p0, err := NewPool(10, WithPanicHandler(func(p interface{}) { + p0, err := NewPool(10, WithPanicHandler(func(p any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) t.Logf("catch panic with PanicHandler: %v", p) @@ -242,7 +242,7 @@ func TestPanicHandler(t *testing.T) { c := atomic.LoadInt64(&panicCounter) assert.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c) assert.EqualValues(t, 0, p0.Running(), "pool should be empty after panic") - p1, err := NewPoolWithFunc(10, func(p interface{}) { panic(p) }, WithPanicHandler(func(_ interface{}) { + p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }, WithPanicHandler(func(_ any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) })) @@ -259,7 +259,7 @@ func TestPanicHandler(t *testing.T) { func TestPanicHandlerPreMalloc(t *testing.T) { var panicCounter int64 var wg sync.WaitGroup - p0, err := NewPool(10, WithPreAlloc(true), WithPanicHandler(func(p interface{}) { + p0, err := NewPool(10, WithPreAlloc(true), WithPanicHandler(func(p any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) t.Logf("catch panic with PanicHandler: %v", p) @@ -274,7 +274,7 @@ func TestPanicHandlerPreMalloc(t *testing.T) { c := atomic.LoadInt64(&panicCounter) assert.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c) assert.EqualValues(t, 0, p0.Running(), "pool should be empty after panic") - p1, err := NewPoolWithFunc(10, func(p interface{}) { panic(p) }, WithPanicHandler(func(_ interface{}) { + p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }, WithPanicHandler(func(_ any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) })) @@ -296,7 +296,7 @@ func TestPoolPanicWithoutHandler(t *testing.T) { panic("Oops!") }) - p1, err := NewPoolWithFunc(10, func(p interface{}) { + p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }) assert.NoErrorf(t, err, "create new pool with func failed: %v", err) @@ -312,7 +312,7 @@ func TestPoolPanicWithoutHandlerPreMalloc(t *testing.T) { panic("Oops!") }) - p1, err := NewPoolWithFunc(10, func(p interface{}) { + p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }) @@ -345,7 +345,7 @@ func TestPurgePool(t *testing.T) { assert.Equalf(t, 0, p.Running(), "pool should be empty after purge, but got %d", p.Running()) ch = make(chan struct{}) - f := func(i interface{}) { + f := func(i any) { <-ch d := i.(int) % 100 time.Sleep(time.Duration(d) * time.Millisecond) @@ -445,7 +445,7 @@ func TestMaxBlockingSubmit(t *testing.T) { func TestNonblockingSubmitWithFunc(t *testing.T) { poolSize := 10 var wg sync.WaitGroup - p, err := NewPoolWithFunc(poolSize, func(i interface{}) { + p, err := NewPoolWithFunc(poolSize, func(i any) { longRunningPoolFunc(i) wg.Done() }, WithNonblocking(true)) @@ -537,7 +537,7 @@ func TestRebootNewPool(t *testing.T) { assert.NoError(t, p.Submit(func() { wg.Done() }), "pool should be rebooted") wg.Wait() - 
p1, err := NewPoolWithFunc(10, func(i interface{}) { + p1, err := NewPoolWithFunc(10, func(i any) { demoPoolFunc(i) wg.Done() }) @@ -667,7 +667,7 @@ func TestWithDisablePurgePoolFunc(t *testing.T) { var wg1, wg2 sync.WaitGroup wg1.Add(numWorker) wg2.Add(numWorker) - p, _ := NewPoolWithFunc(numWorker, func(_ interface{}) { + p, _ := NewPoolWithFunc(numWorker, func(_ any) { wg1.Done() <-sig wg2.Done() @@ -682,7 +682,7 @@ func TestWithDisablePurgeAndWithExpirationPoolFunc(t *testing.T) { wg1.Add(numWorker) wg2.Add(numWorker) expiredDuration := time.Millisecond * 100 - p, _ := NewPoolWithFunc(numWorker, func(_ interface{}) { + p, _ := NewPoolWithFunc(numWorker, func(_ any) { wg1.Done() <-sig wg2.Done() @@ -692,7 +692,7 @@ func TestWithDisablePurgeAndWithExpirationPoolFunc(t *testing.T) { func TestInfinitePoolWithFunc(t *testing.T) { c := make(chan struct{}) - p, _ := NewPoolWithFunc(-1, func(i interface{}) { + p, _ := NewPoolWithFunc(-1, func(i any) { demoPoolFunc(i) <-c }) @@ -759,7 +759,7 @@ func TestReleaseWhenRunningPool(t *testing.T) { func TestReleaseWhenRunningPoolWithFunc(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFunc(1, func(i interface{}) { + p, _ := NewPoolWithFunc(1, func(i any) { t.Log("do task", i) time.Sleep(1 * time.Second) }) @@ -914,7 +914,7 @@ func TestPoolTuneScaleUp(t *testing.T) { p.Release() // test PoolWithFunc - pf, _ := NewPoolWithFunc(2, func(_ interface{}) { + pf, _ := NewPoolWithFunc(2, func(_ any) { <-c }) for i := 0; i < 2; i++ { @@ -962,7 +962,7 @@ func TestReleaseTimeout(t *testing.T) { assert.NoError(t, err) var pf *PoolWithFunc - pf, _ = NewPoolWithFunc(10, func(i interface{}) { + pf, _ = NewPoolWithFunc(10, func(i any) { dur := i.(time.Duration) time.Sleep(dur) }) diff --git a/examples/main.go b/examples/main.go index b9670986..bc00ef83 100644 --- a/examples/main.go +++ b/examples/main.go @@ -33,7 +33,7 @@ import ( var sum int32 -func myFunc(i interface{}) { +func myFunc(i any) { n := i.(int32) atomic.AddInt32(&sum, n) fmt.Printf("run with %d\n", n) @@ -65,7 +65,7 @@ func main() { // Use the pool with a function, // set 10 to the capacity of goroutine pool and 1 second for expired duration. - p, _ := ants.NewPoolWithFunc(10, func(i interface{}) { + p, _ := ants.NewPoolWithFunc(10, func(i any) { myFunc(i) wg.Done() }) @@ -96,7 +96,7 @@ func main() { fmt.Printf("finish all tasks.\n") // Use the MultiPoolFunc and set the capacity of 10 goroutine pools to (runTimes/10). - mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i interface{}) { + mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i any) { myFunc(i) wg.Done() }, ants.LeastTasks) diff --git a/go.mod b/go.mod index 2e75d8f6..9ce61a62 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,14 @@ module github.com/panjf2000/ants/v2 -go 1.16 +go 1.18 require ( github.com/stretchr/testify v1.8.2 golang.org/x/sync v0.3.0 ) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/multipool_func.go b/multipool_func.go index 868c0dea..ed7e1dc2 100644 --- a/multipool_func.go +++ b/multipool_func.go @@ -46,7 +46,7 @@ type MultiPoolWithFunc struct { // NewMultiPoolWithFunc instantiates a MultiPoolWithFunc with a size of the pool list and a size // per pool, and the load-balancing strategy. 
-func NewMultiPoolWithFunc(size, sizePerPool int, fn func(interface{}), lbs LoadBalancingStrategy, options ...Option) (*MultiPoolWithFunc, error) { +func NewMultiPoolWithFunc(size, sizePerPool int, fn func(any), lbs LoadBalancingStrategy, options ...Option) (*MultiPoolWithFunc, error) { if lbs != RoundRobin && lbs != LeastTasks { return nil, ErrInvalidLoadBalancingStrategy } @@ -82,7 +82,7 @@ func (mp *MultiPoolWithFunc) next(lbs LoadBalancingStrategy) (idx int) { } // Invoke submits a task to a pool selected by the load-balancing strategy. -func (mp *MultiPoolWithFunc) Invoke(args interface{}) (err error) { +func (mp *MultiPoolWithFunc) Invoke(args any) (err error) { if mp.IsClosed() { return ErrPoolClosed } diff --git a/options.go b/options.go index 90d1ad51..b859bef3 100644 --- a/options.go +++ b/options.go @@ -34,7 +34,7 @@ type Options struct { // PanicHandler is used to handle panics from each worker goroutine. // if nil, panics will be thrown out again from worker goroutines. - PanicHandler func(interface{}) + PanicHandler func(any) // Logger is the customized logger for logging info, if it is not set, // default standard logger from log package is used. @@ -80,7 +80,7 @@ func WithNonblocking(nonblocking bool) Option { } // WithPanicHandler sets up panic handler. -func WithPanicHandler(panicHandler func(interface{})) Option { +func WithPanicHandler(panicHandler func(any)) Option { return func(opts *Options) { opts.PanicHandler = panicHandler } diff --git a/pool.go b/pool.go index 8361928b..b1dfa991 100644 --- a/pool.go +++ b/pool.go @@ -22,203 +22,13 @@ package ants -import ( - "context" - "sync" - "sync/atomic" - "time" - - syncx "github.com/panjf2000/ants/v2/pkg/sync" -) - -type poolCommon struct { - // capacity of the pool, a negative value means that the capacity of pool is limitless, an infinite pool is used to - // avoid potential issue of endless blocking caused by nested usage of a pool: submitting a task to pool - // which submits a new task to the same pool. - capacity int32 - - // running is the number of the currently running goroutines. - running int32 - - // lock for protecting the worker queue. - lock sync.Locker - - // workers is a slice that store the available workers. - workers workerQueue - - // state is used to notice the pool to closed itself. - state int32 - - // cond for waiting to get an idle worker. - cond *sync.Cond - - // done is used to indicate that all workers are done. - allDone chan struct{} - // once is used to make sure the pool is closed just once. - once *sync.Once - - // workerCache speeds up the obtainment of a usable worker in function:retrieveWorker. - workerCache sync.Pool - - // waiting is the number of goroutines already been blocked on pool.Submit(), protected by pool.lock - waiting int32 - - purgeDone int32 - purgeCtx context.Context - stopPurge context.CancelFunc - - ticktockDone int32 - ticktockCtx context.Context - stopTicktock context.CancelFunc - - now atomic.Value - - options *Options -} - -// Pool accepts the tasks and process them concurrently, -// it limits the total of goroutines to a given number by recycling goroutines. +// Pool is a goroutine pool that limits and recycles a mass of goroutines. +// The pool capacity can be fixed or unlimited. type Pool struct { - poolCommon -} - -// purgeStaleWorkers clears stale workers periodically, it runs in an individual goroutine, as a scavenger. 
-func (p *Pool) purgeStaleWorkers() { - ticker := time.NewTicker(p.options.ExpiryDuration) - - defer func() { - ticker.Stop() - atomic.StoreInt32(&p.purgeDone, 1) - }() - - purgeCtx := p.purgeCtx // copy to the local variable to avoid race from Reboot() - for { - select { - case <-purgeCtx.Done(): - return - case <-ticker.C: - } - - if p.IsClosed() { - break - } - - var isDormant bool - p.lock.Lock() - staleWorkers := p.workers.refresh(p.options.ExpiryDuration) - n := p.Running() - isDormant = n == 0 || n == len(staleWorkers) - p.lock.Unlock() - - // Clean up the stale workers. - for i := range staleWorkers { - staleWorkers[i].finish() - staleWorkers[i] = nil - } - - // There might be a situation where all workers have been cleaned up (no worker is running), - // while some invokers still are stuck in p.cond.Wait(), then we need to awake those invokers. - if isDormant && p.Waiting() > 0 { - p.cond.Broadcast() - } - } -} - -// ticktock is a goroutine that updates the current time in the pool regularly. -func (p *Pool) ticktock() { - ticker := time.NewTicker(nowTimeUpdateInterval) - defer func() { - ticker.Stop() - atomic.StoreInt32(&p.ticktockDone, 1) - }() - - ticktockCtx := p.ticktockCtx // copy to the local variable to avoid race from Reboot() - for { - select { - case <-ticktockCtx.Done(): - return - case <-ticker.C: - } - - if p.IsClosed() { - break - } - - p.now.Store(time.Now()) - } -} - -func (p *Pool) goPurge() { - if p.options.DisablePurge { - return - } - - // Start a goroutine to clean up expired workers periodically. - p.purgeCtx, p.stopPurge = context.WithCancel(context.Background()) - go p.purgeStaleWorkers() -} - -func (p *Pool) goTicktock() { - p.now.Store(time.Now()) - p.ticktockCtx, p.stopTicktock = context.WithCancel(context.Background()) - go p.ticktock() -} - -func (p *Pool) nowTime() time.Time { - return p.now.Load().(time.Time) + *poolCommon } -// NewPool instantiates a Pool with customized options. -func NewPool(size int, options ...Option) (*Pool, error) { - if size <= 0 { - size = -1 - } - - opts := loadOptions(options...) - - if !opts.DisablePurge { - if expiry := opts.ExpiryDuration; expiry < 0 { - return nil, ErrInvalidPoolExpiry - } else if expiry == 0 { - opts.ExpiryDuration = DefaultCleanIntervalTime - } - } - - if opts.Logger == nil { - opts.Logger = defaultLogger - } - - p := &Pool{poolCommon: poolCommon{ - capacity: int32(size), - allDone: make(chan struct{}), - lock: syncx.NewSpinLock(), - once: &sync.Once{}, - options: opts, - }} - p.workerCache.New = func() interface{} { - return &goWorker{ - pool: p, - task: make(chan func(), workerChanCap), - } - } - if p.options.PreAlloc { - if size == -1 { - return nil, ErrInvalidPreAllocSize - } - p.workers = newWorkerQueue(queueTypeLoopQueue, size) - } else { - p.workers = newWorkerQueue(queueTypeStack, 0) - } - - p.cond = sync.NewCond(p.lock) - - p.goPurge() - p.goTicktock() - - return p, nil -} - -// Submit submits a task to this pool. +// Submit submits a task to the pool. // // Note that you are allowed to call Pool.Submit() from the current Pool.Submit(), // but what calls for special attention is that you will get blocked with the last @@ -236,198 +46,20 @@ func (p *Pool) Submit(task func()) error { return err } -// Running returns the number of workers currently running. -func (p *Pool) Running() int { - return int(atomic.LoadInt32(&p.running)) -} - -// Free returns the number of available workers, -1 indicates this pool is unlimited. 
-func (p *Pool) Free() int { - c := p.Cap() - if c < 0 { - return -1 - } - return c - p.Running() -} - -// Waiting returns the number of tasks waiting to be executed. -func (p *Pool) Waiting() int { - return int(atomic.LoadInt32(&p.waiting)) -} - -// Cap returns the capacity of this pool. -func (p *Pool) Cap() int { - return int(atomic.LoadInt32(&p.capacity)) -} - -// Tune changes the capacity of this pool, note that it is noneffective to the infinite or pre-allocation pool. -func (p *Pool) Tune(size int) { - capacity := p.Cap() - if capacity == -1 || size <= 0 || size == capacity || p.options.PreAlloc { - return - } - atomic.StoreInt32(&p.capacity, int32(size)) - if size > capacity { - if size-capacity == 1 { - p.cond.Signal() - return - } - p.cond.Broadcast() - } -} - -// IsClosed indicates whether the pool is closed. -func (p *Pool) IsClosed() bool { - return atomic.LoadInt32(&p.state) == CLOSED -} - -// Release closes this pool and releases the worker queue. -func (p *Pool) Release() { - if !atomic.CompareAndSwapInt32(&p.state, OPENED, CLOSED) { - return - } - - if p.stopPurge != nil { - p.stopPurge() - p.stopPurge = nil - } - if p.stopTicktock != nil { - p.stopTicktock() - p.stopTicktock = nil - } - - p.lock.Lock() - p.workers.reset() - p.lock.Unlock() - // There might be some callers waiting in retrieveWorker(), so we need to wake them up to prevent - // those callers blocking infinitely. - p.cond.Broadcast() -} - -// ReleaseTimeout is like Release but with a timeout, it waits all workers to exit before timing out. -func (p *Pool) ReleaseTimeout(timeout time.Duration) error { - if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil { - return ErrPoolClosed - } - - p.Release() - - var purgeCh <-chan struct{} - if !p.options.DisablePurge { - purgeCh = p.purgeCtx.Done() - } else { - purgeCh = p.allDone - } - - if p.Running() == 0 { - p.once.Do(func() { - close(p.allDone) - }) +// NewPool instantiates a Pool with customized options. +func NewPool(size int, options ...Option) (*Pool, error) { + pc, err := newPool(size, options...) + if err != nil { + return nil, err } - timer := time.NewTimer(timeout) - defer timer.Stop() - for { - select { - case <-timer.C: - return ErrTimeout - case <-p.allDone: - <-purgeCh - <-p.ticktockCtx.Done() - if p.Running() == 0 && - (p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) && - atomic.LoadInt32(&p.ticktockDone) == 1 { - return nil - } + pool := &Pool{poolCommon: pc} + pool.workerCache.New = func() any { + return &goWorker{ + pool: pool, + task: make(chan func(), workerChanCap), } } -} - -// Reboot reboots a closed pool, it does nothing if the pool is not closed. -// If you intend to reboot a closed pool, use ReleaseTimeout() instead of -// Release() to ensure that all workers are stopped and resource are released -// before rebooting, otherwise you may run into data race. -func (p *Pool) Reboot() { - if atomic.CompareAndSwapInt32(&p.state, CLOSED, OPENED) { - atomic.StoreInt32(&p.purgeDone, 0) - p.goPurge() - atomic.StoreInt32(&p.ticktockDone, 0) - p.goTicktock() - p.allDone = make(chan struct{}) - p.once = &sync.Once{} - } -} - -func (p *Pool) addRunning(delta int) int { - return int(atomic.AddInt32(&p.running, int32(delta))) -} - -func (p *Pool) addWaiting(delta int) { - atomic.AddInt32(&p.waiting, int32(delta)) -} - -// retrieveWorker returns an available worker to run the tasks. 
-func (p *Pool) retrieveWorker() (w worker, err error) { - p.lock.Lock() - -retry: - // First try to fetch the worker from the queue. - if w = p.workers.detach(); w != nil { - p.lock.Unlock() - return - } - - // If the worker queue is empty, and we don't run out of the pool capacity, - // then just spawn a new worker goroutine. - if capacity := p.Cap(); capacity == -1 || capacity > p.Running() { - p.lock.Unlock() - w = p.workerCache.Get().(*goWorker) - w.run() - return - } - - // Bail out early if it's in nonblocking mode or the number of pending callers reaches the maximum limit value. - if p.options.Nonblocking || (p.options.MaxBlockingTasks != 0 && p.Waiting() >= p.options.MaxBlockingTasks) { - p.lock.Unlock() - return nil, ErrPoolOverload - } - - // Otherwise, we'll have to keep them blocked and wait for at least one worker to be put back into pool. - p.addWaiting(1) - p.cond.Wait() // block and wait for an available worker - p.addWaiting(-1) - - if p.IsClosed() { - p.lock.Unlock() - return nil, ErrPoolClosed - } - - goto retry -} - -// revertWorker puts a worker back into free pool, recycling the goroutines. -func (p *Pool) revertWorker(worker *goWorker) bool { - if capacity := p.Cap(); (capacity > 0 && p.Running() > capacity) || p.IsClosed() { - p.cond.Broadcast() - return false - } - - worker.lastUsed = p.nowTime() - - p.lock.Lock() - // To avoid memory leaks, add a double check in the lock scope. - // Issue: https://github.com/panjf2000/ants/issues/113 - if p.IsClosed() { - p.lock.Unlock() - return false - } - if err := p.workers.insert(worker); err != nil { - p.lock.Unlock() - return false - } - // Notify the invoker stuck in 'retrieveWorker()' of there is an available worker in the worker queue. - p.cond.Signal() - p.lock.Unlock() - return true + return pool, nil } diff --git a/pool_func.go b/pool_func.go index f3d341d4..70f5fae2 100644 --- a/pool_func.go +++ b/pool_func.go @@ -22,173 +22,21 @@ package ants -import ( - "context" - "sync" - "sync/atomic" - "time" - - syncx "github.com/panjf2000/ants/v2/pkg/sync" -) - -// PoolWithFunc accepts the tasks and process them concurrently, -// it limits the total of goroutines to a given number by recycling goroutines. +// PoolWithFunc is like Pool but accepts a unified function for all goroutines to execute. type PoolWithFunc struct { - poolCommon - - // poolFunc is the function for processing tasks. - poolFunc func(interface{}) -} - -// purgeStaleWorkers clears stale workers periodically, it runs in an individual goroutine, as a scavenger. -func (p *PoolWithFunc) purgeStaleWorkers() { - ticker := time.NewTicker(p.options.ExpiryDuration) - defer func() { - ticker.Stop() - atomic.StoreInt32(&p.purgeDone, 1) - }() - - purgeCtx := p.purgeCtx // copy to the local variable to avoid race from Reboot() - for { - select { - case <-purgeCtx.Done(): - return - case <-ticker.C: - } - - if p.IsClosed() { - break - } - - var isDormant bool - p.lock.Lock() - staleWorkers := p.workers.refresh(p.options.ExpiryDuration) - n := p.Running() - isDormant = n == 0 || n == len(staleWorkers) - p.lock.Unlock() - - // Clean up the stale workers. - for i := range staleWorkers { - staleWorkers[i].finish() - staleWorkers[i] = nil - } - - // There might be a situation where all workers have been cleaned up (no worker is running), - // while some invokers still are stuck in p.cond.Wait(), then we need to awake those invokers. 
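// The retrieveWorker/revertWorker pair removed earlier in this diff has four
// outcomes for a caller: reuse a cached worker, spawn a new goroutine, block
// on p.cond, or fail fast with ErrPoolOverload. A hedged sketch of how that
// surfaces at the call site, using options seen in this diff; task is a
// placeholder func():
//
//	// Allow at most one caller to park in retrieveWorker; the next one errors.
//	p, _ := ants.NewPool(10, ants.WithMaxBlockingTasks(1))
//	if err := p.Submit(task); errors.Is(err, ants.ErrPoolOverload) {
//		// capacity and blocking quota are both exhausted; back off here
//	}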
- if isDormant && p.Waiting() > 0 { - p.cond.Broadcast() - } - } -} - -// ticktock is a goroutine that updates the current time in the pool regularly. -func (p *PoolWithFunc) ticktock() { - ticker := time.NewTicker(nowTimeUpdateInterval) - defer func() { - ticker.Stop() - atomic.StoreInt32(&p.ticktockDone, 1) - }() - - ticktockCtx := p.ticktockCtx // copy to the local variable to avoid race from Reboot() - for { - select { - case <-ticktockCtx.Done(): - return - case <-ticker.C: - } - - if p.IsClosed() { - break - } - - p.now.Store(time.Now()) - } -} - -func (p *PoolWithFunc) goPurge() { - if p.options.DisablePurge { - return - } - - // Start a goroutine to clean up expired workers periodically. - p.purgeCtx, p.stopPurge = context.WithCancel(context.Background()) - go p.purgeStaleWorkers() -} + *poolCommon -func (p *PoolWithFunc) goTicktock() { - p.now.Store(time.Now()) - p.ticktockCtx, p.stopTicktock = context.WithCancel(context.Background()) - go p.ticktock() + // poolFunc is the unified function for processing tasks. + poolFunc func(any) } -func (p *PoolWithFunc) nowTime() time.Time { - return p.now.Load().(time.Time) -} - -// NewPoolWithFunc instantiates a PoolWithFunc with customized options. -func NewPoolWithFunc(size int, pf func(interface{}), options ...Option) (*PoolWithFunc, error) { - if size <= 0 { - size = -1 - } - - if pf == nil { - return nil, ErrLackPoolFunc - } - - opts := loadOptions(options...) - - if !opts.DisablePurge { - if expiry := opts.ExpiryDuration; expiry < 0 { - return nil, ErrInvalidPoolExpiry - } else if expiry == 0 { - opts.ExpiryDuration = DefaultCleanIntervalTime - } - } - - if opts.Logger == nil { - opts.Logger = defaultLogger - } - - p := &PoolWithFunc{ - poolCommon: poolCommon{ - capacity: int32(size), - allDone: make(chan struct{}), - lock: syncx.NewSpinLock(), - once: &sync.Once{}, - options: opts, - }, - poolFunc: pf, - } - p.workerCache.New = func() interface{} { - return &goWorkerWithFunc{ - pool: p, - args: make(chan interface{}, workerChanCap), - } - } - if p.options.PreAlloc { - if size == -1 { - return nil, ErrInvalidPreAllocSize - } - p.workers = newWorkerQueue(queueTypeLoopQueue, size) - } else { - p.workers = newWorkerQueue(queueTypeStack, 0) - } - - p.cond = sync.NewCond(p.lock) - - p.goPurge() - p.goTicktock() - - return p, nil -} - -// Invoke submits a task to pool. +// Invoke passes arguments to the pool. // // Note that you are allowed to call Pool.Invoke() from the current Pool.Invoke(), // but what calls for special attention is that you will get blocked with the last // Pool.Invoke() call once the current Pool runs out of its capacity, and to avoid this, // you should instantiate a PoolWithFunc with ants.WithNonblocking(true). -func (p *PoolWithFunc) Invoke(args interface{}) error { +func (p *PoolWithFunc) Invoke(args any) error { if p.IsClosed() { return ErrPoolClosed } @@ -200,198 +48,28 @@ func (p *PoolWithFunc) Invoke(args interface{}) error { return err } -// Running returns the number of workers currently running. -func (p *PoolWithFunc) Running() int { - return int(atomic.LoadInt32(&p.running)) -} - -// Free returns the number of available workers, -1 indicates this pool is unlimited. -func (p *PoolWithFunc) Free() int { - c := p.Cap() - if c < 0 { - return -1 - } - return c - p.Running() -} - -// Waiting returns the number of tasks waiting to be executed. -func (p *PoolWithFunc) Waiting() int { - return int(atomic.LoadInt32(&p.waiting)) -} - -// Cap returns the capacity of this pool. 
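// The purge and ticktock goroutines deleted above move into poolCommon, but
// their tuning knobs are unchanged. A sketch, assuming the option constructors
// exercised by the tests in this series; handler is a placeholder func(any):
//
//	// Reclaim workers idle for 10s instead of DefaultCleanIntervalTime.
//	p1, _ := ants.NewPoolWithFunc(8, handler, ants.WithExpiryDuration(10*time.Second))
//
//	// Never reclaim idle workers; no scavenger goroutine is started at all.
//	p2, _ := ants.NewPoolWithFunc(8, handler, ants.WithDisablePurge(true))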
-func (p *PoolWithFunc) Cap() int { - return int(atomic.LoadInt32(&p.capacity)) -} - -// Tune changes the capacity of this pool, note that it is noneffective to the infinite or pre-allocation pool. -func (p *PoolWithFunc) Tune(size int) { - capacity := p.Cap() - if capacity == -1 || size <= 0 || size == capacity || p.options.PreAlloc { - return - } - atomic.StoreInt32(&p.capacity, int32(size)) - if size > capacity { - if size-capacity == 1 { - p.cond.Signal() - return - } - p.cond.Broadcast() - } -} - -// IsClosed indicates whether the pool is closed. -func (p *PoolWithFunc) IsClosed() bool { - return atomic.LoadInt32(&p.state) == CLOSED -} - -// Release closes this pool and releases the worker queue. -func (p *PoolWithFunc) Release() { - if !atomic.CompareAndSwapInt32(&p.state, OPENED, CLOSED) { - return - } - - if p.stopPurge != nil { - p.stopPurge() - p.stopPurge = nil - } - if p.stopTicktock != nil { - p.stopTicktock() - p.stopTicktock = nil - } - - p.lock.Lock() - p.workers.reset() - p.lock.Unlock() - // There might be some callers waiting in retrieveWorker(), so we need to wake them up to prevent - // those callers blocking infinitely. - p.cond.Broadcast() -} - -// ReleaseTimeout is like Release but with a timeout, it waits all workers to exit before timing out. -func (p *PoolWithFunc) ReleaseTimeout(timeout time.Duration) error { - if p.IsClosed() || (!p.options.DisablePurge && p.stopPurge == nil) || p.stopTicktock == nil { - return ErrPoolClosed +// NewPoolWithFunc instantiates a PoolWithFunc with customized options. +func NewPoolWithFunc(size int, pf func(any), options ...Option) (*PoolWithFunc, error) { + if pf == nil { + return nil, ErrLackPoolFunc } - p.Release() - - var purgeCh <-chan struct{} - if !p.options.DisablePurge { - purgeCh = p.purgeCtx.Done() - } else { - purgeCh = p.allDone + pc, err := newPool(size, options...) + if err != nil { + return nil, err } - if p.Running() == 0 { - p.once.Do(func() { - close(p.allDone) - }) + pool := &PoolWithFunc{ + poolCommon: pc, + poolFunc: pf, } - timer := time.NewTimer(timeout) - defer timer.Stop() - for { - select { - case <-timer.C: - return ErrTimeout - case <-p.allDone: - <-purgeCh - <-p.ticktockCtx.Done() - if p.Running() == 0 && - (p.options.DisablePurge || atomic.LoadInt32(&p.purgeDone) == 1) && - atomic.LoadInt32(&p.ticktockDone) == 1 { - return nil - } + pool.workerCache.New = func() any { + return &goWorkerWithFunc{ + pool: pool, + args: make(chan any, workerChanCap), } } -} - -// Reboot reboots a closed pool, it does nothing if the pool is not closed. -// If you intend to reboot a closed pool, use ReleaseTimeout() instead of -// Release() to ensure that all workers are stopped and resource are released -// before rebooting, otherwise you may run into data race. -func (p *PoolWithFunc) Reboot() { - if atomic.CompareAndSwapInt32(&p.state, CLOSED, OPENED) { - atomic.StoreInt32(&p.purgeDone, 0) - p.goPurge() - atomic.StoreInt32(&p.ticktockDone, 0) - p.goTicktock() - p.allDone = make(chan struct{}) - p.once = &sync.Once{} - } -} - -func (p *PoolWithFunc) addRunning(delta int) int { - return int(atomic.AddInt32(&p.running, int32(delta))) -} - -func (p *PoolWithFunc) addWaiting(delta int) { - atomic.AddInt32(&p.waiting, int32(delta)) -} - -// retrieveWorker returns an available worker to run the tasks. -func (p *PoolWithFunc) retrieveWorker() (w worker, err error) { - p.lock.Lock() - -retry: - // First try to fetch the worker from the queue. 
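// An end-to-end sketch of the rewritten constructor above, in the spirit of
// the library's README example; sum and wg are illustrative locals, imports
// (sync, sync/atomic, ants) elided:
//
//	var sum int32
//	var wg sync.WaitGroup
//	p, _ := ants.NewPoolWithFunc(10, func(i any) {
//		atomic.AddInt32(&sum, i.(int32)) // one type assertion per task
//		wg.Done()
//	})
//	defer p.Release()
//	for i := int32(0); i < 100; i++ {
//		wg.Add(1)
//		_ = p.Invoke(i)
//	}
//	wg.Wait()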
- if w = p.workers.detach(); w != nil { - p.lock.Unlock() - return - } - - // If the worker queue is empty, and we don't run out of the pool capacity, - // then just spawn a new worker goroutine. - if capacity := p.Cap(); capacity == -1 || capacity > p.Running() { - p.lock.Unlock() - w = p.workerCache.Get().(*goWorkerWithFunc) - w.run() - return - } - - // Bail out early if it's in nonblocking mode or the number of pending callers reaches the maximum limit value. - if p.options.Nonblocking || (p.options.MaxBlockingTasks != 0 && p.Waiting() >= p.options.MaxBlockingTasks) { - p.lock.Unlock() - return nil, ErrPoolOverload - } - - // Otherwise, we'll have to keep them blocked and wait for at least one worker to be put back into pool. - p.addWaiting(1) - p.cond.Wait() // block and wait for an available worker - p.addWaiting(-1) - - if p.IsClosed() { - p.lock.Unlock() - return nil, ErrPoolClosed - } - - goto retry -} - -// revertWorker puts a worker back into free pool, recycling the goroutines. -func (p *PoolWithFunc) revertWorker(worker *goWorkerWithFunc) bool { - if capacity := p.Cap(); (capacity > 0 && p.Running() > capacity) || p.IsClosed() { - p.cond.Broadcast() - return false - } - - worker.lastUsed = p.nowTime() - - p.lock.Lock() - // To avoid memory leaks, add a double check in the lock scope. - // Issue: https://github.com/panjf2000/ants/issues/113 - if p.IsClosed() { - p.lock.Unlock() - return false - } - if err := p.workers.insert(worker); err != nil { - p.lock.Unlock() - return false - } - // Notify the invoker stuck in 'retrieveWorker()' of there is an available worker in the worker queue. - p.cond.Signal() - p.lock.Unlock() - return true + return pool, nil } diff --git a/worker.go b/worker.go index 73166f80..f8dd6506 100644 --- a/worker.go +++ b/worker.go @@ -84,10 +84,14 @@ func (w *goWorker) lastUsedTime() time.Time { return w.lastUsed } +func (w *goWorker) setLastUsedTime(t time.Time) { + w.lastUsed = t +} + func (w *goWorker) inputFunc(fn func()) { w.task <- fn } -func (w *goWorker) inputParam(interface{}) { +func (w *goWorker) inputParam(any) { panic("unreachable") } diff --git a/worker_func.go b/worker_func.go index a25f4f9e..76c697ac 100644 --- a/worker_func.go +++ b/worker_func.go @@ -35,7 +35,7 @@ type goWorkerWithFunc struct { pool *PoolWithFunc // args is a job should be done. - args chan interface{} + args chan any // lastUsed will be updated when putting a worker back into queue. 
lastUsed time.Time @@ -84,10 +84,14 @@ func (w *goWorkerWithFunc) lastUsedTime() time.Time { return w.lastUsed } +func (w *goWorkerWithFunc) setLastUsedTime(t time.Time) { + w.lastUsed = t +} + func (w *goWorkerWithFunc) inputFunc(func()) { panic("unreachable") } -func (w *goWorkerWithFunc) inputParam(arg interface{}) { +func (w *goWorkerWithFunc) inputParam(arg any) { w.args <- arg } diff --git a/worker_loop_queue_test.go b/worker_loop_queue_test.go index 3bd495e5..755cf156 100644 --- a/worker_loop_queue_test.go +++ b/worker_loop_queue_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package ants diff --git a/worker_queue.go b/worker_queue.go index bcb74807..1c44ee64 100644 --- a/worker_queue.go +++ b/worker_queue.go @@ -17,8 +17,9 @@ type worker interface { run() finish() lastUsedTime() time.Time + setLastUsedTime(t time.Time) inputFunc(func()) - inputParam(interface{}) + inputParam(any) } type workerQueue interface { diff --git a/worker_stack_test.go b/worker_stack_test.go index 6fd3d762..453d6e3a 100644 --- a/worker_stack_test.go +++ b/worker_stack_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package ants From 60bd4c42f9c4e17d974d7a7137d75657d625f9d6 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 12 Jan 2025 20:50:22 +0800 Subject: [PATCH 16/17] feat: implement generic pool (#351) --- ants.go | 4 + ants_benchmark_test.go | 18 +- ants_test.go | 832 +++++++++++++++++++++++++++++--------- multipool.go | 12 +- multipool_func.go | 12 +- multipool_func_generic.go | 215 ++++++++++ pool_func.go | 12 +- pool_func_generic.go | 71 ++++ worker.go | 12 +- worker_func.go | 22 +- worker_func_generic.go | 96 +++++ worker_loop_queue.go | 7 +- worker_loop_queue_test.go | 48 +-- worker_queue.go | 11 +- worker_stack.go | 54 +-- worker_stack_test.go | 26 +- 16 files changed, 1136 insertions(+), 316 deletions(-) create mode 100644 multipool_func_generic.go create mode 100644 pool_func_generic.go create mode 100644 worker_func_generic.go diff --git a/ants.go b/ants.go index d67ab5c7..eae6a149 100644 --- a/ants.go +++ b/ants.go @@ -84,6 +84,9 @@ var ( // ErrInvalidLoadBalancingStrategy will be returned when trying to create a MultiPool with an invalid load-balancing strategy. ErrInvalidLoadBalancingStrategy = errors.New("invalid load-balancing strategy") + // ErrInvalidMultiPoolSize will be returned when trying to create a MultiPool with an invalid size. + ErrInvalidMultiPoolSize = errors.New("invalid size for multiple pool") + // workerChanCap determines whether the channel of a worker should be a buffered channel // to get the best performance. Inspired by fasthttp at // https://github.com/valyala/fasthttp/blob/master/workerpool.go#L139 @@ -387,6 +390,7 @@ func (p *poolCommon) Release() { p.lock.Lock() p.workers.reset() p.lock.Unlock() + // There might be some callers waiting in retrieveWorker(), so we need to wake them up to prevent // those callers blocking infinitely. 
p.cond.Broadcast() diff --git a/ants_benchmark_test.go b/ants_benchmark_test.go index 1dcc8dde..33b4c1e4 100644 --- a/ants_benchmark_test.go +++ b/ants_benchmark_test.go @@ -48,6 +48,10 @@ func demoPoolFunc(args any) { time.Sleep(time.Duration(n) * time.Millisecond) } +func demoPoolFuncInt(n int) { + time.Sleep(time.Duration(n) * time.Millisecond) +} + var stopLongRunningFunc int32 func longRunningFunc() { @@ -56,16 +60,12 @@ func longRunningFunc() { } } -var stopLongRunningPoolFunc int32 - func longRunningPoolFunc(arg any) { - if ch, ok := arg.(chan struct{}); ok { - <-ch - return - } - for atomic.LoadInt32(&stopLongRunningPoolFunc) == 0 { - runtime.Gosched() - } + <-arg.(chan struct{}) +} + +func longRunningPoolFuncCh(ch chan struct{}) { + <-ch } func BenchmarkGoroutines(b *testing.B) { diff --git a/ants_test.go b/ants_test.go index 7909ea2a..316497d1 100644 --- a/ants_test.go +++ b/ants_test.go @@ -31,7 +31,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -111,6 +111,27 @@ func TestAntsPoolWithFuncWaitToGetWorker(t *testing.T) { t.Logf("memory usage:%d MB", curMem) } +// TestAntsPoolWithFuncGenericWaitToGetWorker is used to test waiting to get worker. +func TestAntsPoolWithFuncGenericWaitToGetWorker(t *testing.T) { + var wg sync.WaitGroup + p, _ := NewPoolWithFuncGeneric(AntsSize, func(i int) { + demoPoolFuncInt(i) + wg.Done() + }) + defer p.Release() + + for i := 0; i < n; i++ { + wg.Add(1) + _ = p.Invoke(Param) + } + wg.Wait() + t.Logf("pool with func, running workers number:%d", p.Running()) + mem := runtime.MemStats{} + runtime.ReadMemStats(&mem) + curMem = mem.TotalAlloc/MiB - curMem + t.Logf("memory usage:%d MB", curMem) +} + func TestAntsPoolWithFuncWaitToGetWorkerPreMalloc(t *testing.T) { var wg sync.WaitGroup p, _ := NewPoolWithFunc(AntsSize, func(i any) { @@ -131,6 +152,26 @@ func TestAntsPoolWithFuncWaitToGetWorkerPreMalloc(t *testing.T) { t.Logf("memory usage:%d MB", curMem) } +func TestAntsPoolWithFuncGenericWaitToGetWorkerPreMalloc(t *testing.T) { + var wg sync.WaitGroup + p, _ := NewPoolWithFuncGeneric(AntsSize, func(i int) { + demoPoolFuncInt(i) + wg.Done() + }, WithPreAlloc(true)) + defer p.Release() + + for i := 0; i < n; i++ { + wg.Add(1) + _ = p.Invoke(Param) + } + wg.Wait() + t.Logf("pool with func, running workers number:%d", p.Running()) + mem := runtime.MemStats{} + runtime.ReadMemStats(&mem) + curMem = mem.TotalAlloc/MiB - curMem + t.Logf("memory usage:%d MB", curMem) +} + // TestAntsPoolGetWorkerFromCache is used to test getting worker from sync.Pool. func TestAntsPoolGetWorkerFromCache(t *testing.T) { p, _ := NewPool(TestSize) @@ -166,6 +207,24 @@ func TestAntsPoolWithFuncGetWorkerFromCache(t *testing.T) { t.Logf("memory usage:%d MB", curMem) } +// TestAntsPoolWithFuncGenericGetWorkerFromCache is used to test getting worker from sync.Pool. 
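// The generics-based variant introduced by this patch removes the any-boxing
// shown above. A hedged sketch based on the constructor shape these tests use
// (NewPoolWithFuncGeneric taking a func(T)):
//
//	p, err := ants.NewPoolWithFuncGeneric(10, func(d time.Duration) {
//		time.Sleep(d) // d is statically typed; no d.(time.Duration) assertion
//	})
//	if err != nil {
//		return err // e.g. ErrLackPoolFunc when the function is nil
//	}
//	defer p.Release()
//	_ = p.Invoke(time.Second) // argument type is checked at compile time
//	// _ = p.Invoke("oops")   // would not compile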
+func TestAntsPoolWithFuncGenericGetWorkerFromCache(t *testing.T) { + dur := 10 + p, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt) + defer p.Release() + + for i := 0; i < AntsSize; i++ { + _ = p.Invoke(dur) + } + time.Sleep(2 * DefaultCleanIntervalTime) + _ = p.Invoke(dur) + t.Logf("pool with func, running workers number:%d", p.Running()) + mem := runtime.MemStats{} + runtime.ReadMemStats(&mem) + curMem = mem.TotalAlloc/MiB - curMem + t.Logf("memory usage:%d MB", curMem) +} + func TestAntsPoolWithFuncGetWorkerFromCachePreMalloc(t *testing.T) { dur := 10 p, _ := NewPoolWithFunc(TestSize, demoPoolFunc, WithPreAlloc(true)) @@ -183,6 +242,23 @@ func TestAntsPoolWithFuncGetWorkerFromCachePreMalloc(t *testing.T) { t.Logf("memory usage:%d MB", curMem) } +func TestAntsPoolWithFuncGenericGetWorkerFromCachePreMalloc(t *testing.T) { + dur := 10 + p, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt, WithPreAlloc(true)) + defer p.Release() + + for i := 0; i < AntsSize; i++ { + _ = p.Invoke(dur) + } + time.Sleep(2 * DefaultCleanIntervalTime) + _ = p.Invoke(dur) + t.Logf("pool with func, running workers number:%d", p.Running()) + mem := runtime.MemStats{} + runtime.ReadMemStats(&mem) + curMem = mem.TotalAlloc/MiB - curMem + t.Logf("memory usage:%d MB", curMem) +} + // Contrast between goroutines without a pool and goroutines with ants pool. func TestNoPool(t *testing.T) { @@ -232,7 +308,7 @@ func TestPanicHandler(t *testing.T) { atomic.AddInt64(&panicCounter, 1) t.Logf("catch panic with PanicHandler: %v", p) })) - assert.NoErrorf(t, err, "create new pool failed: %v", err) + require.NoErrorf(t, err, "create new pool failed: %v", err) defer p0.Release() wg.Add(1) _ = p0.Submit(func() { @@ -240,20 +316,34 @@ func TestPanicHandler(t *testing.T) { }) wg.Wait() c := atomic.LoadInt64(&panicCounter) - assert.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c) - assert.EqualValues(t, 0, p0.Running(), "pool should be empty after panic") + require.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c) + require.EqualValues(t, 0, p0.Running(), "pool should be empty after panic") + p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }, WithPanicHandler(func(_ any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) })) - assert.NoErrorf(t, err, "create new pool with func failed: %v", err) + require.NoErrorf(t, err, "create new pool with func failed: %v", err) defer p1.Release() wg.Add(1) _ = p1.Invoke("Oops!") wg.Wait() c = atomic.LoadInt64(&panicCounter) - assert.EqualValuesf(t, 2, c, "panic handler didn't work, panicCounter: %d", c) - assert.EqualValues(t, 0, p1.Running(), "pool should be empty after panic") + require.EqualValuesf(t, 2, c, "panic handler didn't work, panicCounter: %d", c) + require.EqualValues(t, 0, p1.Running(), "pool should be empty after panic") + + p2, err := NewPoolWithFuncGeneric(10, func(s string) { panic(s) }, WithPanicHandler(func(_ any) { + defer wg.Done() + atomic.AddInt64(&panicCounter, 1) + })) + require.NoErrorf(t, err, "create new pool with func failed: %v", err) + defer p2.Release() + wg.Add(1) + _ = p2.Invoke("Oops!") + wg.Wait() + c = atomic.LoadInt64(&panicCounter) + require.EqualValuesf(t, 3, c, "panic handler didn't work, panicCounter: %d", c) + require.EqualValues(t, 0, p2.Running(), "pool should be empty after panic") } func TestPanicHandlerPreMalloc(t *testing.T) { @@ -264,7 +354,7 @@ func TestPanicHandlerPreMalloc(t *testing.T) { atomic.AddInt64(&panicCounter, 1) t.Logf("catch panic with PanicHandler: %v", p) })) - 
assert.NoErrorf(t, err, "create new pool failed: %v", err)
+	require.NoErrorf(t, err, "create new pool failed: %v", err)
 	defer p0.Release()
 	wg.Add(1)
 	_ = p0.Submit(func() {
@@ -272,41 +362,58 @@ func TestPanicHandlerPreMalloc(t *testing.T) {
 	})
 	wg.Wait()
 	c := atomic.LoadInt64(&panicCounter)
-	assert.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c)
-	assert.EqualValues(t, 0, p0.Running(), "pool should be empty after panic")
-	p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }, WithPanicHandler(func(_ any) {
+	require.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c)
+	require.EqualValues(t, 0, p0.Running(), "pool should be empty after panic")
+
+	p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }, WithPreAlloc(true), WithPanicHandler(func(_ any) {
 		defer wg.Done()
 		atomic.AddInt64(&panicCounter, 1)
 	}))
-	assert.NoErrorf(t, err, "create new pool with func failed: %v", err)
+	require.NoErrorf(t, err, "create new pool with func failed: %v", err)
 	defer p1.Release()
 	wg.Add(1)
 	_ = p1.Invoke("Oops!")
 	wg.Wait()
 	c = atomic.LoadInt64(&panicCounter)
-	assert.EqualValuesf(t, 2, c, "panic handler didn't work, panicCounter: %d", c)
-	assert.EqualValues(t, 0, p1.Running(), "pool should be empty after panic")
+	require.EqualValuesf(t, 2, c, "panic handler didn't work, panicCounter: %d", c)
+	require.EqualValues(t, 0, p1.Running(), "pool should be empty after panic")
+
+	p2, err := NewPoolWithFuncGeneric(10, func(p string) { panic(p) }, WithPreAlloc(true), WithPanicHandler(func(_ any) {
+		defer wg.Done()
+		atomic.AddInt64(&panicCounter, 1)
+	}))
+	require.NoErrorf(t, err, "create new pool with func failed: %v", err)
+	defer p2.Release()
+	wg.Add(1)
+	_ = p2.Invoke("Oops!")
+	wg.Wait()
+	c = atomic.LoadInt64(&panicCounter)
+	require.EqualValuesf(t, 3, c, "panic handler didn't work, panicCounter: %d", c)
+	require.EqualValues(t, 0, p2.Running(), "pool should be empty after panic")
 }
 
 func TestPoolPanicWithoutHandler(t *testing.T) {
 	p0, err := NewPool(10)
-	assert.NoErrorf(t, err, "create new pool failed: %v", err)
+	require.NoErrorf(t, err, "create new pool failed: %v", err)
 	defer p0.Release()
 	_ = p0.Submit(func() {
		panic("Oops!")
 	})
 
-	p1, err := NewPoolWithFunc(10, func(p any) {
-		panic(p)
-	})
-	assert.NoErrorf(t, err, "create new pool with func failed: %v", err)
+	p1, err := NewPoolWithFunc(10, func(p any) { panic(p) })
+	require.NoErrorf(t, err, "create new pool with func failed: %v", err)
 	defer p1.Release()
 	_ = p1.Invoke("Oops!")
+
+	p2, err := NewPoolWithFuncGeneric(10, func(p string) { panic(p) })
+	require.NoErrorf(t, err, "create new pool with func failed: %v", err)
+	defer p2.Release()
+	_ = p2.Invoke("Oops!")
 }
 
 func TestPoolPanicWithoutHandlerPreMalloc(t *testing.T) {
 	p0, err := NewPool(10, WithPreAlloc(true))
-	assert.NoErrorf(t, err, "create new pool failed: %v", err)
+	require.NoErrorf(t, err, "create new pool failed: %v", err)
 	defer p0.Release()
 	_ = p0.Submit(func() {
 		panic("Oops!")
@@ -315,11 +422,16 @@ func TestPoolPanicWithoutHandlerPreMalloc(t *testing.T) {
 	p1, err := NewPoolWithFunc(10, func(p any) {
 		panic(p)
 	})
-
-	assert.NoErrorf(t, err, "create new pool with func failed: %v", err)
-
+	require.NoErrorf(t, err, "create new pool with func failed: %v", err)
 	defer p1.Release()
 	_ = p1.Invoke("Oops!")
+
+	p2, err := NewPoolWithFuncGeneric(10, func(p any) {
+		panic(p)
+	})
+	require.NoErrorf(t, err, "create new pool with func failed: %v", err)
+	defer p2.Release()
+	_ = p2.Invoke("Oops!")
 }
 
 func TestPurgePool(t *testing.T) {
@@ -327,7 
@@ func TestPurgePool(t *testing.T) { ch := make(chan struct{}) p, err := NewPool(size) - assert.NoErrorf(t, err, "create TimingPool failed: %v", err) + require.NoErrorf(t, err, "create TimingPool failed: %v", err) defer p.Release() for i := 0; i < size; i++ { @@ -338,11 +450,11 @@ func TestPurgePool(t *testing.T) { time.Sleep(time.Duration(d) * time.Millisecond) }) } - assert.Equalf(t, size, p.Running(), "pool should be full, expected: %d, but got: %d", size, p.Running()) + require.Equalf(t, size, p.Running(), "pool should be full, expected: %d, but got: %d", size, p.Running()) close(ch) time.Sleep(5 * DefaultCleanIntervalTime) - assert.Equalf(t, 0, p.Running(), "pool should be empty after purge, but got %d", p.Running()) + require.Equalf(t, 0, p.Running(), "pool should be empty after purge, but got %d", p.Running()) ch = make(chan struct{}) f := func(i any) { @@ -352,41 +464,69 @@ func TestPurgePool(t *testing.T) { } p1, err := NewPoolWithFunc(size, f) - assert.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) + require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) defer p1.Release() for i := 0; i < size; i++ { _ = p1.Invoke(i) } - assert.Equalf(t, size, p1.Running(), "pool should be full, expected: %d, but got: %d", size, p1.Running()) + require.Equalf(t, size, p1.Running(), "pool should be full, expected: %d, but got: %d", size, p1.Running()) + + close(ch) + time.Sleep(5 * DefaultCleanIntervalTime) + require.Equalf(t, 0, p1.Running(), "pool should be empty after purge, but got %d", p1.Running()) + + ch = make(chan struct{}) + f1 := func(i int) { + <-ch + d := i % 100 + time.Sleep(time.Duration(d) * time.Millisecond) + } + + p2, err := NewPoolWithFuncGeneric(size, f1) + require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) + defer p2.Release() + + for i := 0; i < size; i++ { + _ = p2.Invoke(i) + } + require.Equalf(t, size, p2.Running(), "pool should be full, expected: %d, but got: %d", size, p2.Running()) close(ch) time.Sleep(5 * DefaultCleanIntervalTime) - assert.Equalf(t, 0, p1.Running(), "pool should be empty after purge, but got %d", p1.Running()) + require.Equalf(t, 0, p2.Running(), "pool should be empty after purge, but got %d", p2.Running()) } func TestPurgePreMallocPool(t *testing.T) { p, err := NewPool(10, WithPreAlloc(true)) - assert.NoErrorf(t, err, "create TimingPool failed: %v", err) + require.NoErrorf(t, err, "create TimingPool failed: %v", err) defer p.Release() _ = p.Submit(demoFunc) time.Sleep(3 * DefaultCleanIntervalTime) - assert.EqualValues(t, 0, p.Running(), "all p should be purged") + require.EqualValues(t, 0, p.Running(), "all p should be purged") + p1, err := NewPoolWithFunc(10, demoPoolFunc) - assert.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) + require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) defer p1.Release() _ = p1.Invoke(1) time.Sleep(3 * DefaultCleanIntervalTime) - assert.EqualValues(t, 0, p.Running(), "all p should be purged") + require.EqualValues(t, 0, p1.Running(), "all p should be purged") + + p2, err := NewPoolWithFuncGeneric(10, demoPoolFuncInt) + require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) + defer p2.Release() + _ = p2.Invoke(1) + time.Sleep(3 * DefaultCleanIntervalTime) + require.EqualValues(t, 0, p2.Running(), "all p should be purged") } func TestNonblockingSubmit(t *testing.T) { poolSize := 10 p, err := NewPool(poolSize, WithNonblocking(true)) - assert.NoErrorf(t, err, "create TimingPool failed: %v", err) + require.NoErrorf(t, err, 
"create TimingPool failed: %v", err) defer p.Release() for i := 0; i < poolSize-1; i++ { - assert.NoError(t, p.Submit(longRunningFunc), "nonblocking submit when pool is not full shouldn't return error") + require.NoError(t, p.Submit(longRunningFunc), "nonblocking submit when pool is not full shouldn't return error") } ch := make(chan struct{}) ch1 := make(chan struct{}) @@ -395,29 +535,29 @@ func TestNonblockingSubmit(t *testing.T) { close(ch1) } // p is full now. - assert.NoError(t, p.Submit(f), "nonblocking submit when pool is not full shouldn't return error") - assert.EqualError(t, p.Submit(demoFunc), ErrPoolOverload.Error(), + require.NoError(t, p.Submit(f), "nonblocking submit when pool is not full shouldn't return error") + require.ErrorIsf(t, p.Submit(demoFunc), ErrPoolOverload, "nonblocking submit when pool is full should get an ErrPoolOverload") // interrupt f to get an available worker close(ch) <-ch1 - assert.NoError(t, p.Submit(demoFunc), "nonblocking submit when pool is not full shouldn't return error") + require.NoError(t, p.Submit(demoFunc), "nonblocking submit when pool is not full shouldn't return error") } func TestMaxBlockingSubmit(t *testing.T) { poolSize := 10 p, err := NewPool(poolSize, WithMaxBlockingTasks(1)) - assert.NoErrorf(t, err, "create TimingPool failed: %v", err) + require.NoErrorf(t, err, "create TimingPool failed: %v", err) defer p.Release() for i := 0; i < poolSize-1; i++ { - assert.NoError(t, p.Submit(longRunningFunc), "submit when pool is not full shouldn't return error") + require.NoError(t, p.Submit(longRunningFunc), "submit when pool is not full shouldn't return error") } ch := make(chan struct{}) f := func() { <-ch } // p is full now. - assert.NoError(t, p.Submit(f), "submit when pool is not full shouldn't return error") + require.NoError(t, p.Submit(f), "submit when pool is not full shouldn't return error") var wg sync.WaitGroup wg.Add(1) errCh := make(chan error, 1) @@ -430,7 +570,7 @@ func TestMaxBlockingSubmit(t *testing.T) { }() time.Sleep(1 * time.Second) // already reached max blocking limit - assert.EqualError(t, p.Submit(demoFunc), ErrPoolOverload.Error(), + require.ErrorIsf(t, p.Submit(demoFunc), ErrPoolOverload, "blocking submit when pool reach max blocking submit should return ErrPoolOverload") // interrupt f to make blocking submit successful. close(ch) @@ -444,52 +584,115 @@ func TestMaxBlockingSubmit(t *testing.T) { func TestNonblockingSubmitWithFunc(t *testing.T) { poolSize := 10 + ch := make(chan struct{}) var wg sync.WaitGroup p, err := NewPoolWithFunc(poolSize, func(i any) { longRunningPoolFunc(i) wg.Done() }, WithNonblocking(true)) - assert.NoError(t, err, "create TimingPool failed: %v", err) + require.NoError(t, err, "create TimingPool failed: %v", err) + defer p.Release() + wg.Add(poolSize) + for i := 0; i < poolSize-1; i++ { + require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") + } + // p is full now. 
+ require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") + require.ErrorIsf(t, p.Invoke(nil), ErrPoolOverload, + "nonblocking submit when pool is full should get an ErrPoolOverload") + // interrupt f to get an available worker + close(ch) + wg.Wait() + wg.Add(1) + require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") + wg.Wait() +} + +func TestNonblockingSubmitWithFuncGeneric(t *testing.T) { + poolSize := 10 + var wg sync.WaitGroup + p, err := NewPoolWithFuncGeneric(poolSize, func(ch chan struct{}) { + longRunningPoolFuncCh(ch) + wg.Done() + }, WithNonblocking(true)) + require.NoError(t, err, "create TimingPool failed: %v", err) defer p.Release() ch := make(chan struct{}) wg.Add(poolSize) for i := 0; i < poolSize-1; i++ { - assert.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") + require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") } // p is full now. - assert.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") - assert.EqualError(t, p.Invoke(nil), ErrPoolOverload.Error(), + require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") + require.ErrorIsf(t, p.Invoke(nil), ErrPoolOverload, "nonblocking submit when pool is full should get an ErrPoolOverload") // interrupt f to get an available worker close(ch) wg.Wait() - assert.NoError(t, p.Invoke(nil), "nonblocking submit when pool is not full shouldn't return error") + wg.Add(1) + require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") + wg.Wait() } func TestMaxBlockingSubmitWithFunc(t *testing.T) { + ch := make(chan struct{}) poolSize := 10 p, err := NewPoolWithFunc(poolSize, longRunningPoolFunc, WithMaxBlockingTasks(1)) - assert.NoError(t, err, "create TimingPool failed: %v", err) + require.NoError(t, err, "create TimingPool failed: %v", err) defer p.Release() for i := 0; i < poolSize-1; i++ { - assert.NoError(t, p.Invoke(Param), "submit when pool is not full shouldn't return error") + require.NoError(t, p.Invoke(ch), "submit when pool is not full shouldn't return error") } + // p is full now. + require.NoError(t, p.Invoke(ch), "submit when pool is not full shouldn't return error") + var wg sync.WaitGroup + wg.Add(1) + errCh := make(chan error, 1) + go func() { + // should be blocked. blocking num == 1 + if err := p.Invoke(ch); err != nil { + errCh <- err + } + wg.Done() + }() + time.Sleep(1 * time.Second) + // already reached max blocking limit + require.ErrorIsf(t, p.Invoke(ch), ErrPoolOverload, + "blocking submit when pool reach max blocking submit should return ErrPoolOverload: %v", err) + // interrupt one func to make blocking submit successful. + close(ch) + wg.Wait() + select { + case <-errCh: + t.Fatalf("blocking submit when pool is full should not return error") + default: + } +} + +func TestMaxBlockingSubmitWithFuncGeneric(t *testing.T) { + poolSize := 10 + p, err := NewPoolWithFuncGeneric(poolSize, longRunningPoolFuncCh, WithMaxBlockingTasks(1)) + require.NoError(t, err, "create TimingPool failed: %v", err) + defer p.Release() ch := make(chan struct{}) + for i := 0; i < poolSize-1; i++ { + require.NoError(t, p.Invoke(ch), "submit when pool is not full shouldn't return error") + } // p is full now. 
- assert.NoError(t, p.Invoke(ch), "submit when pool is not full shouldn't return error") + require.NoError(t, p.Invoke(ch), "submit when pool is not full shouldn't return error") var wg sync.WaitGroup wg.Add(1) errCh := make(chan error, 1) go func() { // should be blocked. blocking num == 1 - if err := p.Invoke(Param); err != nil { + if err := p.Invoke(ch); err != nil { errCh <- err } wg.Done() }() time.Sleep(1 * time.Second) // already reached max blocking limit - assert.EqualErrorf(t, p.Invoke(Param), ErrPoolOverload.Error(), + require.ErrorIsf(t, p.Invoke(ch), ErrPoolOverload, "blocking submit when pool reach max blocking submit should return ErrPoolOverload: %v", err) // interrupt one func to make blocking submit successful. close(ch) @@ -511,18 +714,18 @@ func TestRebootDefaultPool(t *testing.T) { wg.Done() }) wg.Wait() - assert.NoError(t, ReleaseTimeout(time.Second)) - assert.EqualError(t, Submit(nil), ErrPoolClosed.Error(), "pool should be closed") + require.NoError(t, ReleaseTimeout(time.Second)) + require.ErrorIsf(t, Submit(nil), ErrPoolClosed, "pool should be closed") Reboot() wg.Add(1) - assert.NoError(t, Submit(func() { wg.Done() }), "pool should be rebooted") + require.NoError(t, Submit(func() { wg.Done() }), "pool should be rebooted") wg.Wait() } func TestRebootNewPool(t *testing.T) { var wg sync.WaitGroup p, err := NewPool(10) - assert.NoErrorf(t, err, "create Pool failed: %v", err) + require.NoErrorf(t, err, "create Pool failed: %v", err) defer p.Release() wg.Add(1) _ = p.Submit(func() { @@ -530,27 +733,43 @@ func TestRebootNewPool(t *testing.T) { wg.Done() }) wg.Wait() - assert.NoError(t, p.ReleaseTimeout(time.Second)) - assert.EqualError(t, p.Submit(nil), ErrPoolClosed.Error(), "pool should be closed") + require.NoError(t, p.ReleaseTimeout(time.Second)) + require.ErrorIsf(t, p.Submit(nil), ErrPoolClosed, "pool should be closed") p.Reboot() wg.Add(1) - assert.NoError(t, p.Submit(func() { wg.Done() }), "pool should be rebooted") + require.NoError(t, p.Submit(func() { wg.Done() }), "pool should be rebooted") wg.Wait() p1, err := NewPoolWithFunc(10, func(i any) { demoPoolFunc(i) wg.Done() }) - assert.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) + require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) defer p1.Release() wg.Add(1) _ = p1.Invoke(1) wg.Wait() - assert.NoError(t, p1.ReleaseTimeout(time.Second)) - assert.EqualError(t, p1.Invoke(nil), ErrPoolClosed.Error(), "pool should be closed") + require.NoError(t, p1.ReleaseTimeout(time.Second)) + require.ErrorIsf(t, p1.Invoke(nil), ErrPoolClosed, "pool should be closed") p1.Reboot() wg.Add(1) - assert.NoError(t, p1.Invoke(1), "pool should be rebooted") + require.NoError(t, p1.Invoke(1), "pool should be rebooted") + wg.Wait() + + p2, err := NewPoolWithFuncGeneric(10, func(i int) { + demoPoolFuncInt(i) + wg.Done() + }) + require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) + defer p2.Release() + wg.Add(1) + _ = p2.Invoke(1) + wg.Wait() + require.NoError(t, p2.ReleaseTimeout(time.Second)) + require.ErrorIsf(t, p2.Invoke(1), ErrPoolClosed, "pool should be closed") + p2.Reboot() + wg.Add(1) + require.NoError(t, p2.Invoke(1), "pool should be rebooted") wg.Wait() } @@ -575,7 +794,7 @@ func TestInfinitePool(t *testing.T) { } var err error _, err = NewPool(-1, WithPreAlloc(true)) - assert.EqualErrorf(t, err, ErrInvalidPreAllocSize.Error(), "") + require.EqualErrorf(t, err, ErrInvalidPreAllocSize.Error(), "") } func testPoolWithDisablePurge(t *testing.T, p *Pool, numWorker int, 
waitForPurge time.Duration) { @@ -593,9 +812,9 @@ func testPoolWithDisablePurge(t *testing.T, p *Pool, numWorker int, waitForPurge wg1.Wait() runningCnt := p.Running() - assert.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) + require.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) freeCnt := p.Free() - assert.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) + require.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) // Finish all tasks and sleep for a while to wait for purging, since we've disabled purge mechanism, // we should see that all workers are still running after the sleep. @@ -604,17 +823,17 @@ func testPoolWithDisablePurge(t *testing.T, p *Pool, numWorker int, waitForPurge time.Sleep(waitForPurge + waitForPurge/2) runningCnt = p.Running() - assert.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) + require.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) freeCnt = p.Free() - assert.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) + require.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) err := p.ReleaseTimeout(waitForPurge + waitForPurge/2) - assert.NoErrorf(t, err, "release pool failed: %v", err) + require.NoErrorf(t, err, "release pool failed: %v", err) runningCnt = p.Running() - assert.EqualValuesf(t, 0, runningCnt, "expect %d workers running, but got %d", 0, runningCnt) + require.EqualValuesf(t, 0, runningCnt, "expect %d workers running, but got %d", 0, runningCnt) freeCnt = p.Free() - assert.EqualValuesf(t, numWorker, freeCnt, "expect %d free workers, but got %d", numWorker, freeCnt) + require.EqualValuesf(t, numWorker, freeCnt, "expect %d free workers, but got %d", numWorker, freeCnt) } func TestWithDisablePurgePool(t *testing.T) { @@ -637,9 +856,9 @@ func testPoolFuncWithDisablePurge(t *testing.T, p *PoolWithFunc, numWorker int, wg1.Wait() runningCnt := p.Running() - assert.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) + require.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) freeCnt := p.Free() - assert.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) + require.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) // Finish all tasks and sleep for a while to wait for purging, since we've disabled purge mechanism, // we should see that all workers are still running after the sleep. 
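// The DisablePurge helpers above pin down the contract: with the scavenger
// disabled, idle workers are never reclaimed, so Running() stays at numWorker
// until Release/ReleaseTimeout tears the pool down. A caller-side sketch of
// that behavior, as these tests assert it:
//
//	p, _ := ants.NewPool(4, ants.WithDisablePurge(true))
//	// ...saturate the pool, then let every task finish...
//	// p.Running() still reports 4: no purge goroutine is running.
//	_ = p.ReleaseTimeout(time.Second) // the only way those 4 workers exit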
@@ -648,17 +867,17 @@ func testPoolFuncWithDisablePurge(t *testing.T, p *PoolWithFunc, numWorker int, time.Sleep(waitForPurge + waitForPurge/2) runningCnt = p.Running() - assert.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) + require.EqualValuesf(t, numWorker, runningCnt, "expect %d workers running, but got %d", numWorker, runningCnt) freeCnt = p.Free() - assert.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) + require.EqualValuesf(t, 0, freeCnt, "expect %d free workers, but got %d", 0, freeCnt) err := p.ReleaseTimeout(waitForPurge + waitForPurge/2) - assert.NoErrorf(t, err, "release pool failed: %v", err) + require.NoErrorf(t, err, "release pool failed: %v", err) runningCnt = p.Running() - assert.EqualValuesf(t, 0, runningCnt, "expect %d workers running, but got %d", 0, runningCnt) + require.EqualValuesf(t, 0, runningCnt, "expect %d workers running, but got %d", 0, runningCnt) freeCnt = p.Free() - assert.EqualValuesf(t, numWorker, freeCnt, "expect %d free workers, but got %d", numWorker, freeCnt) + require.EqualValuesf(t, numWorker, freeCnt, "expect %d free workers, but got %d", numWorker, freeCnt) } func TestWithDisablePurgePoolFunc(t *testing.T) { @@ -692,10 +911,12 @@ func TestWithDisablePurgeAndWithExpirationPoolFunc(t *testing.T) { func TestInfinitePoolWithFunc(t *testing.T) { c := make(chan struct{}) - p, _ := NewPoolWithFunc(-1, func(i any) { + p, err := NewPoolWithFunc(-1, func(i any) { demoPoolFunc(i) <-c }) + require.NoErrorf(t, err, "create pool with func failed: %v", err) + defer p.Release() _ = p.Invoke(10) _ = p.Invoke(10) c <- struct{}{} @@ -710,16 +931,40 @@ func TestInfinitePoolWithFunc(t *testing.T) { if capacity := p.Cap(); capacity != -1 { t.Fatalf("expect capacity: -1 but got %d", capacity) } - var err error _, err = NewPoolWithFunc(-1, demoPoolFunc, WithPreAlloc(true)) - if err != ErrInvalidPreAllocSize { - t.Errorf("expect ErrInvalidPreAllocSize but got %v", err) + require.ErrorIsf(t, err, ErrInvalidPreAllocSize, "expect ErrInvalidPreAllocSize but got %v", err) +} + +func TestInfinitePoolWithFuncGeneric(t *testing.T) { + c := make(chan struct{}) + p, err := NewPoolWithFuncGeneric(-1, func(i int) { + demoPoolFuncInt(i) + <-c + }) + require.NoErrorf(t, err, "create pool with func failed: %v", err) + defer p.Release() + _ = p.Invoke(10) + _ = p.Invoke(10) + c <- struct{}{} + c <- struct{}{} + if n := p.Running(); n != 2 { + t.Errorf("expect 2 workers running, but got %d", n) } + if n := p.Free(); n != -1 { + t.Errorf("expect -1 of free workers by unlimited pool, but got %d", n) + } + p.Tune(10) + if capacity := p.Cap(); capacity != -1 { + t.Fatalf("expect capacity: -1 but got %d", capacity) + } + _, err = NewPoolWithFuncGeneric(-1, demoPoolFuncInt, WithPreAlloc(true)) + require.ErrorIsf(t, err, ErrInvalidPreAllocSize, "expect ErrInvalidPreAllocSize but got %v", err) } func TestReleaseWhenRunningPool(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPool(1) + p, err := NewPool(1) + require.NoErrorf(t, err, "create pool failed: %v", err) wg.Add(2) go func() { t.Log("start aaa") @@ -759,10 +1004,12 @@ func TestReleaseWhenRunningPool(t *testing.T) { func TestReleaseWhenRunningPoolWithFunc(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFunc(1, func(i any) { + p, err := NewPoolWithFunc(1, func(i any) { t.Log("do task", i) time.Sleep(1 * time.Second) }) + require.NoErrorf(t, err, "create pool with func failed: %v", err) + wg.Add(2) go func() { t.Log("start aaa") @@ -792,15 
+1039,61 @@ func TestReleaseWhenRunningPoolWithFunc(t *testing.T) { wg.Wait() } +func TestReleaseWhenRunningPoolWithFuncGeneric(t *testing.T) { + var wg sync.WaitGroup + p, err := NewPoolWithFuncGeneric(1, func(i int) { + t.Log("do task", i) + time.Sleep(1 * time.Second) + }) + require.NoErrorf(t, err, "create pool with func failed: %v", err) + wg.Add(2) + + go func() { + t.Log("start aaa") + defer func() { + wg.Done() + t.Log("stop aaa") + }() + for i := 0; i < 30; i++ { + _ = p.Invoke(i) + } + }() + + go func() { + t.Log("start bbb") + defer func() { + wg.Done() + t.Log("stop bbb") + }() + for i := 100; i < 130; i++ { + _ = p.Invoke(i) + } + }() + + time.Sleep(3 * time.Second) + p.Release() + t.Log("wait for all goroutines to exit...") + wg.Wait() +} + func TestRestCodeCoverage(t *testing.T) { _, err := NewPool(-1, WithExpiryDuration(-1)) - t.Log(err) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) _, err = NewPool(1, WithExpiryDuration(-1)) - t.Log(err) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) _, err = NewPoolWithFunc(-1, demoPoolFunc, WithExpiryDuration(-1)) - t.Log(err) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) _, err = NewPoolWithFunc(1, demoPoolFunc, WithExpiryDuration(-1)) - t.Log(err) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) + _, err = NewPoolWithFunc(1, nil, WithExpiryDuration(-1)) + require.ErrorIs(t, err, ErrLackPoolFunc) + _, err = NewPoolWithFuncGeneric(-1, demoPoolFuncInt, WithExpiryDuration(-1)) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) + _, err = NewPoolWithFuncGeneric(1, demoPoolFuncInt, WithExpiryDuration(-1)) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) + var fn func(i int) + _, err = NewPoolWithFuncGeneric(1, fn, WithExpiryDuration(-1)) + require.ErrorIs(t, err, ErrLackPoolFunc) options := Options{} options.ExpiryDuration = time.Duration(10) * time.Second @@ -824,74 +1117,106 @@ func TestRestCodeCoverage(t *testing.T) { p0.Tune(TestSize / 10) t.Logf("pool, after tuning capacity, capacity:%d, running:%d", p0.Cap(), p0.Running()) - pprem, _ := NewPool(TestSize, WithPreAlloc(true)) + p1, _ := NewPool(TestSize, WithPreAlloc(true)) defer func() { - _ = pprem.Submit(demoFunc) + _ = p1.Submit(demoFunc) }() - defer pprem.Release() + defer p1.Release() for i := 0; i < n; i++ { - _ = pprem.Submit(demoFunc) + _ = p1.Submit(demoFunc) } - t.Logf("pre-malloc pool, capacity:%d", pprem.Cap()) - t.Logf("pre-malloc pool, running workers number:%d", pprem.Running()) - t.Logf("pre-malloc pool, free workers number:%d", pprem.Free()) - pprem.Tune(TestSize) - pprem.Tune(TestSize / 10) - t.Logf("pre-malloc pool, after tuning capacity, capacity:%d, running:%d", pprem.Cap(), pprem.Running()) - - p, _ := NewPoolWithFunc(TestSize, demoPoolFunc) + t.Logf("pre-malloc pool, capacity:%d", p1.Cap()) + t.Logf("pre-malloc pool, running workers number:%d", p1.Running()) + t.Logf("pre-malloc pool, free workers number:%d", p1.Free()) + p1.Tune(TestSize) + p1.Tune(TestSize / 10) + t.Logf("pre-malloc pool, after tuning capacity, capacity:%d, running:%d", p1.Cap(), p1.Running()) + + p2, _ := NewPoolWithFunc(TestSize, demoPoolFunc) defer func() { - _ = p.Invoke(Param) + _ = p2.Invoke(Param) }() - defer p.Release() + defer p2.Release() for i := 0; i < n; i++ { - _ = p.Invoke(Param) + _ = p2.Invoke(Param) } time.Sleep(DefaultCleanIntervalTime) - t.Logf("pool with func, capacity:%d", p.Cap()) - t.Logf("pool with func, running workers number:%d", p.Running()) - t.Logf("pool with func, free workers number:%d", p.Free()) - p.Tune(TestSize) - p.Tune(TestSize / 10) - t.Logf("pool with 
func, after tuning capacity, capacity:%d, running:%d", p.Cap(), p.Running()) - - ppremWithFunc, _ := NewPoolWithFunc(TestSize, demoPoolFunc, WithPreAlloc(true)) + t.Logf("pool with func, capacity:%d", p2.Cap()) + t.Logf("pool with func, running workers number:%d", p2.Running()) + t.Logf("pool with func, free workers number:%d", p2.Free()) + p2.Tune(TestSize) + p2.Tune(TestSize / 10) + t.Logf("pool with func, after tuning capacity, capacity:%d, running:%d", p2.Cap(), p2.Running()) + + p3, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt) defer func() { - _ = ppremWithFunc.Invoke(Param) + _ = p3.Invoke(Param) }() - defer ppremWithFunc.Release() + defer p3.Release() for i := 0; i < n; i++ { - _ = ppremWithFunc.Invoke(Param) + _ = p3.Invoke(Param) } time.Sleep(DefaultCleanIntervalTime) - t.Logf("pre-malloc pool with func, capacity:%d", ppremWithFunc.Cap()) - t.Logf("pre-malloc pool with func, running workers number:%d", ppremWithFunc.Running()) - t.Logf("pre-malloc pool with func, free workers number:%d", ppremWithFunc.Free()) - ppremWithFunc.Tune(TestSize) - ppremWithFunc.Tune(TestSize / 10) - t.Logf("pre-malloc pool with func, after tuning capacity, capacity:%d, running:%d", ppremWithFunc.Cap(), - ppremWithFunc.Running()) + t.Logf("pool with func, capacity:%d", p3.Cap()) + t.Logf("pool with func, running workers number:%d", p3.Running()) + t.Logf("pool with func, free workers number:%d", p3.Free()) + p3.Tune(TestSize) + p3.Tune(TestSize / 10) + t.Logf("pool with func, after tuning capacity, capacity:%d, running:%d", p3.Cap(), p3.Running()) + + p4, _ := NewPoolWithFunc(TestSize, demoPoolFunc, WithPreAlloc(true)) + defer func() { + _ = p4.Invoke(Param) + }() + defer p4.Release() + for i := 0; i < n; i++ { + _ = p4.Invoke(Param) + } + time.Sleep(DefaultCleanIntervalTime) + t.Logf("pre-malloc pool with func, capacity:%d", p4.Cap()) + t.Logf("pre-malloc pool with func, running workers number:%d", p4.Running()) + t.Logf("pre-malloc pool with func, free workers number:%d", p4.Free()) + p4.Tune(TestSize) + p4.Tune(TestSize / 10) + t.Logf("pre-malloc pool with func, after tuning capacity, capacity:%d, running:%d", p4.Cap(), + p4.Running()) + + p5, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt, WithPreAlloc(true)) + defer func() { + _ = p5.Invoke(Param) + }() + defer p5.Release() + for i := 0; i < n; i++ { + _ = p5.Invoke(Param) + } + time.Sleep(DefaultCleanIntervalTime) + t.Logf("pre-malloc pool with func, capacity:%d", p5.Cap()) + t.Logf("pre-malloc pool with func, running workers number:%d", p5.Running()) + t.Logf("pre-malloc pool with func, free workers number:%d", p5.Free()) + p5.Tune(TestSize) + p5.Tune(TestSize / 10) + t.Logf("pre-malloc pool with func, after tuning capacity, capacity:%d, running:%d", p5.Cap(), + p5.Running()) } func TestPoolTuneScaleUp(t *testing.T) { c := make(chan struct{}) + // Test Pool p, _ := NewPool(2) for i := 0; i < 2; i++ { _ = p.Submit(func() { <-c }) } - if n := p.Running(); n != 2 { - t.Errorf("expect 2 workers running, but got %d", n) - } + n := p.Running() + require.EqualValuesf(t, 2, n, "expect 2 workers running, but got %d", p.Running()) // test pool tune scale up one p.Tune(3) _ = p.Submit(func() { <-c }) - if n := p.Running(); n != 3 { - t.Errorf("expect 3 workers running, but got %d", n) - } + n = p.Running() + require.EqualValuesf(t, 3, n, "expect 3 workers running, but got %d", n) // test pool tune scale up multiple var wg sync.WaitGroup for i := 0; i < 5; i++ { @@ -905,73 +1230,111 @@ func TestPoolTuneScaleUp(t *testing.T) { } p.Tune(8) 
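// Note on the Tune(8) call above: it grows capacity from 3 to 8 while five
// submitters are parked in retrieveWorker; per the Tune implementation earlier
// in this diff, a growth of more than one triggers cond.Broadcast, so all five
// blocked goroutines wake up and the wg.Wait() below can return.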
wg.Wait() - if n := p.Running(); n != 8 { - t.Errorf("expect 8 workers running, but got %d", n) - } + n = p.Running() + require.EqualValuesf(t, 8, n, "expect 8 workers running, but got %d", n) for i := 0; i < 8; i++ { c <- struct{}{} } p.Release() - // test PoolWithFunc + // Test PoolWithFunc pf, _ := NewPoolWithFunc(2, func(_ any) { <-c }) for i := 0; i < 2; i++ { _ = pf.Invoke(1) } - if n := pf.Running(); n != 2 { - t.Errorf("expect 2 workers running, but got %d", n) - } + n = pf.Running() + require.EqualValuesf(t, 2, n, "expect 2 workers running, but got %d", n) // test pool tune scale up one pf.Tune(3) _ = pf.Invoke(1) - if n := pf.Running(); n != 3 { - t.Errorf("expect 3 workers running, but got %d", n) - } + n = pf.Running() + require.EqualValuesf(t, 3, n, "expect 3 workers running, but got %d", n) // test pool tune scale up multiple - var pfwg sync.WaitGroup for i := 0; i < 5; i++ { - pfwg.Add(1) + wg.Add(1) go func() { - defer pfwg.Done() + defer wg.Done() _ = pf.Invoke(1) }() } pf.Tune(8) - pfwg.Wait() - if n := pf.Running(); n != 8 { - t.Errorf("expect 8 workers running, but got %d", n) + wg.Wait() + n = pf.Running() + require.EqualValuesf(t, 8, n, "expect 8 workers running, but got %d", n) + for i := 0; i < 8; i++ { + c <- struct{}{} + } + pf.Release() + + // Test PoolWithFuncGeneric + pfg, _ := NewPoolWithFuncGeneric(2, func(_ int) { + <-c + }) + for i := 0; i < 2; i++ { + _ = pfg.Invoke(1) } + n = pfg.Running() + require.EqualValuesf(t, 2, n, "expect 2 workers running, but got %d", n) + // test pool tune scale up one + pfg.Tune(3) + _ = pfg.Invoke(1) + n = pfg.Running() + require.EqualValuesf(t, 3, n, "expect 3 workers running, but got %d", n) + // test pool tune scale up multiple + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _ = pfg.Invoke(1) + }() + } + pfg.Tune(8) + wg.Wait() + n = pfg.Running() + require.EqualValuesf(t, 8, n, "expect 8 workers running, but got %d", n) for i := 0; i < 8; i++ { c <- struct{}{} } close(c) - pf.Release() + pfg.Release() } func TestReleaseTimeout(t *testing.T) { - p, _ := NewPool(10) + p, err := NewPool(10) + require.NoError(t, err) for i := 0; i < 5; i++ { _ = p.Submit(func() { time.Sleep(time.Second) }) } - assert.NotZero(t, p.Running()) - err := p.ReleaseTimeout(2 * time.Second) - assert.NoError(t, err) + require.NotZero(t, p.Running()) + err = p.ReleaseTimeout(2 * time.Second) + require.NoError(t, err) - var pf *PoolWithFunc - pf, _ = NewPoolWithFunc(10, func(i any) { + pf, err := NewPoolWithFunc(10, func(i any) { dur := i.(time.Duration) time.Sleep(dur) }) + require.NoError(t, err) for i := 0; i < 5; i++ { _ = pf.Invoke(time.Second) } - assert.NotZero(t, pf.Running()) + require.NotZero(t, pf.Running()) err = pf.ReleaseTimeout(2 * time.Second) - assert.NoError(t, err) + require.NoError(t, err) + + pfg, err := NewPoolWithFuncGeneric(10, func(d time.Duration) { + time.Sleep(d) + }) + require.NoError(t, err) + for i := 0; i < 5; i++ { + _ = pfg.Invoke(time.Second) + } + require.NotZero(t, pfg.Running()) + err = pfg.ReleaseTimeout(2 * time.Second) + require.NoError(t, err) } func TestDefaultPoolReleaseTimeout(t *testing.T) { @@ -981,50 +1344,56 @@ func TestDefaultPoolReleaseTimeout(t *testing.T) { time.Sleep(time.Second) }) } - assert.NotZero(t, Running()) + require.NotZero(t, Running()) err := ReleaseTimeout(2 * time.Second) - assert.NoError(t, err) + require.NoError(t, err) } func TestMultiPool(t *testing.T) { - _, err := NewMultiPool(10, -1, 8) - assert.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) + _, err := 
NewMultiPool(-1, 10, 8) + require.ErrorIs(t, err, ErrInvalidMultiPoolSize) + _, err = NewMultiPool(10, -1, 8) + require.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) + _, err = NewMultiPool(10, 10, RoundRobin, WithExpiryDuration(-1)) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) mp, err := NewMultiPool(10, 5, RoundRobin) testFn := func() { for i := 0; i < 50; i++ { err = mp.Submit(longRunningFunc) - assert.NoError(t, err) + require.NoError(t, err) } - assert.EqualValues(t, mp.Waiting(), 0) + require.EqualValues(t, mp.Waiting(), 0) _, err = mp.WaitingByIndex(-1) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ErrInvalidPoolIndex) _, err = mp.WaitingByIndex(11) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) - assert.EqualValues(t, 50, mp.Running()) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 50, mp.Running()) _, err = mp.RunningByIndex(-1) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ErrInvalidPoolIndex) _, err = mp.RunningByIndex(11) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) - assert.EqualValues(t, 0, mp.Free()) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 0, mp.Free()) _, err = mp.FreeByIndex(-1) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ErrInvalidPoolIndex) _, err = mp.FreeByIndex(11) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) - assert.EqualValues(t, 50, mp.Cap()) - assert.False(t, mp.IsClosed()) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 50, mp.Cap()) + require.False(t, mp.IsClosed()) for i := 0; i < 10; i++ { n, _ := mp.WaitingByIndex(i) - assert.EqualValues(t, 0, n) + require.EqualValues(t, 0, n) n, _ = mp.RunningByIndex(i) - assert.EqualValues(t, 5, n) + require.EqualValues(t, 5, n) n, _ = mp.FreeByIndex(i) - assert.EqualValues(t, 0, n) + require.EqualValues(t, 0, n) } atomic.StoreInt32(&stopLongRunningFunc, 1) - assert.NoError(t, mp.ReleaseTimeout(3*time.Second)) - assert.Zero(t, mp.Running()) - assert.True(t, mp.IsClosed()) + require.NoError(t, mp.ReleaseTimeout(3*time.Second)) + require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ErrPoolClosed) + require.ErrorIs(t, mp.Submit(nil), ErrPoolClosed) + require.Zero(t, mp.Running()) + require.True(t, mp.IsClosed()) atomic.StoreInt32(&stopLongRunningFunc, 0) } testFn() @@ -1042,45 +1411,52 @@ func TestMultiPool(t *testing.T) { } func TestMultiPoolWithFunc(t *testing.T) { - _, err := NewMultiPoolWithFunc(10, -1, longRunningPoolFunc, 8) - assert.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) + _, err := NewMultiPoolWithFunc(-1, 10, longRunningPoolFunc, 8) + require.ErrorIs(t, err, ErrInvalidMultiPoolSize) + _, err = NewMultiPoolWithFunc(10, -1, longRunningPoolFunc, 8) + require.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) + _, err = NewMultiPoolWithFunc(10, 10, longRunningPoolFunc, RoundRobin, WithExpiryDuration(-1)) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) + ch := make(chan struct{}) mp, err := NewMultiPoolWithFunc(10, 5, longRunningPoolFunc, RoundRobin) testFn := func() { for i := 0; i < 50; i++ { - err = mp.Invoke(i) - assert.NoError(t, err) + err = mp.Invoke(ch) + require.NoError(t, err) } - assert.EqualValues(t, mp.Waiting(), 0) + require.EqualValues(t, mp.Waiting(), 0) _, err = mp.WaitingByIndex(-1) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ErrInvalidPoolIndex) _, err = mp.WaitingByIndex(11) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) - assert.EqualValues(t, 50, mp.Running()) + require.ErrorIs(t, err, 
ErrInvalidPoolIndex) + require.EqualValues(t, 50, mp.Running()) _, err = mp.RunningByIndex(-1) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ErrInvalidPoolIndex) _, err = mp.RunningByIndex(11) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) - assert.EqualValues(t, 0, mp.Free()) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 0, mp.Free()) _, err = mp.FreeByIndex(-1) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ErrInvalidPoolIndex) _, err = mp.FreeByIndex(11) - assert.ErrorIs(t, err, ErrInvalidPoolIndex) - assert.EqualValues(t, 50, mp.Cap()) - assert.False(t, mp.IsClosed()) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 50, mp.Cap()) + require.False(t, mp.IsClosed()) for i := 0; i < 10; i++ { n, _ := mp.WaitingByIndex(i) - assert.EqualValues(t, 0, n) + require.EqualValues(t, 0, n) n, _ = mp.RunningByIndex(i) - assert.EqualValues(t, 5, n) + require.EqualValues(t, 5, n) n, _ = mp.FreeByIndex(i) - assert.EqualValues(t, 0, n) + require.EqualValues(t, 0, n) } - atomic.StoreInt32(&stopLongRunningPoolFunc, 1) - assert.NoError(t, mp.ReleaseTimeout(3*time.Second)) - assert.Zero(t, mp.Running()) - assert.True(t, mp.IsClosed()) - atomic.StoreInt32(&stopLongRunningPoolFunc, 0) + close(ch) + require.NoError(t, mp.ReleaseTimeout(3*time.Second)) + require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ErrPoolClosed) + require.ErrorIs(t, mp.Invoke(nil), ErrPoolClosed) + require.Zero(t, mp.Running()) + require.True(t, mp.IsClosed()) + ch = make(chan struct{}) } testFn() @@ -1095,3 +1471,65 @@ func TestMultiPoolWithFunc(t *testing.T) { mp.Tune(10) } + +func TestMultiPoolWithFuncGeneric(t *testing.T) { + _, err := NewMultiPoolWithFuncGeneric(-1, 10, longRunningPoolFuncCh, 8) + require.ErrorIs(t, err, ErrInvalidMultiPoolSize) + _, err = NewMultiPoolWithFuncGeneric(10, -1, longRunningPoolFuncCh, 8) + require.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) + _, err = NewMultiPoolWithFuncGeneric(10, 10, longRunningPoolFuncCh, RoundRobin, WithExpiryDuration(-1)) + require.ErrorIs(t, err, ErrInvalidPoolExpiry) + + ch := make(chan struct{}) + mp, err := NewMultiPoolWithFuncGeneric(10, 5, longRunningPoolFuncCh, RoundRobin) + testFn := func() { + for i := 0; i < 50; i++ { + err = mp.Invoke(ch) + require.NoError(t, err) + } + require.EqualValues(t, mp.Waiting(), 0) + _, err = mp.WaitingByIndex(-1) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + _, err = mp.WaitingByIndex(11) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 50, mp.Running()) + _, err = mp.RunningByIndex(-1) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + _, err = mp.RunningByIndex(11) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 0, mp.Free()) + _, err = mp.FreeByIndex(-1) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + _, err = mp.FreeByIndex(11) + require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.EqualValues(t, 50, mp.Cap()) + require.False(t, mp.IsClosed()) + for i := 0; i < 10; i++ { + n, _ := mp.WaitingByIndex(i) + require.EqualValues(t, 0, n) + n, _ = mp.RunningByIndex(i) + require.EqualValues(t, 5, n) + n, _ = mp.FreeByIndex(i) + require.EqualValues(t, 0, n) + } + close(ch) + require.NoError(t, mp.ReleaseTimeout(3*time.Second)) + require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ErrPoolClosed) + require.ErrorIs(t, mp.Invoke(nil), ErrPoolClosed) + require.Zero(t, mp.Running()) + require.True(t, mp.IsClosed()) + ch = make(chan struct{}) + } + testFn() + + mp.Reboot() + testFn() + + mp, err = 
NewMultiPoolWithFuncGeneric(10, 5, longRunningPoolFuncCh, LeastTasks) + testFn() + + mp.Reboot() + testFn() + + mp.Tune(10) +} diff --git a/multipool.go b/multipool.go index 3f78ce2c..342b0383 100644 --- a/multipool.go +++ b/multipool.go @@ -25,6 +25,7 @@ package ants import ( "errors" "fmt" + "math" "strings" "sync/atomic" "time" @@ -58,6 +59,10 @@ type MultiPool struct { // NewMultiPool instantiates a MultiPool with a size of the pool list and a size // per pool, and the load-balancing strategy. func NewMultiPool(size, sizePerPool int, lbs LoadBalancingStrategy, options ...Option) (*MultiPool, error) { + if size <= 0 { + return nil, ErrInvalidMultiPoolSize + } + if lbs != RoundRobin && lbs != LeastTasks { return nil, ErrInvalidLoadBalancingStrategy } @@ -69,16 +74,13 @@ func NewMultiPool(size, sizePerPool int, lbs LoadBalancingStrategy, options ...O } pools[i] = pool } - return &MultiPool{pools: pools, lbs: lbs}, nil + return &MultiPool{pools: pools, index: math.MaxUint32, lbs: lbs}, nil } func (mp *MultiPool) next(lbs LoadBalancingStrategy) (idx int) { switch lbs { case RoundRobin: - if idx = int((atomic.AddUint32(&mp.index, 1) - 1) % uint32(len(mp.pools))); idx == -1 { - idx = 0 - } - return + return int(atomic.AddUint32(&mp.index, 1) % uint32(len(mp.pools))) case LeastTasks: leastTasks := 1<<31 - 1 for i, pool := range mp.pools { diff --git a/multipool_func.go b/multipool_func.go index ed7e1dc2..7b4b6e54 100644 --- a/multipool_func.go +++ b/multipool_func.go @@ -25,6 +25,7 @@ package ants import ( "errors" "fmt" + "math" "strings" "sync/atomic" "time" @@ -47,6 +48,10 @@ type MultiPoolWithFunc struct { // NewMultiPoolWithFunc instantiates a MultiPoolWithFunc with a size of the pool list and a size // per pool, and the load-balancing strategy. func NewMultiPoolWithFunc(size, sizePerPool int, fn func(any), lbs LoadBalancingStrategy, options ...Option) (*MultiPoolWithFunc, error) { + if size <= 0 { + return nil, ErrInvalidMultiPoolSize + } + if lbs != RoundRobin && lbs != LeastTasks { return nil, ErrInvalidLoadBalancingStrategy } @@ -58,16 +63,13 @@ func NewMultiPoolWithFunc(size, sizePerPool int, fn func(any), lbs LoadBalancing } pools[i] = pool } - return &MultiPoolWithFunc{pools: pools, lbs: lbs}, nil + return &MultiPoolWithFunc{pools: pools, index: math.MaxUint32, lbs: lbs}, nil } func (mp *MultiPoolWithFunc) next(lbs LoadBalancingStrategy) (idx int) { switch lbs { case RoundRobin: - if idx = int((atomic.AddUint32(&mp.index, 1) - 1) % uint32(len(mp.pools))); idx == -1 { - idx = 0 - } - return + return int(atomic.AddUint32(&mp.index, 1) % uint32(len(mp.pools))) case LeastTasks: leastTasks := 1<<31 - 1 for i, pool := range mp.pools { diff --git a/multipool_func_generic.go b/multipool_func_generic.go new file mode 100644 index 00000000..f5931e51 --- /dev/null +++ b/multipool_func_generic.go @@ -0,0 +1,215 @@ +// MIT License + +// Copyright (c) 2025 Andy Pan + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package ants
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// MultiPoolWithFuncGeneric is the generic version of MultiPoolWithFunc.
+type MultiPoolWithFuncGeneric[T any] struct {
+	pools []*PoolWithFuncGeneric[T]
+	index uint32
+	state int32
+	lbs   LoadBalancingStrategy
+}
+
+// NewMultiPoolWithFuncGeneric instantiates a MultiPoolWithFuncGeneric with a size of the pool list and a size
+// per pool, and the load-balancing strategy.
+func NewMultiPoolWithFuncGeneric[T any](size, sizePerPool int, fn func(T), lbs LoadBalancingStrategy, options ...Option) (*MultiPoolWithFuncGeneric[T], error) {
+	if size <= 0 {
+		return nil, ErrInvalidMultiPoolSize
+	}
+
+	if lbs != RoundRobin && lbs != LeastTasks {
+		return nil, ErrInvalidLoadBalancingStrategy
+	}
+	pools := make([]*PoolWithFuncGeneric[T], size)
+	for i := 0; i < size; i++ {
+		pool, err := NewPoolWithFuncGeneric(sizePerPool, fn, options...)
+		if err != nil {
+			return nil, err
+		}
+		pools[i] = pool
+	}
+	return &MultiPoolWithFuncGeneric[T]{pools: pools, index: math.MaxUint32, lbs: lbs}, nil
+}
+
+func (mp *MultiPoolWithFuncGeneric[T]) next(lbs LoadBalancingStrategy) (idx int) {
+	switch lbs {
+	case RoundRobin:
+		return int(atomic.AddUint32(&mp.index, 1) % uint32(len(mp.pools)))
+	case LeastTasks:
+		leastTasks := 1<<31 - 1
+		for i, pool := range mp.pools {
+			if n := pool.Running(); n < leastTasks {
+				leastTasks = n
+				idx = i
+			}
+		}
+		return
+	}
+	return -1
+}
+
+// Invoke submits a task to a pool selected by the load-balancing strategy.
+func (mp *MultiPoolWithFuncGeneric[T]) Invoke(args T) (err error) {
+	if mp.IsClosed() {
+		return ErrPoolClosed
+	}
+
+	if err = mp.pools[mp.next(mp.lbs)].Invoke(args); err == nil {
+		return
+	}
+	if err == ErrPoolOverload && mp.lbs == RoundRobin {
+		return mp.pools[mp.next(LeastTasks)].Invoke(args)
+	}
+	return
+}
+
+// Running returns the number of the currently running workers across all pools.
+func (mp *MultiPoolWithFuncGeneric[T]) Running() (n int) {
+	for _, pool := range mp.pools {
+		n += pool.Running()
+	}
+	return
+}
+
+// RunningByIndex returns the number of the currently running workers in the specific pool.
+func (mp *MultiPoolWithFuncGeneric[T]) RunningByIndex(idx int) (int, error) {
+	if idx < 0 || idx >= len(mp.pools) {
+		return -1, ErrInvalidPoolIndex
+	}
+	return mp.pools[idx].Running(), nil
+}
+
+// Free returns the number of available workers across all pools.
+func (mp *MultiPoolWithFuncGeneric[T]) Free() (n int) {
+	for _, pool := range mp.pools {
+		n += pool.Free()
+	}
+	return
+}
+
+// FreeByIndex returns the number of available workers in the specific pool.
+func (mp *MultiPoolWithFuncGeneric[T]) FreeByIndex(idx int) (int, error) {
+	if idx < 0 || idx >= len(mp.pools) {
+		return -1, ErrInvalidPoolIndex
+	}
+	return mp.pools[idx].Free(), nil
+}
+
+// Waiting returns the number of the currently waiting tasks across all pools.
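+//
+// Note that this is a point-in-time aggregate: workers dequeue tasks
+// concurrently, so the sum may already be stale by the time the caller reads it.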
+func (mp *MultiPoolWithFuncGeneric[T]) Waiting() (n int) { + for _, pool := range mp.pools { + n += pool.Waiting() + } + return +} + +// WaitingByIndex returns the number of the currently waiting tasks in the specific pool. +func (mp *MultiPoolWithFuncGeneric[T]) WaitingByIndex(idx int) (int, error) { + if idx < 0 || idx >= len(mp.pools) { + return -1, ErrInvalidPoolIndex + } + return mp.pools[idx].Waiting(), nil +} + +// Cap returns the capacity of this multi-pool. +func (mp *MultiPoolWithFuncGeneric[T]) Cap() (n int) { + for _, pool := range mp.pools { + n += pool.Cap() + } + return +} + +// Tune resizes each pool in multi-pool. +// +// Note that this method doesn't resize the overall +// capacity of multi-pool. +func (mp *MultiPoolWithFuncGeneric[T]) Tune(size int) { + for _, pool := range mp.pools { + pool.Tune(size) + } +} + +// IsClosed indicates whether the multi-pool is closed. +func (mp *MultiPoolWithFuncGeneric[T]) IsClosed() bool { + return atomic.LoadInt32(&mp.state) == CLOSED +} + +// ReleaseTimeout closes the multi-pool with a timeout, +// it waits all pools to be closed before timing out. +func (mp *MultiPoolWithFuncGeneric[T]) ReleaseTimeout(timeout time.Duration) error { + if !atomic.CompareAndSwapInt32(&mp.state, OPENED, CLOSED) { + return ErrPoolClosed + } + + errCh := make(chan error, len(mp.pools)) + var wg errgroup.Group + for i, pool := range mp.pools { + func(p *PoolWithFuncGeneric[T], idx int) { + wg.Go(func() error { + err := p.ReleaseTimeout(timeout) + if err != nil { + err = fmt.Errorf("pool %d: %v", idx, err) + } + errCh <- err + return err + }) + }(pool, i) + } + + _ = wg.Wait() + + var errStr strings.Builder + for i := 0; i < len(mp.pools); i++ { + if err := <-errCh; err != nil { + errStr.WriteString(err.Error()) + errStr.WriteString(" | ") + } + } + + if errStr.Len() == 0 { + return nil + } + + return errors.New(strings.TrimSuffix(errStr.String(), " | ")) +} + +// Reboot reboots a released multi-pool. +func (mp *MultiPoolWithFuncGeneric[T]) Reboot() { + if atomic.CompareAndSwapInt32(&mp.state, CLOSED, OPENED) { + atomic.StoreUint32(&mp.index, 0) + for _, pool := range mp.pools { + pool.Reboot() + } + } +} diff --git a/pool_func.go b/pool_func.go index 70f5fae2..a181b43b 100644 --- a/pool_func.go +++ b/pool_func.go @@ -26,8 +26,8 @@ package ants type PoolWithFunc struct { *poolCommon - // poolFunc is the unified function for processing tasks. - poolFunc func(any) + // fn is the unified function for processing tasks. + fn func(any) } // Invoke passes arguments to the pool. @@ -36,14 +36,14 @@ type PoolWithFunc struct { // but what calls for special attention is that you will get blocked with the last // Pool.Invoke() call once the current Pool runs out of its capacity, and to avoid this, // you should instantiate a PoolWithFunc with ants.WithNonblocking(true). 
-func (p *PoolWithFunc) Invoke(args any) error { +func (p *PoolWithFunc) Invoke(arg any) error { if p.IsClosed() { return ErrPoolClosed } w, err := p.retrieveWorker() if w != nil { - w.inputParam(args) + w.inputArg(arg) } return err } @@ -61,13 +61,13 @@ func NewPoolWithFunc(size int, pf func(any), options ...Option) (*PoolWithFunc, pool := &PoolWithFunc{ poolCommon: pc, - poolFunc: pf, + fn: pf, } pool.workerCache.New = func() any { return &goWorkerWithFunc{ pool: pool, - args: make(chan any, workerChanCap), + arg: make(chan any, workerChanCap), } } diff --git a/pool_func_generic.go b/pool_func_generic.go new file mode 100644 index 00000000..06ed3cad --- /dev/null +++ b/pool_func_generic.go @@ -0,0 +1,71 @@ +// MIT License + +// Copyright (c) 2025 Andy Pan + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package ants + +// PoolWithFuncGeneric is the generic version of PoolWithFunc. +type PoolWithFuncGeneric[T any] struct { + *poolCommon + + // fn is the unified function for processing tasks. + fn func(T) +} + +// Invoke passes the argument to the pool to start a new task. +func (p *PoolWithFuncGeneric[T]) Invoke(arg T) error { + if p.IsClosed() { + return ErrPoolClosed + } + + w, err := p.retrieveWorker() + if w != nil { + w.(*goWorkerWithFuncGeneric[T]).arg <- arg + } + return err +} + +// NewPoolWithFuncGeneric instantiates a PoolWithFuncGeneric[T] with customized options. +func NewPoolWithFuncGeneric[T any](size int, pf func(T), options ...Option) (*PoolWithFuncGeneric[T], error) { + if pf == nil { + return nil, ErrLackPoolFunc + } + + pc, err := newPool(size, options...) + if err != nil { + return nil, err + } + + pool := &PoolWithFuncGeneric[T]{ + poolCommon: pc, + fn: pf, + } + + pool.workerCache.New = func() any { + return &goWorkerWithFuncGeneric[T]{ + pool: pool, + arg: make(chan T, workerChanCap), + exit: make(chan struct{}, 1), + } + } + + return pool, nil +} diff --git a/worker.go b/worker.go index f8dd6506..03b4bd70 100644 --- a/worker.go +++ b/worker.go @@ -31,6 +31,8 @@ import ( // it starts a goroutine that accepts tasks and // performs function calls. type goWorker struct { + worker + // pool who owns this worker. 
	pool *Pool
 
@@ -64,11 +66,11 @@ func (w *goWorker) run() {
 		w.pool.cond.Signal()
 	}()
 
-	for f := range w.task {
-		if f == nil {
+	for fn := range w.task {
+		if fn == nil {
 			return
 		}
-		f()
+		fn()
 		if ok := w.pool.revertWorker(w); !ok {
 			return
 		}
@@ -91,7 +93,3 @@ func (w *goWorker) setLastUsedTime(t time.Time) {
 func (w *goWorker) inputFunc(fn func()) {
 	w.task <- fn
 }
-
-func (w *goWorker) inputParam(any) {
-	panic("unreachable")
-}
diff --git a/worker_func.go b/worker_func.go
index 76c697ac..8437e40d 100644
--- a/worker_func.go
+++ b/worker_func.go
@@ -31,11 +31,13 @@ import (
 // it starts a goroutine that accepts tasks and
 // performs function calls.
 type goWorkerWithFunc struct {
+	worker
+
 	// pool who owns this worker.
 	pool *PoolWithFunc
 
-	// args is a job should be done.
-	args chan any
+	// arg is the argument for the function.
+	arg chan any
 
 	// lastUsed will be updated when putting a worker back into queue.
 	lastUsed time.Time
@@ -64,11 +66,11 @@ func (w *goWorkerWithFunc) run() {
 		w.pool.cond.Signal()
 	}()
 
-	for args := range w.args {
-		if args == nil {
+	for arg := range w.arg {
+		if arg == nil {
 			return
 		}
-		w.pool.poolFunc(args)
+		w.pool.fn(arg)
 		if ok := w.pool.revertWorker(w); !ok {
 			return
 		}
@@ -77,7 +79,7 @@ func (w *goWorkerWithFunc) run() {
 }
 
 func (w *goWorkerWithFunc) finish() {
-	w.args <- nil
+	w.arg <- nil
 }
 
 func (w *goWorkerWithFunc) lastUsedTime() time.Time {
@@ -88,10 +90,6 @@ func (w *goWorkerWithFunc) setLastUsedTime(t time.Time) {
 	w.lastUsed = t
 }
 
-func (w *goWorkerWithFunc) inputFunc(func()) {
-	panic("unreachable")
-}
-
-func (w *goWorkerWithFunc) inputParam(arg any) {
-	w.args <- arg
+func (w *goWorkerWithFunc) inputArg(arg any) {
+	w.arg <- arg
 }
diff --git a/worker_func_generic.go b/worker_func_generic.go
new file mode 100644
index 00000000..a76d109c
--- /dev/null
+++ b/worker_func_generic.go
@@ -0,0 +1,96 @@
+// MIT License

+// Copyright (c) 2025 Andy Pan

+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package ants
+
+import (
+	"runtime/debug"
+	"time"
+)
+
+// goWorkerWithFuncGeneric is the actual executor who runs the tasks,
+// it starts a goroutine that accepts tasks and
+// performs function calls.
+type goWorkerWithFuncGeneric[T any] struct {
+	worker
+
+	// pool who owns this worker.
+	pool *PoolWithFuncGeneric[T]
+
+	// arg is the argument for the function.
+	arg chan T
+
+	// exit signals the goroutine to exit.
+	exit chan struct{}
+
+	// lastUsed will be updated when putting a worker back into queue.
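+	// The scavenger goroutine compares it against ExpiryDuration to decide
+	// which idle workers to clean up.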
+ lastUsed time.Time +} + +// run starts a goroutine to repeat the process +// that performs the function calls. +func (w *goWorkerWithFuncGeneric[T]) run() { + w.pool.addRunning(1) + go func() { + defer func() { + if w.pool.addRunning(-1) == 0 && w.pool.IsClosed() { + w.pool.once.Do(func() { + close(w.pool.allDone) + }) + } + w.pool.workerCache.Put(w) + if p := recover(); p != nil { + if ph := w.pool.options.PanicHandler; ph != nil { + ph(p) + } else { + w.pool.options.Logger.Printf("worker exits from panic: %v\n%s\n", p, debug.Stack()) + } + } + // Call Signal() here in case there are goroutines waiting for available workers. + w.pool.cond.Signal() + }() + + for { + select { + case <-w.exit: + return + case arg := <-w.arg: + w.pool.fn(arg) + if ok := w.pool.revertWorker(w); !ok { + return + } + } + } + }() +} + +func (w *goWorkerWithFuncGeneric[T]) finish() { + w.exit <- struct{}{} +} + +func (w *goWorkerWithFuncGeneric[T]) lastUsedTime() time.Time { + return w.lastUsed +} + +func (w *goWorkerWithFuncGeneric[T]) setLastUsedTime(t time.Time) { + w.lastUsed = t +} diff --git a/worker_loop_queue.go b/worker_loop_queue.go index a5451ab5..52091f31 100644 --- a/worker_loop_queue.go +++ b/worker_loop_queue.go @@ -12,6 +12,9 @@ type loopQueue struct { } func newWorkerLoopQueue(size int) *loopQueue { + if size <= 0 { + return nil + } return &loopQueue{ items: make([]worker, size), size: size, @@ -39,10 +42,6 @@ func (wq *loopQueue) isEmpty() bool { } func (wq *loopQueue) insert(w worker) error { - if wq.size == 0 { - return errQueueIsReleased - } - if wq.isFull { return errQueueIsFull } diff --git a/worker_loop_queue_test.go b/worker_loop_queue_test.go index 755cf156..8e043946 100644 --- a/worker_loop_queue_test.go +++ b/worker_loop_queue_test.go @@ -6,15 +6,17 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewLoopQueue(t *testing.T) { size := 100 q := newWorkerLoopQueue(size) - assert.EqualValues(t, 0, q.len(), "Len error") - assert.Equal(t, true, q.isEmpty(), "IsEmpty error") - assert.Nil(t, q.detach(), "Dequeue error") + require.EqualValues(t, 0, q.len(), "Len error") + require.Equal(t, true, q.isEmpty(), "IsEmpty error") + require.Nil(t, q.detach(), "Dequeue error") + + require.Nil(t, newWorkerLoopQueue(0)) } func TestLoopQueue(t *testing.T) { @@ -27,9 +29,9 @@ func TestLoopQueue(t *testing.T) { break } } - assert.EqualValues(t, 5, q.len(), "Len error") + require.EqualValues(t, 5, q.len(), "Len error") _ = q.detach() - assert.EqualValues(t, 4, q.len(), "Len error") + require.EqualValues(t, 4, q.len(), "Len error") time.Sleep(time.Second) @@ -39,13 +41,13 @@ func TestLoopQueue(t *testing.T) { break } } - assert.EqualValues(t, 10, q.len(), "Len error") + require.EqualValues(t, 10, q.len(), "Len error") err := q.insert(&goWorker{lastUsed: time.Now()}) - assert.Error(t, err, "Enqueue, error") + require.Error(t, err, "Enqueue, error") q.refresh(time.Second) - assert.EqualValuesf(t, 6, q.len(), "Len error: %d", q.len()) + require.EqualValuesf(t, 6, q.len(), "Len error: %d", q.len()) } func TestRotatedQueueSearch(t *testing.T) { @@ -57,18 +59,18 @@ func TestRotatedQueueSearch(t *testing.T) { _ = q.insert(&goWorker{lastUsed: time.Now()}) - assert.EqualValues(t, 0, q.binarySearch(time.Now()), "index should be 0") - assert.EqualValues(t, -1, q.binarySearch(expiry1), "index should be -1") + require.EqualValues(t, 0, q.binarySearch(time.Now()), "index should be 0") + require.EqualValues(t, -1, q.binarySearch(expiry1), "index should 
be -1") // 2 expiry2 := time.Now() _ = q.insert(&goWorker{lastUsed: time.Now()}) - assert.EqualValues(t, -1, q.binarySearch(expiry1), "index should be -1") + require.EqualValues(t, -1, q.binarySearch(expiry1), "index should be -1") - assert.EqualValues(t, 0, q.binarySearch(expiry2), "index should be 0") + require.EqualValues(t, 0, q.binarySearch(expiry2), "index should be 0") - assert.EqualValues(t, 1, q.binarySearch(time.Now()), "index should be 1") + require.EqualValues(t, 1, q.binarySearch(time.Now()), "index should be 1") // more for i := 0; i < 5; i++ { @@ -83,7 +85,7 @@ func TestRotatedQueueSearch(t *testing.T) { err = q.insert(&goWorker{lastUsed: time.Now()}) } - assert.EqualValues(t, 7, q.binarySearch(expiry3), "index should be 7") + require.EqualValues(t, 7, q.binarySearch(expiry3), "index should be 7") // rotate for i := 0; i < 6; i++ { @@ -98,7 +100,7 @@ func TestRotatedQueueSearch(t *testing.T) { } // head = 6, tail = 5, insert direction -> // [expiry4, time, time, time, time, nil/tail, time/head, time, time, time] - assert.EqualValues(t, 0, q.binarySearch(expiry4), "index should be 0") + require.EqualValues(t, 0, q.binarySearch(expiry4), "index should be 0") for i := 0; i < 3; i++ { _ = q.detach() @@ -108,17 +110,17 @@ func TestRotatedQueueSearch(t *testing.T) { // head = 6, tail = 5, insert direction -> // [expiry4, time, time, time, time, expiry5, nil/tail, nil, nil, time/head] - assert.EqualValues(t, 5, q.binarySearch(expiry5), "index should be 5") + require.EqualValues(t, 5, q.binarySearch(expiry5), "index should be 5") for i := 0; i < 3; i++ { _ = q.insert(&goWorker{lastUsed: time.Now()}) } // head = 9, tail = 9, insert direction -> // [expiry4, time, time, time, time, expiry5, time, time, time, time/head/tail] - assert.EqualValues(t, -1, q.binarySearch(expiry2), "index should be -1") + require.EqualValues(t, -1, q.binarySearch(expiry2), "index should be -1") - assert.EqualValues(t, 9, q.binarySearch(q.items[9].lastUsedTime()), "index should be 9") - assert.EqualValues(t, 8, q.binarySearch(time.Now()), "index should be 8") + require.EqualValues(t, 9, q.binarySearch(q.items[9].lastUsedTime()), "index should be 9") + require.EqualValues(t, 8, q.binarySearch(time.Now()), "index should be 8") } func TestRetrieveExpiry(t *testing.T) { @@ -139,7 +141,7 @@ func TestRetrieveExpiry(t *testing.T) { } workers := q.refresh(u) - assert.EqualValues(t, expirew, workers, "expired workers aren't right") + require.EqualValues(t, expirew, workers, "expired workers aren't right") // test [ time, time, time, time, time, time+1s, time+1s, time+1s, time+1s, time+1s] time.Sleep(u) @@ -152,7 +154,7 @@ func TestRetrieveExpiry(t *testing.T) { workers2 := q.refresh(u) - assert.EqualValues(t, expirew, workers2, "expired workers aren't right") + require.EqualValues(t, expirew, workers2, "expired workers aren't right") // test [ time+1s, time+1s, time+1s, nil, nil, time+1s, time+1s, time+1s, time+1s, time+1s] for i := 0; i < size/2; i++ { @@ -172,5 +174,5 @@ func TestRetrieveExpiry(t *testing.T) { workers3 := q.refresh(u) - assert.EqualValues(t, expirew, workers3, "expired workers aren't right") + require.EqualValues(t, expirew, workers3, "expired workers aren't right") } diff --git a/worker_queue.go b/worker_queue.go index 1c44ee64..4131972a 100644 --- a/worker_queue.go +++ b/worker_queue.go @@ -5,13 +5,8 @@ import ( "time" ) -var ( - // errQueueIsFull will be returned when the worker queue is full. 
- errQueueIsFull = errors.New("the queue is full") - - // errQueueIsReleased will be returned when trying to insert item to a released worker queue. - errQueueIsReleased = errors.New("the queue length is zero") -) +// errQueueIsFull will be returned when the worker queue is full. +var errQueueIsFull = errors.New("the queue is full") type worker interface { run() @@ -19,7 +14,7 @@ type worker interface { lastUsedTime() time.Time setLastUsedTime(t time.Time) inputFunc(func()) - inputParam(any) + inputArg(any) } type workerQueue interface { diff --git a/worker_stack.go b/worker_stack.go index 6b01abcd..8eb12ab7 100644 --- a/worker_stack.go +++ b/worker_stack.go @@ -13,57 +13,57 @@ func newWorkerStack(size int) *workerStack { } } -func (wq *workerStack) len() int { - return len(wq.items) +func (ws *workerStack) len() int { + return len(ws.items) } -func (wq *workerStack) isEmpty() bool { - return len(wq.items) == 0 +func (ws *workerStack) isEmpty() bool { + return len(ws.items) == 0 } -func (wq *workerStack) insert(w worker) error { - wq.items = append(wq.items, w) +func (ws *workerStack) insert(w worker) error { + ws.items = append(ws.items, w) return nil } -func (wq *workerStack) detach() worker { - l := wq.len() +func (ws *workerStack) detach() worker { + l := ws.len() if l == 0 { return nil } - w := wq.items[l-1] - wq.items[l-1] = nil // avoid memory leaks - wq.items = wq.items[:l-1] + w := ws.items[l-1] + ws.items[l-1] = nil // avoid memory leaks + ws.items = ws.items[:l-1] return w } -func (wq *workerStack) refresh(duration time.Duration) []worker { - n := wq.len() +func (ws *workerStack) refresh(duration time.Duration) []worker { + n := ws.len() if n == 0 { return nil } expiryTime := time.Now().Add(-duration) - index := wq.binarySearch(0, n-1, expiryTime) + index := ws.binarySearch(0, n-1, expiryTime) - wq.expiry = wq.expiry[:0] + ws.expiry = ws.expiry[:0] if index != -1 { - wq.expiry = append(wq.expiry, wq.items[:index+1]...) - m := copy(wq.items, wq.items[index+1:]) + ws.expiry = append(ws.expiry, ws.items[:index+1]...) 
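+		// Compact the surviving workers to the front of the slice and nil out
+		// the vacated tail slots so the stack drops its references to the
+		// expired workers.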
+ m := copy(ws.items, ws.items[index+1:]) for i := m; i < n; i++ { - wq.items[i] = nil + ws.items[i] = nil } - wq.items = wq.items[:m] + ws.items = ws.items[:m] } - return wq.expiry + return ws.expiry } -func (wq *workerStack) binarySearch(l, r int, expiryTime time.Time) int { +func (ws *workerStack) binarySearch(l, r int, expiryTime time.Time) int { for l <= r { mid := l + ((r - l) >> 1) // avoid overflow when computing mid - if expiryTime.Before(wq.items[mid].lastUsedTime()) { + if expiryTime.Before(ws.items[mid].lastUsedTime()) { r = mid - 1 } else { l = mid + 1 @@ -72,10 +72,10 @@ func (wq *workerStack) binarySearch(l, r int, expiryTime time.Time) int { return r } -func (wq *workerStack) reset() { - for i := 0; i < wq.len(); i++ { - wq.items[i].finish() - wq.items[i] = nil +func (ws *workerStack) reset() { + for i := 0; i < ws.len(); i++ { + ws.items[i].finish() + ws.items[i] = nil } - wq.items = wq.items[:0] + ws.items = ws.items[:0] } diff --git a/worker_stack_test.go b/worker_stack_test.go index 453d6e3a..87fca0d2 100644 --- a/worker_stack_test.go +++ b/worker_stack_test.go @@ -6,15 +6,15 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewWorkerStack(t *testing.T) { size := 100 q := newWorkerStack(size) - assert.EqualValues(t, 0, q.len(), "Len error") - assert.Equal(t, true, q.isEmpty(), "IsEmpty error") - assert.Nil(t, q.detach(), "Dequeue error") + require.EqualValues(t, 0, q.len(), "Len error") + require.Equal(t, true, q.isEmpty(), "IsEmpty error") + require.Nil(t, q.detach(), "Dequeue error") } func TestWorkerStack(t *testing.T) { @@ -26,7 +26,7 @@ func TestWorkerStack(t *testing.T) { break } } - assert.EqualValues(t, 5, q.len(), "Len error") + require.EqualValues(t, 5, q.len(), "Len error") expired := time.Now() @@ -43,9 +43,9 @@ func TestWorkerStack(t *testing.T) { t.Fatal("Enqueue error") } } - assert.EqualValues(t, 12, q.len(), "Len error") + require.EqualValues(t, 12, q.len(), "Len error") q.refresh(time.Second) - assert.EqualValues(t, 6, q.len(), "Len error") + require.EqualValues(t, 6, q.len(), "Len error") } // It seems that something wrong with time.Now() on Windows, not sure whether it is a bug on Windows, @@ -58,18 +58,18 @@ func TestSearch(t *testing.T) { _ = q.insert(&goWorker{lastUsed: time.Now()}) - assert.EqualValues(t, 0, q.binarySearch(0, q.len()-1, time.Now()), "index should be 0") - assert.EqualValues(t, -1, q.binarySearch(0, q.len()-1, expiry1), "index should be -1") + require.EqualValues(t, 0, q.binarySearch(0, q.len()-1, time.Now()), "index should be 0") + require.EqualValues(t, -1, q.binarySearch(0, q.len()-1, expiry1), "index should be -1") // 2 expiry2 := time.Now() _ = q.insert(&goWorker{lastUsed: time.Now()}) - assert.EqualValues(t, -1, q.binarySearch(0, q.len()-1, expiry1), "index should be -1") + require.EqualValues(t, -1, q.binarySearch(0, q.len()-1, expiry1), "index should be -1") - assert.EqualValues(t, 0, q.binarySearch(0, q.len()-1, expiry2), "index should be 0") + require.EqualValues(t, 0, q.binarySearch(0, q.len()-1, expiry2), "index should be 0") - assert.EqualValues(t, 1, q.binarySearch(0, q.len()-1, time.Now()), "index should be 1") + require.EqualValues(t, 1, q.binarySearch(0, q.len()-1, time.Now()), "index should be 1") // more for i := 0; i < 5; i++ { @@ -84,5 +84,5 @@ func TestSearch(t *testing.T) { _ = q.insert(&goWorker{lastUsed: time.Now()}) } - assert.EqualValues(t, 7, q.binarySearch(0, q.len()-1, expiry3), "index should be 7") + require.EqualValues(t, 7, 
q.binarySearch(0, q.len()-1, expiry3), "index should be 7") } From 160ee0a8b2b5cb230eb9e8f1d3df4733dbe20920 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 12 Jan 2025 23:29:13 +0800 Subject: [PATCH 17/17] test: add some basic testable examples (#353) --- README.md | 203 ++------------------ README_ZH.md | 197 ++----------------- ants_benchmark_test.go | 16 +- ants_test.go | 394 +++++++++++++++++++------------------- example_test.go | 174 +++++++++++++++++ examples/main.go | 114 ----------- options.go | 22 +++ worker_loop_queue.go | 22 +++ worker_loop_queue_test.go | 27 ++- worker_queue.go | 22 +++ worker_stack.go | 22 +++ worker_stack_test.go | 27 ++- 12 files changed, 546 insertions(+), 694 deletions(-) create mode 100644 example_test.go delete mode 100644 examples/main.go diff --git a/README.md b/README.md index ccdff4ad..fc86e129 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ Library `ants` implements a goroutine pool with fixed capacity, managing and rec - Purging overdue goroutines periodically - Abundant APIs: submitting tasks, getting the number of running goroutines, tuning the capacity of the pool dynamically, releasing the pool, rebooting the pool, etc. - Handle panic gracefully to prevent programs from crash -- Efficient in memory usage and it may even achieve ***higher performance*** than unlimited goroutines in Golang +- Efficient in memory usage and it may even achieve ***higher performance*** than unlimited goroutines in Go - Nonblocking mechanism - Preallocated memory (ring buffer, optional) @@ -62,205 +62,30 @@ go get -u github.com/panjf2000/ants/v2 ``` ## 🛠 How to use -Just imagine that your program starts a massive number of goroutines, resulting in a huge consumption of memory. To mitigate that kind of situation, all you need to do is to import `ants` package and submit all your tasks to a default pool with fixed capacity, activated when package `ants` is imported: +Check out [the examples](https://pkg.go.dev/github.com/panjf2000/ants/v2#pkg-examples) for basic usage. -``` go -package main - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/panjf2000/ants/v2" -) - -var sum int32 - -func myFunc(i any) { - n := i.(int32) - atomic.AddInt32(&sum, n) - fmt.Printf("run with %d\n", n) -} - -func demoFunc() { - time.Sleep(10 * time.Millisecond) - fmt.Println("Hello World!") -} - -func main() { - defer ants.Release() - - runTimes := 1000 - - // Use the common pool. - var wg sync.WaitGroup - syncCalculateSum := func() { - demoFunc() - wg.Done() - } - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = ants.Submit(syncCalculateSum) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", ants.Running()) - fmt.Printf("finish all tasks.\n") - - // Use the pool with a function, - // set 10 to the capacity of goroutine pool and 1 second for expired duration. - p, _ := ants.NewPoolWithFunc(10, func(i any) { - myFunc(i) - wg.Done() - }) - defer p.Release() - // Submit tasks one by one. - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = p.Invoke(int32(i)) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", p.Running()) - fmt.Printf("finish all tasks, result is %d\n", sum) - if sum != 499500 { - panic("the final result is wrong!!!") - } - - // Use the MultiPool and set the capacity of the 10 goroutine pools to unlimited. - // If you use -1 as the pool size parameter, the size will be unlimited. - // There are two load-balancing algorithms for pools: ants.RoundRobin and ants.LeastTasks. 
- mp, _ := ants.NewMultiPool(10, -1, ants.RoundRobin) - defer mp.ReleaseTimeout(5 * time.Second) - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = mp.Submit(syncCalculateSum) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", mp.Running()) - fmt.Printf("finish all tasks.\n") - - // Use the MultiPoolFunc and set the capacity of 10 goroutine pools to (runTimes/10). - mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i any) { - myFunc(i) - wg.Done() - }, ants.LeastTasks) - defer mpf.ReleaseTimeout(5 * time.Second) - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = mpf.Invoke(int32(i)) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", mpf.Running()) - fmt.Printf("finish all tasks, result is %d\n", sum) - if sum != 499500*2 { - panic("the final result is wrong!!!") - } -} -``` - -### Functional options for ants pool +### Functional options for pool -```go -// Option represents the optional function. -type Option func(opts *Options) - -// Options contains all options which will be applied when instantiating a ants pool. -type Options struct { - // ExpiryDuration is a period for the scavenger goroutine to clean up those expired workers, - // the scavenger scans all workers every `ExpiryDuration` and clean up those workers that haven't been - // used for more than `ExpiryDuration`. - ExpiryDuration time.Duration - - // PreAlloc indicates whether to make memory pre-allocation when initializing Pool. - PreAlloc bool - - // Max number of goroutine blocking on pool.Submit. - // 0 (default value) means no such limit. - MaxBlockingTasks int - - // When Nonblocking is true, Pool.Submit will never be blocked. - // ErrPoolOverload will be returned when Pool.Submit cannot be done at once. - // When Nonblocking is true, MaxBlockingTasks is inoperative. - Nonblocking bool - - // PanicHandler is used to handle panics from each worker goroutine. - // if nil, panics will be thrown out again from worker goroutines. - PanicHandler func(any) - - // Logger is the customized logger for logging info, if it is not set, - // default standard logger from log package is used. - Logger Logger -} - -// WithOptions accepts the whole options config. -func WithOptions(options Options) Option { - return func(opts *Options) { - *opts = options - } -} - -// WithExpiryDuration sets up the interval time of cleaning up goroutines. -func WithExpiryDuration(expiryDuration time.Duration) Option { - return func(opts *Options) { - opts.ExpiryDuration = expiryDuration - } -} - -// WithPreAlloc indicates whether it should malloc for workers. -func WithPreAlloc(preAlloc bool) Option { - return func(opts *Options) { - opts.PreAlloc = preAlloc - } -} - -// WithMaxBlockingTasks sets up the maximum number of goroutines that are blocked when it reaches the capacity of pool. -func WithMaxBlockingTasks(maxBlockingTasks int) Option { - return func(opts *Options) { - opts.MaxBlockingTasks = maxBlockingTasks - } -} - -// WithNonblocking indicates that pool will return nil when there is no available workers. -func WithNonblocking(nonblocking bool) Option { - return func(opts *Options) { - opts.Nonblocking = nonblocking - } -} - -// WithPanicHandler sets up panic handler. -func WithPanicHandler(panicHandler func(any)) Option { - return func(opts *Options) { - opts.PanicHandler = panicHandler - } -} - -// WithLogger sets up a customized logger. 
-func WithLogger(logger Logger) Option {
-	return func(opts *Options) {
-		opts.Logger = logger
-	}
-}
-```
+`ants.Options` contains all optional configurations of the ants pool, which allows you to customize the goroutine pool by invoking option functions to set up each configuration in the `NewPool`/`NewPoolWithFunc`/`NewPoolWithFuncGeneric` methods.
 
-`ants.Options`contains all optional configurations of the ants pool, which allows you to customize the goroutine pool by invoking option functions to set up each configuration in `NewPool`/`NewPoolWithFunc`method.
+Check out [ants.Options](https://pkg.go.dev/github.com/panjf2000/ants/v2#Options) and [ants.Option](https://pkg.go.dev/github.com/panjf2000/ants/v2#Option) for more details.
 
-### Customize limited pool
+### Customize pool capacity
 
-`ants` also supports customizing the capacity of the pool. You can invoke the `NewPool` method to instantiate a pool with a given capacity, as follows:
+`ants` supports customizing the capacity of the pool. You can call the `NewPool` method to instantiate a `Pool` with a given capacity, as follows:
 
 ``` go
 p, _ := ants.NewPool(10000)
 ```
 
 ### Submit tasks
-Tasks can be submitted by calling `ants.Submit(func())`
+Tasks can be submitted by calling `ants.Submit`:
 
 ```go
 ants.Submit(func(){})
 ```
 
-### Tune pool capacity in runtime
-You can tune the capacity of `ants` pool in runtime with `Tune(int)`:
+### Tune pool capacity at runtime
+You can tune the capacity of the `ants` pool at runtime with `ants.Tune`:
 
 ``` go
 pool.Tune(1000) // Tune its capacity to 1000
@@ -274,11 +99,11 @@ Don't worry about the contention problems in this case, the method here is thre
 `ants` allows you to pre-allocate the memory of the goroutine queue in the pool, which may get a performance enhancement under some special certain circumstances such as the scenario that requires a pool with ultra-large capacity, meanwhile, each task in goroutine lasts for a long time, in this case, pre-mallocing will reduce a lot of memory allocation in goroutine queue.
 
 ```go
-// ants will pre-malloc the whole capacity of pool when you invoke this method
+// ants will pre-malloc the whole capacity of the pool when calling ants.NewPool.
 p, _ := ants.NewPool(100000, ants.WithPreAlloc(true))
 ```
 
-### Release Pool
+### Release pool
 
 ```go
 pool.Release()
```
 
or
 
```go
 pool.ReleaseTimeout(time.Second * 3)
 ```
 
-### Reboot Pool
+### Reboot pool
 
 ```go
-// A pool that has been released can be still used once you invoke the Reboot().
+// A pool that has been released can still be used after calling Reboot().
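+// Until then, submitting tasks to a released pool fails with ants.ErrPoolClosed.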
pool.Reboot() ``` diff --git a/README_ZH.md b/README_ZH.md index 275e539c..e0c323c1 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -17,7 +17,7 @@ ## 📖 简介 -`ants`是一个高性能的 goroutine 池,实现了对大规模 goroutine 的调度管理、goroutine 复用,允许使用者在开发并发程序的时候限制 goroutine 数量,复用资源,达到更高效执行任务的效果。 +`ants` 是一个高性能的 goroutine 池,实现了对大规模 goroutine 的调度管理、goroutine 复用,允许使用者在开发并发程序的时候限制 goroutine 数量,复用资源,达到更高效执行任务的效果。 ## 🚀 功能: @@ -25,7 +25,7 @@ - 定期清理过期的 goroutines,进一步节省资源 - 提供了大量实用的接口:任务提交、获取运行中的 goroutine 数量、动态调整 Pool 大小、释放 Pool、重启 Pool 等 - 优雅处理 panic,防止程序崩溃 -- 资源复用,极大节省内存使用量;在大规模批量并发任务场景下甚至可能比原生 goroutine 并发具有***更高的性能*** +- 资源复用,极大节省内存使用量;在大规模批量并发任务场景下甚至可能比 Go 语言的无限制 goroutine 并发具有***更高的性能*** - 非阻塞机制 - 预分配内存 (环形队列,可选) @@ -62,192 +62,17 @@ go get -u github.com/panjf2000/ants/v2 ``` ## 🛠 使用 -写 go 并发程序的时候如果程序会启动大量的 goroutine ,势必会消耗大量的系统资源(内存,CPU),通过使用 `ants`,可以实例化一个 goroutine 池,复用 goroutine ,节省资源,提升性能: - -``` go -package main - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/panjf2000/ants/v2" -) - -var sum int32 - -func myFunc(i any) { - n := i.(int32) - atomic.AddInt32(&sum, n) - fmt.Printf("run with %d\n", n) -} - -func demoFunc() { - time.Sleep(10 * time.Millisecond) - fmt.Println("Hello World!") -} - -func main() { - defer ants.Release() - - runTimes := 1000 - - // Use the common pool. - var wg sync.WaitGroup - syncCalculateSum := func() { - demoFunc() - wg.Done() - } - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = ants.Submit(syncCalculateSum) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", ants.Running()) - fmt.Printf("finish all tasks.\n") - - // Use the pool with a function, - // set 10 to the capacity of goroutine pool and 1 second for expired duration. - p, _ := ants.NewPoolWithFunc(10, func(i any) { - myFunc(i) - wg.Done() - }) - defer p.Release() - // Submit tasks one by one. - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = p.Invoke(int32(i)) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", p.Running()) - fmt.Printf("finish all tasks, result is %d\n", sum) - if sum != 499500 { - panic("the final result is wrong!!!") - } - - // Use the MultiPool and set the capacity of the 10 goroutine pools to unlimited. - // If you use -1 as the pool size parameter, the size will be unlimited. - // There are two load-balancing algorithms for pools: ants.RoundRobin and ants.LeastTasks. - mp, _ := ants.NewMultiPool(10, -1, ants.RoundRobin) - defer mp.ReleaseTimeout(5 * time.Second) - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = mp.Submit(syncCalculateSum) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", mp.Running()) - fmt.Printf("finish all tasks.\n") - - // Use the MultiPoolFunc and set the capacity of 10 goroutine pools to (runTimes/10). - mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i any) { - myFunc(i) - wg.Done() - }, ants.LeastTasks) - defer mpf.ReleaseTimeout(5 * time.Second) - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = mpf.Invoke(int32(i)) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", mpf.Running()) - fmt.Printf("finish all tasks, result is %d\n", sum) - if sum != 499500*2 { - panic("the final result is wrong!!!") - } -} -``` +基本的使用请查看[示例](https://pkg.go.dev/github.com/panjf2000/ants/v2#pkg-examples). ### Pool 配置 -```go -// Option represents the optional function. -type Option func(opts *Options) - -// Options contains all options which will be applied when instantiating a ants pool. 
-type Options struct { - // ExpiryDuration is a period for the scavenger goroutine to clean up those expired workers, - // the scavenger scans all workers every `ExpiryDuration` and clean up those workers that haven't been - // used for more than `ExpiryDuration`. - ExpiryDuration time.Duration - - // PreAlloc indicates whether to make memory pre-allocation when initializing Pool. - PreAlloc bool - - // Max number of goroutine blocking on pool.Submit. - // 0 (default value) means no such limit. - MaxBlockingTasks int - - // When Nonblocking is true, Pool.Submit will never be blocked. - // ErrPoolOverload will be returned when Pool.Submit cannot be done at once. - // When Nonblocking is true, MaxBlockingTasks is inoperative. - Nonblocking bool - - // PanicHandler is used to handle panics from each worker goroutine. - // if nil, panics will be thrown out again from worker goroutines. - PanicHandler func(any) - - // Logger is the customized logger for logging info, if it is not set, - // default standard logger from log package is used. - Logger Logger -} - -// WithOptions accepts the whole options config. -func WithOptions(options Options) Option { - return func(opts *Options) { - *opts = options - } -} - -// WithExpiryDuration sets up the interval time of cleaning up goroutines. -func WithExpiryDuration(expiryDuration time.Duration) Option { - return func(opts *Options) { - opts.ExpiryDuration = expiryDuration - } -} - -// WithPreAlloc indicates whether it should malloc for workers. -func WithPreAlloc(preAlloc bool) Option { - return func(opts *Options) { - opts.PreAlloc = preAlloc - } -} - -// WithMaxBlockingTasks sets up the maximum number of goroutines that are blocked when it reaches the capacity of pool. -func WithMaxBlockingTasks(maxBlockingTasks int) Option { - return func(opts *Options) { - opts.MaxBlockingTasks = maxBlockingTasks - } -} - -// WithNonblocking indicates that pool will return nil when there is no available workers. -func WithNonblocking(nonblocking bool) Option { - return func(opts *Options) { - opts.Nonblocking = nonblocking - } -} - -// WithPanicHandler sets up panic handler. -func WithPanicHandler(panicHandler func(any)) Option { - return func(opts *Options) { - opts.PanicHandler = panicHandler - } -} - -// WithLogger sets up a customized logger. -func WithLogger(logger Logger) Option { - return func(opts *Options) { - opts.Logger = logger - } -} -``` +通过在调用 `NewPool`/`NewPoolWithFunc`/`NewPoolWithFuncGeneric` 之时使用各种 optional function,可以设置 `ants.Options` 中各个配置项的值,然后用它来定制化 goroutine pool。 -通过在调用`NewPool`/`NewPoolWithFunc`之时使用各种 optional function,可以设置`ants.Options`中各个配置项的值,然后用它来定制化 goroutine pool. 
+更多细节请查看 [ants.Options](https://pkg.go.dev/github.com/panjf2000/ants/v2#Options) 和 [ants.Option](https://pkg.go.dev/github.com/panjf2000/ants/v2#Option) -### 自定义池 -`ants`支持实例化使用者自己的一个 Pool ,指定具体的池容量;通过调用 `NewPool` 方法可以实例化一个新的带有指定容量的 Pool ,如下: +### 自定义 pool 容量 +`ants` 支持实例化使用者自己的一个 Pool,指定具体的 pool 容量;通过调用 `NewPool` 方法可以实例化一个新的带有指定容量的 `Pool`,如下: ``` go p, _ := ants.NewPool(10000) @@ -255,13 +80,13 @@ p, _ := ants.NewPool(10000) ### 任务提交 -提交任务通过调用 `ants.Submit(func())`方法: +提交任务通过调用 `ants.Submit` 方法: ```go ants.Submit(func(){}) ``` ### 动态调整 goroutine 池容量 -需要动态调整 goroutine 池容量可以通过调用`Tune(int)`: +需要动态调整 pool 容量可以通过调用 `ants.Tune`: ``` go pool.Tune(1000) // Tune its capacity to 1000 @@ -272,10 +97,10 @@ pool.Tune(100000) // Tune its capacity to 100000 ### 预先分配 goroutine 队列内存 -`ants`允许你预先把整个池的容量分配内存, 这个功能可以在某些特定的场景下提高 goroutine 池的性能。比如, 有一个场景需要一个超大容量的池,而且每个 goroutine 里面的任务都是耗时任务,这种情况下,预先分配 goroutine 队列内存将会减少不必要的内存重新分配。 +`ants` 支持预先为 pool 分配容量的内存, 这个功能可以在某些特定的场景下提高 goroutine 池的性能。比如, 有一个场景需要一个超大容量的池,而且每个 goroutine 里面的任务都是耗时任务,这种情况下,预先分配 goroutine 队列内存将会减少不必要的内存重新分配。 ```go -// ants will pre-malloc the whole capacity of pool when you invoke this function +// 提前分配的 pool 容量的内存空间 p, _ := ants.NewPool(100000, ants.WithPreAlloc(true)) ``` diff --git a/ants_benchmark_test.go b/ants_benchmark_test.go index 33b4c1e4..38e25dc0 100644 --- a/ants_benchmark_test.go +++ b/ants_benchmark_test.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package ants +package ants_test import ( "runtime" @@ -30,6 +30,8 @@ import ( "time" "golang.org/x/sync/errgroup" + + "github.com/panjf2000/ants/v2" ) const ( @@ -122,7 +124,7 @@ func BenchmarkErrGroup(b *testing.B) { func BenchmarkAntsPool(b *testing.B) { var wg sync.WaitGroup - p, _ := NewPool(PoolCap, WithExpiryDuration(DefaultExpiredTime)) + p, _ := ants.NewPool(PoolCap, ants.WithExpiryDuration(DefaultExpiredTime)) defer p.Release() b.ResetTimer() @@ -140,7 +142,7 @@ func BenchmarkAntsPool(b *testing.B) { func BenchmarkAntsMultiPool(b *testing.B) { var wg sync.WaitGroup - p, _ := NewMultiPool(10, PoolCap/10, RoundRobin, WithExpiryDuration(DefaultExpiredTime)) + p, _ := ants.NewMultiPool(10, PoolCap/10, ants.RoundRobin, ants.WithExpiryDuration(DefaultExpiredTime)) defer p.ReleaseTimeout(DefaultExpiredTime) //nolint:errcheck b.ResetTimer() @@ -178,7 +180,7 @@ func BenchmarkSemaphoreThroughput(b *testing.B) { } func BenchmarkAntsPoolThroughput(b *testing.B) { - p, _ := NewPool(PoolCap, WithExpiryDuration(DefaultExpiredTime)) + p, _ := ants.NewPool(PoolCap, ants.WithExpiryDuration(DefaultExpiredTime)) defer p.Release() b.ResetTimer() @@ -190,7 +192,7 @@ func BenchmarkAntsPoolThroughput(b *testing.B) { } func BenchmarkAntsMultiPoolThroughput(b *testing.B) { - p, _ := NewMultiPool(10, PoolCap/10, RoundRobin, WithExpiryDuration(DefaultExpiredTime)) + p, _ := ants.NewMultiPool(10, PoolCap/10, ants.RoundRobin, ants.WithExpiryDuration(DefaultExpiredTime)) defer p.ReleaseTimeout(DefaultExpiredTime) //nolint:errcheck b.ResetTimer() @@ -202,7 +204,7 @@ func BenchmarkAntsMultiPoolThroughput(b *testing.B) { } func BenchmarkParallelAntsPoolThroughput(b *testing.B) { - p, _ := NewPool(PoolCap, WithExpiryDuration(DefaultExpiredTime)) + p, _ := ants.NewPool(PoolCap, ants.WithExpiryDuration(DefaultExpiredTime)) defer p.Release() b.ResetTimer() @@ -214,7 +216,7 @@ func BenchmarkParallelAntsPoolThroughput(b *testing.B) { } func BenchmarkParallelAntsMultiPoolThroughput(b *testing.B) { - p, _ := NewMultiPool(10, PoolCap/10, 
RoundRobin, WithExpiryDuration(DefaultExpiredTime)) + p, _ := ants.NewMultiPool(10, PoolCap/10, ants.RoundRobin, ants.WithExpiryDuration(DefaultExpiredTime)) defer p.ReleaseTimeout(DefaultExpiredTime) //nolint:errcheck b.ResetTimer() diff --git a/ants_test.go b/ants_test.go index 316497d1..8fc1d5fd 100644 --- a/ants_test.go +++ b/ants_test.go @@ -20,7 +20,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. -package ants +package ants_test import ( "log" @@ -32,6 +32,8 @@ import ( "time" "github.com/stretchr/testify/require" + + "github.com/panjf2000/ants/v2" ) const ( @@ -52,7 +54,7 @@ var curMem uint64 // TestAntsPoolWaitToGetWorker is used to test waiting to get worker. func TestAntsPoolWaitToGetWorker(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPool(AntsSize) + p, _ := ants.NewPool(AntsSize) defer p.Release() for i := 0; i < n; i++ { @@ -72,7 +74,7 @@ func TestAntsPoolWaitToGetWorker(t *testing.T) { func TestAntsPoolWaitToGetWorkerPreMalloc(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPool(AntsSize, WithPreAlloc(true)) + p, _ := ants.NewPool(AntsSize, ants.WithPreAlloc(true)) defer p.Release() for i := 0; i < n; i++ { @@ -93,7 +95,7 @@ func TestAntsPoolWaitToGetWorkerPreMalloc(t *testing.T) { // TestAntsPoolWithFuncWaitToGetWorker is used to test waiting to get worker. func TestAntsPoolWithFuncWaitToGetWorker(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFunc(AntsSize, func(i any) { + p, _ := ants.NewPoolWithFunc(AntsSize, func(i any) { demoPoolFunc(i) wg.Done() }) @@ -114,7 +116,7 @@ func TestAntsPoolWithFuncWaitToGetWorker(t *testing.T) { // TestAntsPoolWithFuncGenericWaitToGetWorker is used to test waiting to get worker. func TestAntsPoolWithFuncGenericWaitToGetWorker(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFuncGeneric(AntsSize, func(i int) { + p, _ := ants.NewPoolWithFuncGeneric(AntsSize, func(i int) { demoPoolFuncInt(i) wg.Done() }) @@ -134,10 +136,10 @@ func TestAntsPoolWithFuncGenericWaitToGetWorker(t *testing.T) { func TestAntsPoolWithFuncWaitToGetWorkerPreMalloc(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFunc(AntsSize, func(i any) { + p, _ := ants.NewPoolWithFunc(AntsSize, func(i any) { demoPoolFunc(i) wg.Done() - }, WithPreAlloc(true)) + }, ants.WithPreAlloc(true)) defer p.Release() for i := 0; i < n; i++ { @@ -154,10 +156,10 @@ func TestAntsPoolWithFuncWaitToGetWorkerPreMalloc(t *testing.T) { func TestAntsPoolWithFuncGenericWaitToGetWorkerPreMalloc(t *testing.T) { var wg sync.WaitGroup - p, _ := NewPoolWithFuncGeneric(AntsSize, func(i int) { + p, _ := ants.NewPoolWithFuncGeneric(AntsSize, func(i int) { demoPoolFuncInt(i) wg.Done() - }, WithPreAlloc(true)) + }, ants.WithPreAlloc(true)) defer p.Release() for i := 0; i < n; i++ { @@ -174,13 +176,13 @@ func TestAntsPoolWithFuncGenericWaitToGetWorkerPreMalloc(t *testing.T) { // TestAntsPoolGetWorkerFromCache is used to test getting worker from sync.Pool. func TestAntsPoolGetWorkerFromCache(t *testing.T) { - p, _ := NewPool(TestSize) + p, _ := ants.NewPool(TestSize) defer p.Release() for i := 0; i < AntsSize; i++ { _ = p.Submit(demoFunc) } - time.Sleep(2 * DefaultCleanIntervalTime) + time.Sleep(2 * ants.DefaultCleanIntervalTime) _ = p.Submit(demoFunc) t.Logf("pool, running workers number:%d", p.Running()) mem := runtime.MemStats{} @@ -192,13 +194,13 @@ func TestAntsPoolGetWorkerFromCache(t *testing.T) { // TestAntsPoolWithFuncGetWorkerFromCache is used to test getting worker from sync.Pool. 
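// The pool sits idle for 2 * ants.DefaultCleanIntervalTime so that its workers
// expire and get purged, which forces the follow-up Invoke to fetch a worker
// from the sync.Pool-backed worker cache.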
func TestAntsPoolWithFuncGetWorkerFromCache(t *testing.T) { dur := 10 - p, _ := NewPoolWithFunc(TestSize, demoPoolFunc) + p, _ := ants.NewPoolWithFunc(TestSize, demoPoolFunc) defer p.Release() for i := 0; i < AntsSize; i++ { _ = p.Invoke(dur) } - time.Sleep(2 * DefaultCleanIntervalTime) + time.Sleep(2 * ants.DefaultCleanIntervalTime) _ = p.Invoke(dur) t.Logf("pool with func, running workers number:%d", p.Running()) mem := runtime.MemStats{} @@ -210,13 +212,13 @@ func TestAntsPoolWithFuncGetWorkerFromCache(t *testing.T) { // TestAntsPoolWithFuncGenericGetWorkerFromCache is used to test getting worker from sync.Pool. func TestAntsPoolWithFuncGenericGetWorkerFromCache(t *testing.T) { dur := 10 - p, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt) + p, _ := ants.NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt) defer p.Release() for i := 0; i < AntsSize; i++ { _ = p.Invoke(dur) } - time.Sleep(2 * DefaultCleanIntervalTime) + time.Sleep(2 * ants.DefaultCleanIntervalTime) _ = p.Invoke(dur) t.Logf("pool with func, running workers number:%d", p.Running()) mem := runtime.MemStats{} @@ -227,13 +229,13 @@ func TestAntsPoolWithFuncGenericGetWorkerFromCache(t *testing.T) { func TestAntsPoolWithFuncGetWorkerFromCachePreMalloc(t *testing.T) { dur := 10 - p, _ := NewPoolWithFunc(TestSize, demoPoolFunc, WithPreAlloc(true)) + p, _ := ants.NewPoolWithFunc(TestSize, demoPoolFunc, ants.WithPreAlloc(true)) defer p.Release() for i := 0; i < AntsSize; i++ { _ = p.Invoke(dur) } - time.Sleep(2 * DefaultCleanIntervalTime) + time.Sleep(2 * ants.DefaultCleanIntervalTime) _ = p.Invoke(dur) t.Logf("pool with func, running workers number:%d", p.Running()) mem := runtime.MemStats{} @@ -244,13 +246,13 @@ func TestAntsPoolWithFuncGetWorkerFromCachePreMalloc(t *testing.T) { func TestAntsPoolWithFuncGenericGetWorkerFromCachePreMalloc(t *testing.T) { dur := 10 - p, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt, WithPreAlloc(true)) + p, _ := ants.NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt, ants.WithPreAlloc(true)) defer p.Release() for i := 0; i < AntsSize; i++ { _ = p.Invoke(dur) } - time.Sleep(2 * DefaultCleanIntervalTime) + time.Sleep(2 * ants.DefaultCleanIntervalTime) _ = p.Invoke(dur) t.Logf("pool with func, running workers number:%d", p.Running()) mem := runtime.MemStats{} @@ -279,20 +281,20 @@ func TestNoPool(t *testing.T) { } func TestAntsPool(t *testing.T) { - defer Release() + defer ants.Release() var wg sync.WaitGroup for i := 0; i < n; i++ { wg.Add(1) - _ = Submit(func() { + _ = ants.Submit(func() { demoFunc() wg.Done() }) } wg.Wait() - t.Logf("pool, capacity:%d", Cap()) - t.Logf("pool, running workers number:%d", Running()) - t.Logf("pool, free workers number:%d", Free()) + t.Logf("pool, capacity:%d", ants.Cap()) + t.Logf("pool, running workers number:%d", ants.Running()) + t.Logf("pool, free workers number:%d", ants.Free()) mem := runtime.MemStats{} runtime.ReadMemStats(&mem) @@ -303,7 +305,7 @@ func TestAntsPool(t *testing.T) { func TestPanicHandler(t *testing.T) { var panicCounter int64 var wg sync.WaitGroup - p0, err := NewPool(10, WithPanicHandler(func(p any) { + p0, err := ants.NewPool(10, ants.WithPanicHandler(func(p any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) t.Logf("catch panic with PanicHandler: %v", p) @@ -319,7 +321,7 @@ func TestPanicHandler(t *testing.T) { require.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c) require.EqualValues(t, 0, p0.Running(), "pool should be empty after panic") - p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }, 
WithPanicHandler(func(_ any) { + p1, err := ants.NewPoolWithFunc(10, func(p any) { panic(p) }, ants.WithPanicHandler(func(_ any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) })) @@ -332,7 +334,7 @@ func TestPanicHandler(t *testing.T) { require.EqualValuesf(t, 2, c, "panic handler didn't work, panicCounter: %d", c) require.EqualValues(t, 0, p1.Running(), "pool should be empty after panic") - p2, err := NewPoolWithFuncGeneric(10, func(s string) { panic(s) }, WithPanicHandler(func(_ any) { + p2, err := ants.NewPoolWithFuncGeneric(10, func(s string) { panic(s) }, ants.WithPanicHandler(func(_ any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) })) @@ -349,7 +351,7 @@ func TestPanicHandler(t *testing.T) { func TestPanicHandlerPreMalloc(t *testing.T) { var panicCounter int64 var wg sync.WaitGroup - p0, err := NewPool(10, WithPreAlloc(true), WithPanicHandler(func(p any) { + p0, err := ants.NewPool(10, ants.WithPreAlloc(true), ants.WithPanicHandler(func(p any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) t.Logf("catch panic with PanicHandler: %v", p) @@ -365,7 +367,7 @@ func TestPanicHandlerPreMalloc(t *testing.T) { require.EqualValuesf(t, 1, c, "panic handler didn't work, panicCounter: %d", c) require.EqualValues(t, 0, p0.Running(), "pool should be empty after panic") - p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }, WithPreAlloc(true), WithPanicHandler(func(_ any) { + p1, err := ants.NewPoolWithFunc(10, func(p any) { panic(p) }, ants.WithPreAlloc(true), ants.WithPanicHandler(func(_ any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) })) @@ -378,7 +380,7 @@ func TestPanicHandlerPreMalloc(t *testing.T) { require.EqualValuesf(t, 2, c, "panic handler didn't work, panicCounter: %d", c) require.EqualValues(t, 0, p1.Running(), "pool should be empty after panic") - p2, err := NewPoolWithFuncGeneric(10, func(p string) { panic(p) }, WithPreAlloc(true), WithPanicHandler(func(_ any) { + p2, err := ants.NewPoolWithFuncGeneric(10, func(p string) { panic(p) }, ants.WithPreAlloc(true), ants.WithPanicHandler(func(_ any) { defer wg.Done() atomic.AddInt64(&panicCounter, 1) })) @@ -393,40 +395,40 @@ func TestPanicHandlerPreMalloc(t *testing.T) { } func TestPoolPanicWithoutHandler(t *testing.T) { - p0, err := NewPool(10) + p0, err := ants.NewPool(10) require.NoErrorf(t, err, "create new pool failed: %v", err) defer p0.Release() _ = p0.Submit(func() { panic("Oops!") }) - p1, err := NewPoolWithFunc(10, func(p any) { panic(p) }) + p1, err := ants.NewPoolWithFunc(10, func(p any) { panic(p) }) require.NoErrorf(t, err, "create new pool with func failed: %v", err) defer p1.Release() _ = p1.Invoke("Oops!") - p2, err := NewPoolWithFuncGeneric(10, func(p string) { panic(p) }) + p2, err := ants.NewPoolWithFuncGeneric(10, func(p string) { panic(p) }) require.NoErrorf(t, err, "create new pool with func failed: %v", err) defer p2.Release() _ = p2.Invoke("Oops!") } func TestPoolPanicWithoutHandlerPreMalloc(t *testing.T) { - p0, err := NewPool(10, WithPreAlloc(true)) + p0, err := ants.NewPool(10, ants.WithPreAlloc(true)) require.NoErrorf(t, err, "create new pool failed: %v", err) defer p0.Release() _ = p0.Submit(func() { panic("Oops!") }) - p1, err := NewPoolWithFunc(10, func(p any) { + p1, err := ants.NewPoolWithFunc(10, func(p any) { panic(p) }) require.NoErrorf(t, err, "create new pool with func failed: %v", err) defer p1.Release() _ = p1.Invoke("Oops!") - p2, err := NewPoolWithFuncGeneric(10, func(p any) { + p2, err := ants.NewPoolWithFuncGeneric(10, func(p any) { panic(p) }) require.NoErrorf(t, 
err, "create new pool with func failed: %v", err) @@ -438,7 +440,7 @@ func TestPurgePool(t *testing.T) { size := 500 ch := make(chan struct{}) - p, err := NewPool(size) + p, err := ants.NewPool(size) require.NoErrorf(t, err, "create TimingPool failed: %v", err) defer p.Release() @@ -450,11 +452,11 @@ func TestPurgePool(t *testing.T) { time.Sleep(time.Duration(d) * time.Millisecond) }) } - require.Equalf(t, size, p.Running(), "pool should be full, expected: %d, but got: %d", size, p.Running()) + require.EqualValuesf(t, size, p.Running(), "pool should be full, expected: %d, but got: %d", size, p.Running()) close(ch) - time.Sleep(5 * DefaultCleanIntervalTime) - require.Equalf(t, 0, p.Running(), "pool should be empty after purge, but got %d", p.Running()) + time.Sleep(5 * ants.DefaultCleanIntervalTime) + require.EqualValuesf(t, 0, p.Running(), "pool should be empty after purge, but got %d", p.Running()) ch = make(chan struct{}) f := func(i any) { @@ -463,18 +465,18 @@ func TestPurgePool(t *testing.T) { time.Sleep(time.Duration(d) * time.Millisecond) } - p1, err := NewPoolWithFunc(size, f) + p1, err := ants.NewPoolWithFunc(size, f) require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) defer p1.Release() for i := 0; i < size; i++ { _ = p1.Invoke(i) } - require.Equalf(t, size, p1.Running(), "pool should be full, expected: %d, but got: %d", size, p1.Running()) + require.EqualValuesf(t, size, p1.Running(), "pool should be full, expected: %d, but got: %d", size, p1.Running()) close(ch) - time.Sleep(5 * DefaultCleanIntervalTime) - require.Equalf(t, 0, p1.Running(), "pool should be empty after purge, but got %d", p1.Running()) + time.Sleep(5 * ants.DefaultCleanIntervalTime) + require.EqualValuesf(t, 0, p1.Running(), "pool should be empty after purge, but got %d", p1.Running()) ch = make(chan struct{}) f1 := func(i int) { @@ -483,46 +485,46 @@ func TestPurgePool(t *testing.T) { time.Sleep(time.Duration(d) * time.Millisecond) } - p2, err := NewPoolWithFuncGeneric(size, f1) + p2, err := ants.NewPoolWithFuncGeneric(size, f1) require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) defer p2.Release() for i := 0; i < size; i++ { _ = p2.Invoke(i) } - require.Equalf(t, size, p2.Running(), "pool should be full, expected: %d, but got: %d", size, p2.Running()) + require.EqualValuesf(t, size, p2.Running(), "pool should be full, expected: %d, but got: %d", size, p2.Running()) close(ch) - time.Sleep(5 * DefaultCleanIntervalTime) - require.Equalf(t, 0, p2.Running(), "pool should be empty after purge, but got %d", p2.Running()) + time.Sleep(5 * ants.DefaultCleanIntervalTime) + require.EqualValuesf(t, 0, p2.Running(), "pool should be empty after purge, but got %d", p2.Running()) } func TestPurgePreMallocPool(t *testing.T) { - p, err := NewPool(10, WithPreAlloc(true)) + p, err := ants.NewPool(10, ants.WithPreAlloc(true)) require.NoErrorf(t, err, "create TimingPool failed: %v", err) defer p.Release() _ = p.Submit(demoFunc) - time.Sleep(3 * DefaultCleanIntervalTime) + time.Sleep(3 * ants.DefaultCleanIntervalTime) require.EqualValues(t, 0, p.Running(), "all p should be purged") - p1, err := NewPoolWithFunc(10, demoPoolFunc) + p1, err := ants.NewPoolWithFunc(10, demoPoolFunc) require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) defer p1.Release() _ = p1.Invoke(1) - time.Sleep(3 * DefaultCleanIntervalTime) + time.Sleep(3 * ants.DefaultCleanIntervalTime) require.EqualValues(t, 0, p1.Running(), "all p should be purged") - p2, err := NewPoolWithFuncGeneric(10, demoPoolFuncInt) + 
p2, err := ants.NewPoolWithFuncGeneric(10, demoPoolFuncInt) require.NoErrorf(t, err, "create TimingPoolWithFunc failed: %v", err) defer p2.Release() _ = p2.Invoke(1) - time.Sleep(3 * DefaultCleanIntervalTime) + time.Sleep(3 * ants.DefaultCleanIntervalTime) require.EqualValues(t, 0, p2.Running(), "all p should be purged") } func TestNonblockingSubmit(t *testing.T) { poolSize := 10 - p, err := NewPool(poolSize, WithNonblocking(true)) + p, err := ants.NewPool(poolSize, ants.WithNonblocking(true)) require.NoErrorf(t, err, "create TimingPool failed: %v", err) defer p.Release() for i := 0; i < poolSize-1; i++ { @@ -536,8 +538,8 @@ func TestNonblockingSubmit(t *testing.T) { } // p is full now. require.NoError(t, p.Submit(f), "nonblocking submit when pool is not full shouldn't return error") - require.ErrorIsf(t, p.Submit(demoFunc), ErrPoolOverload, - "nonblocking submit when pool is full should get an ErrPoolOverload") + require.ErrorIsf(t, p.Submit(demoFunc), ants.ErrPoolOverload, + "nonblocking submit when pool is full should get an ants.ErrPoolOverload") // interrupt f to get an available worker close(ch) <-ch1 @@ -546,7 +548,7 @@ func TestNonblockingSubmit(t *testing.T) { func TestMaxBlockingSubmit(t *testing.T) { poolSize := 10 - p, err := NewPool(poolSize, WithMaxBlockingTasks(1)) + p, err := ants.NewPool(poolSize, ants.WithMaxBlockingTasks(1)) require.NoErrorf(t, err, "create TimingPool failed: %v", err) defer p.Release() for i := 0; i < poolSize-1; i++ { @@ -570,8 +572,8 @@ func TestMaxBlockingSubmit(t *testing.T) { }() time.Sleep(1 * time.Second) // already reached max blocking limit - require.ErrorIsf(t, p.Submit(demoFunc), ErrPoolOverload, - "blocking submit when pool reach max blocking submit should return ErrPoolOverload") + require.ErrorIsf(t, p.Submit(demoFunc), ants.ErrPoolOverload, + "blocking submit when pool reach max blocking submit should return ants.ErrPoolOverload") // interrupt f to make blocking submit successful. close(ch) wg.Wait() @@ -586,10 +588,10 @@ func TestNonblockingSubmitWithFunc(t *testing.T) { poolSize := 10 ch := make(chan struct{}) var wg sync.WaitGroup - p, err := NewPoolWithFunc(poolSize, func(i any) { + p, err := ants.NewPoolWithFunc(poolSize, func(i any) { longRunningPoolFunc(i) wg.Done() - }, WithNonblocking(true)) + }, ants.WithNonblocking(true)) require.NoError(t, err, "create TimingPool failed: %v", err) defer p.Release() wg.Add(poolSize) @@ -598,8 +600,8 @@ func TestNonblockingSubmitWithFunc(t *testing.T) { } // p is full now. require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") - require.ErrorIsf(t, p.Invoke(nil), ErrPoolOverload, - "nonblocking submit when pool is full should get an ErrPoolOverload") + require.ErrorIsf(t, p.Invoke(nil), ants.ErrPoolOverload, + "nonblocking submit when pool is full should get an ants.ErrPoolOverload") // interrupt f to get an available worker close(ch) wg.Wait() @@ -611,10 +613,10 @@ func TestNonblockingSubmitWithFunc(t *testing.T) { func TestNonblockingSubmitWithFuncGeneric(t *testing.T) { poolSize := 10 var wg sync.WaitGroup - p, err := NewPoolWithFuncGeneric(poolSize, func(ch chan struct{}) { + p, err := ants.NewPoolWithFuncGeneric(poolSize, func(ch chan struct{}) { longRunningPoolFuncCh(ch) wg.Done() - }, WithNonblocking(true)) + }, ants.WithNonblocking(true)) require.NoError(t, err, "create TimingPool failed: %v", err) defer p.Release() ch := make(chan struct{}) @@ -624,8 +626,8 @@ func TestNonblockingSubmitWithFuncGeneric(t *testing.T) { } // p is full now. 
require.NoError(t, p.Invoke(ch), "nonblocking submit when pool is not full shouldn't return error") - require.ErrorIsf(t, p.Invoke(nil), ErrPoolOverload, - "nonblocking submit when pool is full should get an ErrPoolOverload") + require.ErrorIsf(t, p.Invoke(nil), ants.ErrPoolOverload, + "nonblocking submit when pool is full should get an ants.ErrPoolOverload") // interrupt f to get an available worker close(ch) wg.Wait() @@ -637,7 +639,7 @@ func TestNonblockingSubmitWithFuncGeneric(t *testing.T) { func TestMaxBlockingSubmitWithFunc(t *testing.T) { ch := make(chan struct{}) poolSize := 10 - p, err := NewPoolWithFunc(poolSize, longRunningPoolFunc, WithMaxBlockingTasks(1)) + p, err := ants.NewPoolWithFunc(poolSize, longRunningPoolFunc, ants.WithMaxBlockingTasks(1)) require.NoError(t, err, "create TimingPool failed: %v", err) defer p.Release() for i := 0; i < poolSize-1; i++ { @@ -657,8 +659,8 @@ func TestMaxBlockingSubmitWithFunc(t *testing.T) { }() time.Sleep(1 * time.Second) // already reached max blocking limit - require.ErrorIsf(t, p.Invoke(ch), ErrPoolOverload, - "blocking submit when pool reach max blocking submit should return ErrPoolOverload: %v", err) + require.ErrorIsf(t, p.Invoke(ch), ants.ErrPoolOverload, + "blocking submit when pool reach max blocking submit should return ants.ErrPoolOverload: %v", err) // interrupt one func to make blocking submit successful. close(ch) wg.Wait() @@ -671,7 +673,7 @@ func TestMaxBlockingSubmitWithFunc(t *testing.T) { func TestMaxBlockingSubmitWithFuncGeneric(t *testing.T) { poolSize := 10 - p, err := NewPoolWithFuncGeneric(poolSize, longRunningPoolFuncCh, WithMaxBlockingTasks(1)) + p, err := ants.NewPoolWithFuncGeneric(poolSize, longRunningPoolFuncCh, ants.WithMaxBlockingTasks(1)) require.NoError(t, err, "create TimingPool failed: %v", err) defer p.Release() ch := make(chan struct{}) @@ -692,8 +694,8 @@ func TestMaxBlockingSubmitWithFuncGeneric(t *testing.T) { }() time.Sleep(1 * time.Second) // already reached max blocking limit - require.ErrorIsf(t, p.Invoke(ch), ErrPoolOverload, - "blocking submit when pool reach max blocking submit should return ErrPoolOverload: %v", err) + require.ErrorIsf(t, p.Invoke(ch), ants.ErrPoolOverload, + "blocking submit when pool reach max blocking submit should return ants.ErrPoolOverload: %v", err) // interrupt one func to make blocking submit successful. 
close(ch) wg.Wait() @@ -705,26 +707,26 @@ func TestMaxBlockingSubmitWithFuncGeneric(t *testing.T) { } func TestRebootDefaultPool(t *testing.T) { - defer Release() - Reboot() // should do nothing inside + defer ants.Release() + ants.Reboot() // should do nothing inside var wg sync.WaitGroup wg.Add(1) - _ = Submit(func() { + _ = ants.Submit(func() { demoFunc() wg.Done() }) wg.Wait() - require.NoError(t, ReleaseTimeout(time.Second)) - require.ErrorIsf(t, Submit(nil), ErrPoolClosed, "pool should be closed") - Reboot() + require.NoError(t, ants.ReleaseTimeout(time.Second)) + require.ErrorIsf(t, ants.Submit(nil), ants.ErrPoolClosed, "pool should be closed") + ants.Reboot() wg.Add(1) - require.NoError(t, Submit(func() { wg.Done() }), "pool should be rebooted") + require.NoError(t, ants.Submit(func() { wg.Done() }), "pool should be rebooted") wg.Wait() } func TestRebootNewPool(t *testing.T) { var wg sync.WaitGroup - p, err := NewPool(10) + p, err := ants.NewPool(10) require.NoErrorf(t, err, "create Pool failed: %v", err) defer p.Release() wg.Add(1) @@ -734,13 +736,13 @@ func TestRebootNewPool(t *testing.T) { }) wg.Wait() require.NoError(t, p.ReleaseTimeout(time.Second)) - require.ErrorIsf(t, p.Submit(nil), ErrPoolClosed, "pool should be closed") + require.ErrorIsf(t, p.Submit(nil), ants.ErrPoolClosed, "pool should be closed") p.Reboot() wg.Add(1) require.NoError(t, p.Submit(func() { wg.Done() }), "pool should be rebooted") wg.Wait() - p1, err := NewPoolWithFunc(10, func(i any) { + p1, err := ants.NewPoolWithFunc(10, func(i any) { demoPoolFunc(i) wg.Done() }) @@ -750,13 +752,13 @@ func TestRebootNewPool(t *testing.T) { _ = p1.Invoke(1) wg.Wait() require.NoError(t, p1.ReleaseTimeout(time.Second)) - require.ErrorIsf(t, p1.Invoke(nil), ErrPoolClosed, "pool should be closed") + require.ErrorIsf(t, p1.Invoke(nil), ants.ErrPoolClosed, "pool should be closed") p1.Reboot() wg.Add(1) require.NoError(t, p1.Invoke(1), "pool should be rebooted") wg.Wait() - p2, err := NewPoolWithFuncGeneric(10, func(i int) { + p2, err := ants.NewPoolWithFuncGeneric(10, func(i int) { demoPoolFuncInt(i) wg.Done() }) @@ -766,7 +768,7 @@ func TestRebootNewPool(t *testing.T) { _ = p2.Invoke(1) wg.Wait() require.NoError(t, p2.ReleaseTimeout(time.Second)) - require.ErrorIsf(t, p2.Invoke(1), ErrPoolClosed, "pool should be closed") + require.ErrorIsf(t, p2.Invoke(1), ants.ErrPoolClosed, "pool should be closed") p2.Reboot() wg.Add(1) require.NoError(t, p2.Invoke(1), "pool should be rebooted") @@ -775,7 +777,7 @@ func TestRebootNewPool(t *testing.T) { func TestInfinitePool(t *testing.T) { c := make(chan struct{}) - p, _ := NewPool(-1) + p, _ := ants.NewPool(-1) _ = p.Submit(func() { _ = p.Submit(func() { <-c @@ -793,11 +795,11 @@ func TestInfinitePool(t *testing.T) { t.Fatalf("expect capacity: -1 but got %d", capacity) } var err error - _, err = NewPool(-1, WithPreAlloc(true)) - require.EqualErrorf(t, err, ErrInvalidPreAllocSize.Error(), "") + _, err = ants.NewPool(-1, ants.WithPreAlloc(true)) + require.ErrorIs(t, err, ants.ErrInvalidPreAllocSize) } -func testPoolWithDisablePurge(t *testing.T, p *Pool, numWorker int, waitForPurge time.Duration) { +func testPoolWithDisablePurge(t *testing.T, p *ants.Pool, numWorker int, waitForPurge time.Duration) { sig := make(chan struct{}) var wg1, wg2 sync.WaitGroup wg1.Add(numWorker) @@ -838,18 +840,18 @@ func testPoolWithDisablePurge(t *testing.T, p *Pool, numWorker int, waitForPurge func TestWithDisablePurgePool(t *testing.T) { numWorker := 10 - p, _ := NewPool(numWorker, WithDisablePurge(true)) - 
testPoolWithDisablePurge(t, p, numWorker, DefaultCleanIntervalTime) + p, _ := ants.NewPool(numWorker, ants.WithDisablePurge(true)) + testPoolWithDisablePurge(t, p, numWorker, ants.DefaultCleanIntervalTime) } func TestWithDisablePurgeAndWithExpirationPool(t *testing.T) { numWorker := 10 expiredDuration := time.Millisecond * 100 - p, _ := NewPool(numWorker, WithDisablePurge(true), WithExpiryDuration(expiredDuration)) + p, _ := ants.NewPool(numWorker, ants.WithDisablePurge(true), ants.WithExpiryDuration(expiredDuration)) testPoolWithDisablePurge(t, p, numWorker, expiredDuration) } -func testPoolFuncWithDisablePurge(t *testing.T, p *PoolWithFunc, numWorker int, wg1, wg2 *sync.WaitGroup, sig chan struct{}, waitForPurge time.Duration) { +func testPoolFuncWithDisablePurge(t *testing.T, p *ants.PoolWithFunc, numWorker int, wg1, wg2 *sync.WaitGroup, sig chan struct{}, waitForPurge time.Duration) { for i := 0; i < numWorker; i++ { _ = p.Invoke(i) } @@ -886,12 +888,12 @@ func TestWithDisablePurgePoolFunc(t *testing.T) { var wg1, wg2 sync.WaitGroup wg1.Add(numWorker) wg2.Add(numWorker) - p, _ := NewPoolWithFunc(numWorker, func(_ any) { + p, _ := ants.NewPoolWithFunc(numWorker, func(_ any) { wg1.Done() <-sig wg2.Done() - }, WithDisablePurge(true)) - testPoolFuncWithDisablePurge(t, p, numWorker, &wg1, &wg2, sig, DefaultCleanIntervalTime) + }, ants.WithDisablePurge(true)) + testPoolFuncWithDisablePurge(t, p, numWorker, &wg1, &wg2, sig, ants.DefaultCleanIntervalTime) } func TestWithDisablePurgeAndWithExpirationPoolFunc(t *testing.T) { @@ -901,17 +903,17 @@ func TestWithDisablePurgeAndWithExpirationPoolFunc(t *testing.T) { wg1.Add(numWorker) wg2.Add(numWorker) expiredDuration := time.Millisecond * 100 - p, _ := NewPoolWithFunc(numWorker, func(_ any) { + p, _ := ants.NewPoolWithFunc(numWorker, func(_ any) { wg1.Done() <-sig wg2.Done() - }, WithDisablePurge(true), WithExpiryDuration(expiredDuration)) + }, ants.WithDisablePurge(true), ants.WithExpiryDuration(expiredDuration)) testPoolFuncWithDisablePurge(t, p, numWorker, &wg1, &wg2, sig, expiredDuration) } func TestInfinitePoolWithFunc(t *testing.T) { c := make(chan struct{}) - p, err := NewPoolWithFunc(-1, func(i any) { + p, err := ants.NewPoolWithFunc(-1, func(i any) { demoPoolFunc(i) <-c }) @@ -931,13 +933,13 @@ func TestInfinitePoolWithFunc(t *testing.T) { if capacity := p.Cap(); capacity != -1 { t.Fatalf("expect capacity: -1 but got %d", capacity) } - _, err = NewPoolWithFunc(-1, demoPoolFunc, WithPreAlloc(true)) - require.ErrorIsf(t, err, ErrInvalidPreAllocSize, "expect ErrInvalidPreAllocSize but got %v", err) + _, err = ants.NewPoolWithFunc(-1, demoPoolFunc, ants.WithPreAlloc(true)) + require.ErrorIsf(t, err, ants.ErrInvalidPreAllocSize, "expect ErrInvalidPreAllocSize but got %v", err) } func TestInfinitePoolWithFuncGeneric(t *testing.T) { c := make(chan struct{}) - p, err := NewPoolWithFuncGeneric(-1, func(i int) { + p, err := ants.NewPoolWithFuncGeneric(-1, func(i int) { demoPoolFuncInt(i) <-c }) @@ -957,13 +959,13 @@ func TestInfinitePoolWithFuncGeneric(t *testing.T) { if capacity := p.Cap(); capacity != -1 { t.Fatalf("expect capacity: -1 but got %d", capacity) } - _, err = NewPoolWithFuncGeneric(-1, demoPoolFuncInt, WithPreAlloc(true)) - require.ErrorIsf(t, err, ErrInvalidPreAllocSize, "expect ErrInvalidPreAllocSize but got %v", err) + _, err = ants.NewPoolWithFuncGeneric(-1, demoPoolFuncInt, ants.WithPreAlloc(true)) + require.ErrorIsf(t, err, ants.ErrInvalidPreAllocSize, "expect ErrInvalidPreAllocSize but got %v", err) } func 
TestReleaseWhenRunningPool(t *testing.T) { var wg sync.WaitGroup - p, err := NewPool(1) + p, err := ants.NewPool(1) require.NoErrorf(t, err, "create pool failed: %v", err) wg.Add(2) go func() { @@ -1004,7 +1006,7 @@ func TestReleaseWhenRunningPool(t *testing.T) { func TestReleaseWhenRunningPoolWithFunc(t *testing.T) { var wg sync.WaitGroup - p, err := NewPoolWithFunc(1, func(i any) { + p, err := ants.NewPoolWithFunc(1, func(i any) { t.Log("do task", i) time.Sleep(1 * time.Second) }) @@ -1041,7 +1043,7 @@ func TestReleaseWhenRunningPoolWithFunc(t *testing.T) { func TestReleaseWhenRunningPoolWithFuncGeneric(t *testing.T) { var wg sync.WaitGroup - p, err := NewPoolWithFuncGeneric(1, func(i int) { + p, err := ants.NewPoolWithFuncGeneric(1, func(i int) { t.Log("do task", i) time.Sleep(1 * time.Second) }) @@ -1077,32 +1079,32 @@ func TestReleaseWhenRunningPoolWithFuncGeneric(t *testing.T) { } func TestRestCodeCoverage(t *testing.T) { - _, err := NewPool(-1, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) - _, err = NewPool(1, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) - _, err = NewPoolWithFunc(-1, demoPoolFunc, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) - _, err = NewPoolWithFunc(1, demoPoolFunc, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) - _, err = NewPoolWithFunc(1, nil, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrLackPoolFunc) - _, err = NewPoolWithFuncGeneric(-1, demoPoolFuncInt, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) - _, err = NewPoolWithFuncGeneric(1, demoPoolFuncInt, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) + _, err := ants.NewPool(-1, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) + _, err = ants.NewPool(1, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) + _, err = ants.NewPoolWithFunc(-1, demoPoolFunc, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) + _, err = ants.NewPoolWithFunc(1, demoPoolFunc, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) + _, err = ants.NewPoolWithFunc(1, nil, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrLackPoolFunc) + _, err = ants.NewPoolWithFuncGeneric(-1, demoPoolFuncInt, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) + _, err = ants.NewPoolWithFuncGeneric(1, demoPoolFuncInt, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) var fn func(i int) - _, err = NewPoolWithFuncGeneric(1, fn, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrLackPoolFunc) + _, err = ants.NewPoolWithFuncGeneric(1, fn, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrLackPoolFunc) - options := Options{} + options := ants.Options{} options.ExpiryDuration = time.Duration(10) * time.Second options.Nonblocking = true options.PreAlloc = true - poolOpts, _ := NewPool(1, WithOptions(options)) + poolOpts, _ := ants.NewPool(1, ants.WithOptions(options)) t.Logf("Pool with options, capacity: %d", poolOpts.Cap()) - p0, _ := NewPool(TestSize, WithLogger(log.New(os.Stderr, "", log.LstdFlags))) + p0, _ := ants.NewPool(TestSize, ants.WithLogger(log.New(os.Stderr, "", log.LstdFlags))) defer func() { _ = p0.Submit(demoFunc) }() @@ -1117,7 +1119,7 @@ func TestRestCodeCoverage(t *testing.T) { p0.Tune(TestSize / 10) t.Logf("pool, after tuning capacity, capacity:%d, running:%d", p0.Cap(), 
p0.Running()) - p1, _ := NewPool(TestSize, WithPreAlloc(true)) + p1, _ := ants.NewPool(TestSize, ants.WithPreAlloc(true)) defer func() { _ = p1.Submit(demoFunc) }() @@ -1132,7 +1134,7 @@ func TestRestCodeCoverage(t *testing.T) { p1.Tune(TestSize / 10) t.Logf("pre-malloc pool, after tuning capacity, capacity:%d, running:%d", p1.Cap(), p1.Running()) - p2, _ := NewPoolWithFunc(TestSize, demoPoolFunc) + p2, _ := ants.NewPoolWithFunc(TestSize, demoPoolFunc) defer func() { _ = p2.Invoke(Param) }() @@ -1140,7 +1142,7 @@ func TestRestCodeCoverage(t *testing.T) { for i := 0; i < n; i++ { _ = p2.Invoke(Param) } - time.Sleep(DefaultCleanIntervalTime) + time.Sleep(ants.DefaultCleanIntervalTime) t.Logf("pool with func, capacity:%d", p2.Cap()) t.Logf("pool with func, running workers number:%d", p2.Running()) t.Logf("pool with func, free workers number:%d", p2.Free()) @@ -1148,7 +1150,7 @@ func TestRestCodeCoverage(t *testing.T) { p2.Tune(TestSize / 10) t.Logf("pool with func, after tuning capacity, capacity:%d, running:%d", p2.Cap(), p2.Running()) - p3, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt) + p3, _ := ants.NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt) defer func() { _ = p3.Invoke(Param) }() @@ -1156,7 +1158,7 @@ func TestRestCodeCoverage(t *testing.T) { for i := 0; i < n; i++ { _ = p3.Invoke(Param) } - time.Sleep(DefaultCleanIntervalTime) + time.Sleep(ants.DefaultCleanIntervalTime) t.Logf("pool with func, capacity:%d", p3.Cap()) t.Logf("pool with func, running workers number:%d", p3.Running()) t.Logf("pool with func, free workers number:%d", p3.Free()) @@ -1164,7 +1166,7 @@ func TestRestCodeCoverage(t *testing.T) { p3.Tune(TestSize / 10) t.Logf("pool with func, after tuning capacity, capacity:%d, running:%d", p3.Cap(), p3.Running()) - p4, _ := NewPoolWithFunc(TestSize, demoPoolFunc, WithPreAlloc(true)) + p4, _ := ants.NewPoolWithFunc(TestSize, demoPoolFunc, ants.WithPreAlloc(true)) defer func() { _ = p4.Invoke(Param) }() @@ -1172,7 +1174,7 @@ func TestRestCodeCoverage(t *testing.T) { for i := 0; i < n; i++ { _ = p4.Invoke(Param) } - time.Sleep(DefaultCleanIntervalTime) + time.Sleep(ants.DefaultCleanIntervalTime) t.Logf("pre-malloc pool with func, capacity:%d", p4.Cap()) t.Logf("pre-malloc pool with func, running workers number:%d", p4.Running()) t.Logf("pre-malloc pool with func, free workers number:%d", p4.Free()) @@ -1181,7 +1183,7 @@ func TestRestCodeCoverage(t *testing.T) { t.Logf("pre-malloc pool with func, after tuning capacity, capacity:%d, running:%d", p4.Cap(), p4.Running()) - p5, _ := NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt, WithPreAlloc(true)) + p5, _ := ants.NewPoolWithFuncGeneric(TestSize, demoPoolFuncInt, ants.WithPreAlloc(true)) defer func() { _ = p5.Invoke(Param) }() @@ -1189,7 +1191,7 @@ func TestRestCodeCoverage(t *testing.T) { for i := 0; i < n; i++ { _ = p5.Invoke(Param) } - time.Sleep(DefaultCleanIntervalTime) + time.Sleep(ants.DefaultCleanIntervalTime) t.Logf("pre-malloc pool with func, capacity:%d", p5.Cap()) t.Logf("pre-malloc pool with func, running workers number:%d", p5.Running()) t.Logf("pre-malloc pool with func, free workers number:%d", p5.Free()) @@ -1202,7 +1204,7 @@ func TestRestCodeCoverage(t *testing.T) { func TestPoolTuneScaleUp(t *testing.T) { c := make(chan struct{}) // Test Pool - p, _ := NewPool(2) + p, _ := ants.NewPool(2) for i := 0; i < 2; i++ { _ = p.Submit(func() { <-c @@ -1238,7 +1240,7 @@ func TestPoolTuneScaleUp(t *testing.T) { p.Release() // Test PoolWithFunc - pf, _ := NewPoolWithFunc(2, func(_ any) { + pf, _ := 
ants.NewPoolWithFunc(2, func(_ any) { <-c }) for i := 0; i < 2; i++ { @@ -1269,7 +1271,7 @@ func TestPoolTuneScaleUp(t *testing.T) { pf.Release() // Test PoolWithFuncGeneric - pfg, _ := NewPoolWithFuncGeneric(2, func(_ int) { + pfg, _ := ants.NewPoolWithFuncGeneric(2, func(_ int) { <-c }) for i := 0; i < 2; i++ { @@ -1302,7 +1304,7 @@ func TestPoolTuneScaleUp(t *testing.T) { } func TestReleaseTimeout(t *testing.T) { - p, err := NewPool(10) + p, err := ants.NewPool(10) require.NoError(t, err) for i := 0; i < 5; i++ { _ = p.Submit(func() { @@ -1313,7 +1315,7 @@ func TestReleaseTimeout(t *testing.T) { err = p.ReleaseTimeout(2 * time.Second) require.NoError(t, err) - pf, err := NewPoolWithFunc(10, func(i any) { + pf, err := ants.NewPoolWithFunc(10, func(i any) { dur := i.(time.Duration) time.Sleep(dur) }) @@ -1325,7 +1327,7 @@ func TestReleaseTimeout(t *testing.T) { err = pf.ReleaseTimeout(2 * time.Second) require.NoError(t, err) - pfg, err := NewPoolWithFuncGeneric(10, func(d time.Duration) { + pfg, err := ants.NewPoolWithFuncGeneric(10, func(d time.Duration) { time.Sleep(d) }) require.NoError(t, err) @@ -1338,26 +1340,26 @@ func TestReleaseTimeout(t *testing.T) { } func TestDefaultPoolReleaseTimeout(t *testing.T) { - Reboot() // should do nothing inside + ants.Reboot() // should do nothing inside for i := 0; i < 5; i++ { - _ = Submit(func() { + _ = ants.Submit(func() { time.Sleep(time.Second) }) } - require.NotZero(t, Running()) - err := ReleaseTimeout(2 * time.Second) + require.NotZero(t, ants.Running()) + err := ants.ReleaseTimeout(2 * time.Second) require.NoError(t, err) } func TestMultiPool(t *testing.T) { - _, err := NewMultiPool(-1, 10, 8) - require.ErrorIs(t, err, ErrInvalidMultiPoolSize) - _, err = NewMultiPool(10, -1, 8) - require.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) - _, err = NewMultiPool(10, 10, RoundRobin, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) - - mp, err := NewMultiPool(10, 5, RoundRobin) + _, err := ants.NewMultiPool(-1, 10, 8) + require.ErrorIs(t, err, ants.ErrInvalidMultiPoolSize) + _, err = ants.NewMultiPool(10, -1, 8) + require.ErrorIs(t, err, ants.ErrInvalidLoadBalancingStrategy) + _, err = ants.NewMultiPool(10, 10, ants.RoundRobin, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) + + mp, err := ants.NewMultiPool(10, 5, ants.RoundRobin) testFn := func() { for i := 0; i < 50; i++ { err = mp.Submit(longRunningFunc) @@ -1365,19 +1367,19 @@ func TestMultiPool(t *testing.T) { } require.EqualValues(t, mp.Waiting(), 0) _, err = mp.WaitingByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.WaitingByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 50, mp.Running()) _, err = mp.RunningByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.RunningByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 0, mp.Free()) _, err = mp.FreeByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.FreeByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 50, mp.Cap()) require.False(t, mp.IsClosed()) for i := 0; i < 10; i++ { @@ -1390,8 +1392,8 @@ func TestMultiPool(t *testing.T) 
{ } atomic.StoreInt32(&stopLongRunningFunc, 1) require.NoError(t, mp.ReleaseTimeout(3*time.Second)) - require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ErrPoolClosed) - require.ErrorIs(t, mp.Submit(nil), ErrPoolClosed) + require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ants.ErrPoolClosed) + require.ErrorIs(t, mp.Submit(nil), ants.ErrPoolClosed) require.Zero(t, mp.Running()) require.True(t, mp.IsClosed()) atomic.StoreInt32(&stopLongRunningFunc, 0) @@ -1401,7 +1403,7 @@ func TestMultiPool(t *testing.T) { mp.Reboot() testFn() - mp, err = NewMultiPool(10, 5, LeastTasks) + mp, err = ants.NewMultiPool(10, 5, ants.LeastTasks) testFn() mp.Reboot() @@ -1411,15 +1413,15 @@ func TestMultiPool(t *testing.T) { } func TestMultiPoolWithFunc(t *testing.T) { - _, err := NewMultiPoolWithFunc(-1, 10, longRunningPoolFunc, 8) - require.ErrorIs(t, err, ErrInvalidMultiPoolSize) - _, err = NewMultiPoolWithFunc(10, -1, longRunningPoolFunc, 8) - require.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) - _, err = NewMultiPoolWithFunc(10, 10, longRunningPoolFunc, RoundRobin, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) + _, err := ants.NewMultiPoolWithFunc(-1, 10, longRunningPoolFunc, 8) + require.ErrorIs(t, err, ants.ErrInvalidMultiPoolSize) + _, err = ants.NewMultiPoolWithFunc(10, -1, longRunningPoolFunc, 8) + require.ErrorIs(t, err, ants.ErrInvalidLoadBalancingStrategy) + _, err = ants.NewMultiPoolWithFunc(10, 10, longRunningPoolFunc, ants.RoundRobin, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) ch := make(chan struct{}) - mp, err := NewMultiPoolWithFunc(10, 5, longRunningPoolFunc, RoundRobin) + mp, err := ants.NewMultiPoolWithFunc(10, 5, longRunningPoolFunc, ants.RoundRobin) testFn := func() { for i := 0; i < 50; i++ { err = mp.Invoke(ch) @@ -1427,19 +1429,19 @@ func TestMultiPoolWithFunc(t *testing.T) { } require.EqualValues(t, mp.Waiting(), 0) _, err = mp.WaitingByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.WaitingByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 50, mp.Running()) _, err = mp.RunningByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.RunningByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 0, mp.Free()) _, err = mp.FreeByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.FreeByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 50, mp.Cap()) require.False(t, mp.IsClosed()) for i := 0; i < 10; i++ { @@ -1452,8 +1454,8 @@ func TestMultiPoolWithFunc(t *testing.T) { } close(ch) require.NoError(t, mp.ReleaseTimeout(3*time.Second)) - require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ErrPoolClosed) - require.ErrorIs(t, mp.Invoke(nil), ErrPoolClosed) + require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ants.ErrPoolClosed) + require.ErrorIs(t, mp.Invoke(nil), ants.ErrPoolClosed) require.Zero(t, mp.Running()) require.True(t, mp.IsClosed()) ch = make(chan struct{}) @@ -1463,7 +1465,7 @@ func TestMultiPoolWithFunc(t *testing.T) { mp.Reboot() testFn() - mp, err = NewMultiPoolWithFunc(10, 5, longRunningPoolFunc, LeastTasks) + mp, err = ants.NewMultiPoolWithFunc(10, 5, 
longRunningPoolFunc, ants.LeastTasks) testFn() mp.Reboot() @@ -1473,15 +1475,15 @@ func TestMultiPoolWithFunc(t *testing.T) { } func TestMultiPoolWithFuncGeneric(t *testing.T) { - _, err := NewMultiPoolWithFuncGeneric(-1, 10, longRunningPoolFuncCh, 8) - require.ErrorIs(t, err, ErrInvalidMultiPoolSize) - _, err = NewMultiPoolWithFuncGeneric(10, -1, longRunningPoolFuncCh, 8) - require.ErrorIs(t, err, ErrInvalidLoadBalancingStrategy) - _, err = NewMultiPoolWithFuncGeneric(10, 10, longRunningPoolFuncCh, RoundRobin, WithExpiryDuration(-1)) - require.ErrorIs(t, err, ErrInvalidPoolExpiry) + _, err := ants.NewMultiPoolWithFuncGeneric(-1, 10, longRunningPoolFuncCh, 8) + require.ErrorIs(t, err, ants.ErrInvalidMultiPoolSize) + _, err = ants.NewMultiPoolWithFuncGeneric(10, -1, longRunningPoolFuncCh, 8) + require.ErrorIs(t, err, ants.ErrInvalidLoadBalancingStrategy) + _, err = ants.NewMultiPoolWithFuncGeneric(10, 10, longRunningPoolFuncCh, ants.RoundRobin, ants.WithExpiryDuration(-1)) + require.ErrorIs(t, err, ants.ErrInvalidPoolExpiry) ch := make(chan struct{}) - mp, err := NewMultiPoolWithFuncGeneric(10, 5, longRunningPoolFuncCh, RoundRobin) + mp, err := ants.NewMultiPoolWithFuncGeneric(10, 5, longRunningPoolFuncCh, ants.RoundRobin) testFn := func() { for i := 0; i < 50; i++ { err = mp.Invoke(ch) @@ -1489,19 +1491,19 @@ func TestMultiPoolWithFuncGeneric(t *testing.T) { } require.EqualValues(t, mp.Waiting(), 0) _, err = mp.WaitingByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.WaitingByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 50, mp.Running()) _, err = mp.RunningByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.RunningByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 0, mp.Free()) _, err = mp.FreeByIndex(-1) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) _, err = mp.FreeByIndex(11) - require.ErrorIs(t, err, ErrInvalidPoolIndex) + require.ErrorIs(t, err, ants.ErrInvalidPoolIndex) require.EqualValues(t, 50, mp.Cap()) require.False(t, mp.IsClosed()) for i := 0; i < 10; i++ { @@ -1514,8 +1516,8 @@ func TestMultiPoolWithFuncGeneric(t *testing.T) { } close(ch) require.NoError(t, mp.ReleaseTimeout(3*time.Second)) - require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ErrPoolClosed) - require.ErrorIs(t, mp.Invoke(nil), ErrPoolClosed) + require.ErrorIs(t, mp.ReleaseTimeout(3*time.Second), ants.ErrPoolClosed) + require.ErrorIs(t, mp.Invoke(nil), ants.ErrPoolClosed) require.Zero(t, mp.Running()) require.True(t, mp.IsClosed()) ch = make(chan struct{}) @@ -1525,7 +1527,7 @@ func TestMultiPoolWithFuncGeneric(t *testing.T) { mp.Reboot() testFn() - mp, err = NewMultiPoolWithFuncGeneric(10, 5, longRunningPoolFuncCh, LeastTasks) + mp, err = ants.NewMultiPoolWithFuncGeneric(10, 5, longRunningPoolFuncCh, ants.LeastTasks) testFn() mp.Reboot() diff --git a/example_test.go b/example_test.go new file mode 100644 index 00000000..56c8645e --- /dev/null +++ b/example_test.go @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2025. Andy Pan. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package ants_test + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/panjf2000/ants/v2" +) + +var ( + sum int32 + wg sync.WaitGroup +) + +func incSum(i any) { + incSumInt(i.(int32)) +} + +func incSumInt(i int32) { + atomic.AddInt32(&sum, i) + wg.Done() +} + +func ExamplePool() { + ants.Reboot() // ensure the default pool is available + + atomic.StoreInt32(&sum, 0) + runTimes := 1000 + wg.Add(runTimes) + // Use the default pool. + for i := 0; i < runTimes; i++ { + j := i + _ = ants.Submit(func() { + incSumInt(int32(j)) + }) + } + wg.Wait() + fmt.Printf("The result is %d\n", sum) + + atomic.StoreInt32(&sum, 0) + wg.Add(runTimes) + // Use the new pool. + pool, _ := ants.NewPool(10) + defer pool.Release() + for i := 0; i < runTimes; i++ { + j := i + _ = pool.Submit(func() { + incSumInt(int32(j)) + }) + } + wg.Wait() + fmt.Printf("The result is %d\n", sum) + + // Output: + // The result is 499500 + // The result is 499500 +} + +func ExamplePoolWithFunc() { + atomic.StoreInt32(&sum, 0) + runTimes := 1000 + wg.Add(runTimes) + + pool, _ := ants.NewPoolWithFunc(10, incSum) + defer pool.Release() + + for i := 0; i < runTimes; i++ { + _ = pool.Invoke(int32(i)) + } + wg.Wait() + + fmt.Printf("The result is %d\n", sum) + + // Output: The result is 499500 +} + +func ExamplePoolWithFuncGeneric() { + atomic.StoreInt32(&sum, 0) + runTimes := 1000 + wg.Add(runTimes) + + pool, _ := ants.NewPoolWithFuncGeneric(10, incSumInt) + defer pool.Release() + + for i := 0; i < runTimes; i++ { + _ = pool.Invoke(int32(i)) + } + wg.Wait() + + fmt.Printf("The result is %d\n", sum) + + // Output: The result is 499500 +} + +func ExampleMultiPool() { + atomic.StoreInt32(&sum, 0) + runTimes := 1000 + wg.Add(runTimes) + + mp, _ := ants.NewMultiPool(10, runTimes/10, ants.RoundRobin) + defer mp.ReleaseTimeout(time.Second) // nolint:errcheck + + for i := 0; i < runTimes; i++ { + j := i + _ = mp.Submit(func() { + incSumInt(int32(j)) + }) + } + wg.Wait() + + fmt.Printf("The result is %d\n", sum) + + // Output: The result is 499500 +} + +func ExampleMultiPoolWithFunc() { + atomic.StoreInt32(&sum, 0) + runTimes := 1000 + wg.Add(runTimes) + + mp, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, incSum, ants.RoundRobin) + defer mp.ReleaseTimeout(time.Second) // nolint:errcheck + + for i := 0; i < runTimes; i++ { + _ = mp.Invoke(int32(i)) + } + wg.Wait() + + fmt.Printf("The result is %d\n", sum) + + // Output: The result is 499500 +} + 
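+
+// ExamplePool_panicHandler is a minimal illustrative sketch: it wires up
+// ants.WithPanicHandler (the option exercised by TestPanicHandler in
+// ants_test.go) to intercept a panicking task. The done channel merely
+// makes the printed output deterministic.
+func ExamplePool_panicHandler() {
+	done := make(chan struct{})
+
+	pool, _ := ants.NewPool(1, ants.WithPanicHandler(func(v any) {
+		// v is the value recovered from the panicking task.
+		fmt.Printf("task panicked: %v\n", v)
+		close(done)
+	}))
+	defer pool.Release()
+
+	_ = pool.Submit(func() {
+		panic("Oops!")
+	})
+	<-done
+
+	// Output: task panicked: Oops!
+}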
+func ExampleMultiPoolWithFuncGeneric() { + atomic.StoreInt32(&sum, 0) + runTimes := 1000 + wg.Add(runTimes) + + mp, _ := ants.NewMultiPoolWithFuncGeneric(10, runTimes/10, incSumInt, ants.RoundRobin) + defer mp.ReleaseTimeout(time.Second) // nolint:errcheck + + for i := 0; i < runTimes; i++ { + _ = mp.Invoke(int32(i)) + } + wg.Wait() + + fmt.Printf("The result is %d\n", sum) + + // Output: The result is 499500 +} diff --git a/examples/main.go b/examples/main.go deleted file mode 100644 index bc00ef83..00000000 --- a/examples/main.go +++ /dev/null @@ -1,114 +0,0 @@ -// MIT License - -// Copyright (c) 2018 Andy Pan - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package main - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/panjf2000/ants/v2" -) - -var sum int32 - -func myFunc(i any) { - n := i.(int32) - atomic.AddInt32(&sum, n) - fmt.Printf("run with %d\n", n) -} - -func demoFunc() { - time.Sleep(10 * time.Millisecond) - fmt.Println("Hello World!") -} - -func main() { - defer ants.Release() - - runTimes := 1000 - - // Use the common pool. - var wg sync.WaitGroup - syncCalculateSum := func() { - demoFunc() - wg.Done() - } - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = ants.Submit(syncCalculateSum) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", ants.Running()) - fmt.Printf("finish all tasks.\n") - - // Use the pool with a function, - // set 10 to the capacity of goroutine pool and 1 second for expired duration. - p, _ := ants.NewPoolWithFunc(10, func(i any) { - myFunc(i) - wg.Done() - }) - defer p.Release() - // Submit tasks one by one. - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = p.Invoke(int32(i)) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", p.Running()) - fmt.Printf("finish all tasks, result is %d\n", sum) - if sum != 499500 { - panic("the final result is wrong!!!") - } - - // Use the MultiPool and set the capacity of the 10 goroutine pools to unlimited. - // If you use -1 as the pool size parameter, the size will be unlimited. - // There are two load-balancing algorithms for pools: ants.RoundRobin and ants.LeastTasks. - mp, _ := ants.NewMultiPool(10, -1, ants.RoundRobin) - defer mp.ReleaseTimeout(5 * time.Second) - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = mp.Submit(syncCalculateSum) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", mp.Running()) - fmt.Printf("finish all tasks.\n") - - // Use the MultiPoolFunc and set the capacity of 10 goroutine pools to (runTimes/10). 
- mpf, _ := ants.NewMultiPoolWithFunc(10, runTimes/10, func(i any) { - myFunc(i) - wg.Done() - }, ants.LeastTasks) - defer mpf.ReleaseTimeout(5 * time.Second) - for i := 0; i < runTimes; i++ { - wg.Add(1) - _ = mpf.Invoke(int32(i)) - } - wg.Wait() - fmt.Printf("running goroutines: %d\n", mpf.Running()) - fmt.Printf("finish all tasks, result is %d\n", sum) - if sum != 499500*2 { - panic("the final result is wrong!!!") - } -} diff --git a/options.go b/options.go index b859bef3..182cd5af 100644 --- a/options.go +++ b/options.go @@ -1,3 +1,25 @@ +/* + * Copyright (c) 2018. Andy Pan. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + package ants import "time" diff --git a/worker_loop_queue.go b/worker_loop_queue.go index 52091f31..b3729839 100644 --- a/worker_loop_queue.go +++ b/worker_loop_queue.go @@ -1,3 +1,25 @@ +/* + * Copyright (c) 2019. Ants Authors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + package ants import "time" diff --git a/worker_loop_queue_test.go b/worker_loop_queue_test.go index 8e043946..83074ddc 100644 --- a/worker_loop_queue_test.go +++ b/worker_loop_queue_test.go @@ -1,8 +1,29 @@ -//go:build !windows +/* + * Copyright (c) 2019. Ants Authors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ package ants import ( + "runtime" "testing" "time" @@ -51,6 +72,10 @@ func TestLoopQueue(t *testing.T) { } func TestRotatedQueueSearch(t *testing.T) { + if runtime.GOOS == "windows" { // time.Now() doesn't seem to be precise on Windows + t.Skip("Skip this test on Windows platform") + } + size := 10 q := newWorkerLoopQueue(size) diff --git a/worker_queue.go b/worker_queue.go index 4131972a..948bc914 100644 --- a/worker_queue.go +++ b/worker_queue.go @@ -1,3 +1,25 @@ +/* + * Copyright (c) 2019. Ants Authors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + package ants import ( diff --git a/worker_stack.go b/worker_stack.go index 8eb12ab7..18dcd23b 100644 --- a/worker_stack.go +++ b/worker_stack.go @@ -1,3 +1,25 @@ +/* + * Copyright (c) 2019. Ants Authors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + package ants import "time" diff --git a/worker_stack_test.go b/worker_stack_test.go index 87fca0d2..410bf578 100644 --- a/worker_stack_test.go +++ b/worker_stack_test.go @@ -1,8 +1,29 @@ -//go:build !windows +/* + * Copyright (c) 2019. Ants Authors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ package ants import ( + "runtime" "testing" "time" @@ -51,6 +72,10 @@ func TestWorkerStack(t *testing.T) { // It seems that something wrong with time.Now() on Windows, not sure whether it is a bug on Windows, // so exclude this test from Windows platform temporarily. func TestSearch(t *testing.T) { + if runtime.GOOS == "windows" { // time.Now() doesn't seem to be precise on Windows + t.Skip("Skip this test on Windows platform") + } + q := newWorkerStack(0) // 1