Compare commits

..

93 Commits

Author SHA1 Message Date
Kevin Wan
b3e7d2901f Feature/trie ac automation (#1479)
* fix: trie ac automation issues

* fix: trie ac automation issues

* fix: trie ac automation issues

* fix: trie ac automation issues
2022-01-25 11:14:56 +08:00
anqiansong
cdf7ec213c fix #1468 (#1478)
Co-authored-by: anqiansong <anqiansong@bytedance.com>
2022-01-24 22:23:20 +08:00
Kevin Wan
f1102fb262 chore: optimize string search with Aho–Corasick algorithm (#1476)
* chore: optimize string search with Aho–Corasick algorithm

* chore: optimize keywords replacer

* fix: replacer bugs

* chore: reorder members
2022-01-23 23:37:02 +08:00
Keqi Huang
09d1fad6e0 Polish the words in readme.md (#1475) 2022-01-22 12:20:11 +08:00
Kevin Wan
379c65a3ef docs: add go-zero users (#1473) 2022-01-20 22:36:17 +08:00
Kevin Wan
fdc7f64d6f chore: update unauthorized callback calling order (#1469)
* chore: update unauthorized callback calling order

* chore: add comments
2022-01-20 21:09:45 +08:00
anqiansong
df0f8ed59e Fix/issue#1289 (#1460)
* fix #1289

* Add unit test case

* fix `jwtTransKey`

* fix `jwtTransKey`

Co-authored-by: anqiansong <anqiansong@bytedance.com>
2022-01-18 11:52:30 +08:00
anqiansong
c903966fc7 patch: save missing templates to disk (#1463)
Co-authored-by: anqiansong <anqiansong@bytedance.com>
2022-01-18 10:45:05 +08:00
anqiansong
e57fa8ff53 Fix/issue#1447 (#1458)
* Add data for template to render

* fix #1447

Co-authored-by: anqiansong <anqiansong@bytedance.com>
2022-01-18 10:36:38 +08:00
Kevin Wan
bf2feee5b7 feat: implement console plain output for debug logs (#1456)
* feat: implement console plain output for debug logs

* chore: rename console encoding to plain

* chore: refactor names
2022-01-17 12:43:15 +08:00
Letian Jiang
ce05c429fc chore: check interface satisfaction w/o allocating new variable (#1454) 2022-01-16 23:34:42 +08:00
Kevin Wan
272a3f347d chore: remove jwt deprecated (#1452) 2022-01-16 10:34:44 +08:00
shenbaise9527
13db7a1931 feat: support the redis LTrim method (#1443) 2022-01-16 10:27:34 +08:00
Kevin Wan
468c237189 chore: upgrade dependencies (#1444)
* chore: upgrade dependencies

* ci: upgrade go to 1.15
2022-01-14 11:01:02 +08:00
Kevin Wan
b9b80c068b ci: add translator action (#1441) 2022-01-12 17:57:39 +08:00
anqiansong
9b592b3dee Feature rpc protoc (#1251)
* code generation by protoc

* generate pb by protoc direct

* support: grpc code generation by protoc directly

* format code

* check --go_out & --go-grpc_out

* fix typo

* Update version

* fix typo

* optimize: remove deprecated unit test

* format code

Co-authored-by: anqiansong <anqiansong@bytedance.com>
2022-01-11 20:34:25 +08:00
Kevin Wan
2203809e5e chore: fix typo (#1437) 2022-01-11 20:23:59 +08:00
Kevin Wan
8d6d37f71e remove unnecessary drain, fix data race (#1435)
* remove unnecessary drain, fix data race

* chore: fix parameter order

* refactor: rename MapVoid to ForEach in mr
2022-01-11 16:17:51 +08:00
Kevin Wan
ea4f2af67f fix: mr goroutine leak on context deadline (#1433)
* fix: mr goroutine leak on context deadline

* test: update fx test check
2022-01-10 22:06:10 +08:00
Kevin Wan
53af194ef9 chore: refactor periodlimit (#1428)
* chore: refactor periodlimit

* chore: add comments
2022-01-09 16:22:34 +08:00
Kevin Wan
5e0e2d2b14 docs: add go-zero users (#1425) 2022-01-08 21:41:27 +08:00
Kevin Wan
74c99184c5 docs: add go-zero users (#1424) 2022-01-08 17:08:44 +08:00
Kevin Wan
eb4b86137a fix: golint issue (#1423) 2022-01-08 16:06:56 +08:00
Kevin Wan
9c4f4f3b4e update docs (#1421) 2022-01-07 12:08:45 +08:00
spectatorMrZ
240132e7c7 Fix pg model generation without tag (#1407)
1. fix pg model struct missing tags
2. add pg command test from datasource
2022-01-07 10:45:26 +08:00
anqiansong
9d67fc4cfb feat: Add migrate (#1419)
* Add migrate

* Remove unused module

* refactor filename

* rename refactor to migrate

Co-authored-by: anqiansong <anqiansong@bytedance.com>
2022-01-06 18:48:34 +08:00
Kevin Wan
892f93a716 docs: update install readme (#1417) 2022-01-05 12:31:49 +08:00
Kevin Wan
ba6a7c9dc8 chore: refactor rest/timeouthandler (#1415) 2022-01-05 11:17:10 +08:00
Kevin Wan
a91c3907a8 feat: rename module from tal-tech to zeromicro (#1413) 2022-01-04 15:51:32 +08:00
Kevin Wan
e267d94ee1 chore: update go-zero to v1.2.5 (#1410) 2022-01-03 21:54:53 +08:00
anqiansong
89ce5e492b refactor file|path (#1409)
Co-authored-by: anqiansong <anqiansong@bytedance.com>
2022-01-03 21:32:40 +08:00
Kevin Wan
290de6aa96 docs: update roadmap (#1405) 2022-01-02 21:30:02 +08:00
Kevin Wan
a7aeb8ac0e feat: support tls for etcd client (#1390)
* feat: support tls for etcd client

* chore: fix typo

* refactor: rename TrustedCAFile to CACertFile

* docs: add comments

* fix: missing tls registration

* feat: add InsecureSkipVerify config for testing
2022-01-02 20:23:50 +08:00
Kevin Wan
a8e7fafebf refactor: optimize fx (#1404)
* refactor: optimize fx

* chore: add more comments

* ci: make test robust
2022-01-02 14:56:30 +08:00
Kevin Wan
7cc64070b1 docs: update goctl installation command (#1403) 2022-01-02 14:31:31 +08:00
Kevin Wan
c19d2637ea feat: implement fx.NoneMatch, fx.First, fx.Last (#1402)
* chore: use workers from options in fx.unlimitedWalk

* feat: add fx.NoneMatch

* feat: add fx.First, fx.Last

* chore: add more comments

* docs: add mr readme
2022-01-02 13:33:15 +08:00
Kevin Wan
fe1da14332 chore: simplify mapreduce (#1401) 2022-01-01 19:24:35 +08:00
anqiansong
8e9110cedf fix #1330 (#1382)
Co-authored-by: anqiansong <anqiansong@bytedance.com>
2021-12-30 20:44:04 +08:00
Kevin Wan
d6ff30a570 chore: fix golint issues (#1396) 2021-12-30 17:44:15 +08:00
Kevin Wan
b98d46bfd6 chore: update goctl version (#1394) 2021-12-30 15:30:16 +08:00
Kevin Wan
768936b256 ci: remove 386 binaries (#1393) 2021-12-30 15:18:24 +08:00
Kevin Wan
c6eb1a9670 ci: remove windows 386 binary (#1392)
* ci: remove windows 386 binary

* chore: update go-zero

* chore: update go-zero
2021-12-30 14:47:53 +08:00
Kevin Wan
e4ab518576 test: add more tests (#1391) 2021-12-30 14:21:55 +08:00
moyrne
dfc67b5fac fix readme-cn (#1388) 2021-12-30 10:42:23 +08:00
Kevin Wan
62266d8f91 fix #1070 (#1389)
* fix #1070

* test: add more tests
2021-12-29 21:34:28 +08:00
anqiansong
b8ea16a88e feat: Add --remote (#1387)
Co-authored-by: anqiansong <anqiansong@bytedance.com>
2021-12-29 18:16:42 +08:00
Kevin Wan
23deaf50e6 feat: support array in default and options tags (#1386)
* feat: support array in default and options tags

* feat: ignore spaces in tags

* test: add more tests
2021-12-29 17:37:36 +08:00
Kevin Wan
38a36ed8d3 docs: add go-zero users (#1381) 2021-12-28 17:12:51 +08:00
anqiansong
49bab23c54 fix #1376 (#1380)
* fix #1376

* fix #1376

Co-authored-by: anqiansong <anqiansong@bytedance.com>
2021-12-28 16:40:26 +08:00
Leizhengzi
78ba00d3a7 fix: command system info missing go version (#1377) 2021-12-27 22:05:27 +08:00
Kevin Wan
787b046a70 docs: update slack invitation link (#1378) 2021-12-27 16:52:08 +08:00
Kevin Wan
f827a7b985 chore: update goctl version to 1.2.4 for release tools/goctl/v1.2.4 (#1372) 2021-12-27 10:57:55 +08:00
行者
f5f2097d14 Updated: keep the original field name definition when MySQL table struct generation encounters the db keyword (#1369) 2021-12-26 21:56:04 +08:00
Kevin Wan
cfcfb87fd4 ci: add release action to auto build binaries (#1371) 2021-12-26 21:44:33 +08:00
Kevin Wan
1d223fc114 docs: update goctl markdown (#1370) 2021-12-26 20:32:31 +08:00
Kevin Wan
c0647f0719 feat: support context in MapReduce (#1368) 2021-12-25 20:42:52 +08:00
Kevin Wan
8745ed9c61 chore: add 1s for tolerance in redislock (#1367) 2021-12-25 19:44:27 +08:00
种豆得豆
836726e710 fix redis try-lock bug (#1366)
#issue_id: 1338

Co-authored-by: zhangwei <>
2021-12-25 19:20:53 +08:00
JiangYiJun
a67c118dcf go-zero tools, fix a func: api new cannot choose style (#1356) 2021-12-23 10:28:46 +08:00
Kevin Wan
cd289465fd chore: coding style and comments (#1361)
* chore: coding style and comments

* chore: optimize `ParseJsonBody` (#1353)

* chore: optimize `ParseJsonBody`

* chore: optimize `ParseJsonBody`

* fix: fix a test

* chore: optimize `ParseJsonBody`

* fix a test

* chore: add comment

* chore: refactor

Co-authored-by: chenquan <chenquan.dev@foxmail.com>
2021-12-22 21:43:37 +08:00
chenquan
263e426ae1 chore: optimize ParseJsonBody (#1353)
* chore: optimize `ParseJsonBody`

* chore: optimize `ParseJsonBody`

* fix: fix a test

* chore: optimize `ParseJsonBody`

* fix a test

* chore: add comment
2021-12-22 20:24:55 +08:00
charliecen
d5e493383a chore: cancel the assignment and judge later (#1359)
Co-authored-by: charliecen <chq@abierr.com>
2021-12-22 20:05:35 +08:00
Kevin Wan
6f1d27354a chore: put error message in error.log for verbose mode (#1355) 2021-12-21 11:36:01 +08:00
Kevin Wan
26101732d2 test: add more tests (#1352) 2021-12-20 22:42:36 +08:00
Kevin Wan
71d40e0c08 Revert "exclude 503 errors caused by client disconnections (#1343)" (#1351)
This reverts commit 2cdf5e7395.
2021-12-20 20:34:43 +08:00
Kevin Wan
4ba2ff7cdd feat: treat client closed requests as code 499 (#1350)
* feat: treat client closed requests as code 499

* chore: add comments
2021-12-20 19:43:38 +08:00
vic
2cdf5e7395 exclude 503 errors caused by client disconnections (#1343) 2021-12-20 19:43:13 +08:00
Kevin Wan
8315a55b3f Update FUNDING.yml
enable sponsorship.
2021-12-20 15:27:05 +08:00
Kevin Wan
d1c2a31af7 chore: add tests & refactor (#1346)
* chore: add tests & refactor

* chore: refactor
2021-12-18 23:11:38 +08:00
MarkJoyMa
3e6c217408 Feature: support adding custom cache to mongoc and sqlc (#1313)
* merge

* Feature: support adding custom cache to mongoc and sqlc
2021-12-18 22:45:07 +08:00
Kevin Wan
b299f350be chore: add comments (#1345) 2021-12-18 22:39:14 +08:00
Kevin Wan
8fd16c17dc chore: update goctl version to 1.2.5 (#1337) 2021-12-16 00:21:54 +08:00
anqiansong
5979b2aa0f Update template (#1335)
Co-authored-by: anqiansong <anqiansong@bytedance.com>
2021-12-15 23:24:32 +08:00
anqiansong
0b17e0e5d9 Feat goctl bug (#1332)
* Support goctl bug

* fix typo

* format code

Co-authored-by: anqiansong <anqiansong@bytedance.com>
2021-12-15 22:43:58 +08:00
Kevin Wan
3d8ad5e4f6 feat: tidy mod, update go-zero to latest (#1334) 2021-12-15 22:34:58 +08:00
Kevin Wan
ff1752dd39 feat: tidy mod, update go-zero to latest (#1333) 2021-12-15 22:23:06 +08:00
Kevin Wan
1becaeb7be chore: refactor (#1331) 2021-12-15 20:44:23 +08:00
yangkequn
171afaadb9 Update types.go (#1314) 2021-12-15 20:16:17 +08:00
Kevin Wan
776e6e647d feat: tidy mod, add go.mod for goctl (#1328) 2021-12-15 19:44:49 +08:00
Kevin Wan
4ccdf4ec72 chore: format code (#1327) 2021-12-15 13:43:05 +08:00
CrazyZard
a7bd993c0c commit missing method for redis (#1325)
* commit missing `decr`, `decrby`, `lindex` methods for redis

* fix(store_test):TestRedis_DecrBy

* add unit tests for redis commands. And put the functions in alphabetical order

* put the functions in alphabetical order

* add `lindex` unit test

* sort func
2021-12-15 13:15:39 +08:00
Kevin Wan
a290ff4486 docs: add go-zero users (#1323) 2021-12-14 13:37:49 +08:00
Kevin Wan
490ef13822 style: format code (#1322) 2021-12-14 11:29:44 +08:00
anqiansong
1b14de2ff9 fix: #1318 (#1321)
* fix #1318

* fix #1318

* remove never used code

* fix unit test

Co-authored-by: anqiansong <anqiansong@bytedance.com>
2021-12-13 22:55:11 +08:00
Kevin Wan
914692cc82 fix #1309 (#1317) 2021-12-13 11:58:58 +08:00
anqiansong
07191dc430 fix #1305 (#1307)
Co-authored-by: anqiansong <anqiansong@bytedance.com>
2021-12-07 22:24:18 +08:00
BYT0723
af3fb2b04d fix: go issue 16206 (#1298) 2021-12-07 15:52:37 +08:00
Kevin Wan
0240fa131a chore: rename service context from ctx to svcCtx (#1299) 2021-12-05 22:10:47 +08:00
Kevin Wan
e96577dd38 docs: add go-zero users (#1294) 2021-12-03 22:32:35 +08:00
Kevin Wan
403dd7367a fix #1288 (#1292)
* fix #1288

* chore: make wrapup & shutdown callbacks run simultaneously
2021-12-02 22:41:57 +08:00
Kevin Wan
8086ad120b Revert "feat: reduce dependencies of framework by add go.mod in goctl (#1290)" (#1291)
This reverts commit 87a445689c.
2021-12-02 19:40:23 +08:00
Kevin Wan
87a445689c feat: reduce dependencies of framework by add go.mod in goctl (#1290) 2021-12-02 16:57:07 +08:00
Kevin Wan
b6bda54870 chore: update cli version (#1287) 2021-12-01 23:33:23 +08:00
423 changed files with 6765 additions and 2457 deletions

2
.github/FUNDING.yml vendored
View File

@@ -9,4 +9,4 @@ community_bridge: # Replace with a single Community Bridge project-name e.g., cl
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
custom: https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg

View File

@@ -15,7 +15,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v2
with:
go-version: ^1.14
go-version: ^1.15
id: go
- name: Check out code into the Go module directory

18
.github/workflows/issue-translator.yml vendored Normal file
View File

@@ -0,0 +1,18 @@
name: 'issue-translator'
on:
issue_comment:
types: [created]
issues:
types: [opened]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: tomsun28/issues-translate-action@v2.6
with:
IS_MODIFY_TITLE: true
# not require, default false, . Decide whether to modify the issue title
# if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot.
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically. 👯👭🏻🧑‍🤝‍🧑👫🧑🏿‍🤝‍🧑🏻👩🏾‍🤝‍👨🏿👬🏿
# not require. Customize the translation robot prefix message.

View File

@@ -40,7 +40,7 @@ We will help you to contribute in different areas like filing issues, developing
getting your work reviewed and merged.
If you have questions about the development process,
feel free to [file an issue](https://github.com/tal-tech/go-zero/issues/new/choose).
feel free to [file an issue](https://github.com/zeromicro/go-zero/issues/new/choose).
## Find something to work on
@@ -50,10 +50,10 @@ Here is how you get started.
### Find a good first topic
[go-zero](https://github.com/tal-tech/go-zero) has beginner-friendly issues that provide a good first issue.
For example, [go-zero](https://github.com/tal-tech/go-zero) has
[help wanted](https://github.com/tal-tech/go-zero/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) and
[good first issue](https://github.com/tal-tech/go-zero/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
[go-zero](https://github.com/zeromicro/go-zero) has beginner-friendly issues that provide a good first issue.
For example, [go-zero](https://github.com/zeromicro/go-zero) has
[help wanted](https://github.com/zeromicro/go-zero/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) and
[good first issue](https://github.com/zeromicro/go-zero/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
labels for issues that should not need deep knowledge of the system.
We can help new contributors who wish to work on such issues.
@@ -79,7 +79,7 @@ This is a rough outline of what a contributor's workflow looks like:
- Create a topic branch from where to base the contribution. This is usually master.
- Make commits of logical units.
- Push changes in a topic branch to a personal fork of the repository.
- Submit a pull request to [go-zero](https://github.com/tal-tech/go-zero).
- Submit a pull request to [go-zero](https://github.com/zeromicro/go-zero).
## Creating Pull Requests

View File

@@ -14,8 +14,10 @@ We hope that the items listed below will inspire further engagement from the com
## 2021 Q4
- [x] Support `username/password` authentication in ETCD
- [x] Support `SSL/TLS` in ETCD
- [x] Support `SSL/TLS` in `zRPC`
- [x] Support `TLS` in redis connections
- [x] Support `goctl bug` to report bugs conveniently
## 2022
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file

View File

@@ -4,8 +4,8 @@ import (
"errors"
"strconv"
"github.com/tal-tech/go-zero/core/hash"
"github.com/tal-tech/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/core/hash"
"github.com/zeromicro/go-zero/core/stores/redis"
)
const (

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stores/redis/redistest"
"github.com/zeromicro/go-zero/core/stores/redis/redistest"
)
func TestRedisBitSet_New_Set_Test(t *testing.T) {

View File

@@ -6,11 +6,11 @@ import (
"strings"
"sync"
"github.com/tal-tech/go-zero/core/mathx"
"github.com/tal-tech/go-zero/core/proc"
"github.com/tal-tech/go-zero/core/stat"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/timex"
)
const (

View File

@@ -8,7 +8,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/stat"
)
func init() {

View File

@@ -6,7 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/stat"
)
func init() {

View File

@@ -4,8 +4,8 @@ import (
"math"
"time"
"github.com/tal-tech/go-zero/core/collection"
"github.com/tal-tech/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/mathx"
)
const (

View File

@@ -7,9 +7,9 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/collection"
"github.com/tal-tech/go-zero/core/mathx"
"github.com/tal-tech/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/stat"
)
const (

View File

@@ -8,8 +8,8 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/iox"
"github.com/tal-tech/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/iox"
"github.com/zeromicro/go-zero/core/lang"
)
func TestEnterToContinue(t *testing.T) {

View File

@@ -7,7 +7,7 @@ import (
"encoding/base64"
"errors"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/logx"
)
// ErrPaddingSize indicates bad padding size.

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/fs"
)
const (

View File

@@ -6,9 +6,9 @@ import (
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/mathx"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/syncx"
)
const (

View File

@@ -4,7 +4,7 @@ import (
"sync"
"time"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/timex"
)
type (

View File

@@ -6,7 +6,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/stringx"
)
const duration = time.Millisecond * 50

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/stringx"
)
func TestSafeMap(t *testing.T) {

View File

@@ -1,8 +1,8 @@
package collection
import (
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
)
const (

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/logx"
)
func init() {

View File

@@ -5,9 +5,9 @@ import (
"fmt"
"time"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/threading"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/timex"
)
const drainWorkers = 8

View File

@@ -8,10 +8,10 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
)
const (

View File

@@ -7,7 +7,7 @@ import (
"os"
"path"
"github.com/tal-tech/go-zero/core/mapping"
"github.com/zeromicro/go-zero/core/mapping"
)
var loaders = map[string]func([]byte, interface{}) error{

View File

@@ -6,8 +6,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/tal-tech/go-zero/core/hash"
"github.com/zeromicro/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/hash"
)
func TestLoadConfig_notExists(t *testing.T) {

View File

@@ -7,7 +7,7 @@ import (
"strings"
"sync"
"github.com/tal-tech/go-zero/core/iox"
"github.com/zeromicro/go-zero/core/iox"
)
// PropertyError represents a configuration error message.

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/fs"
)
func TestProperties(t *testing.T) {

View File

@@ -3,7 +3,7 @@ package contextx
import (
"context"
"github.com/tal-tech/go-zero/core/mapping"
"github.com/zeromicro/go-zero/core/mapping"
)
const contextTagKey = "ctx"

View File

@@ -1,7 +1,14 @@
package discov
import "github.com/tal-tech/go-zero/core/discov/internal"
import "github.com/zeromicro/go-zero/core/discov/internal"
// RegisterAccount registers the username/password to the given etcd cluster.
func RegisterAccount(endpoints []string, user, pass string) {
internal.AddAccount(endpoints, user, pass)
}
// RegisterTLS registers the CertFile/CertKeyFile/CACertFile to the given etcd.
func RegisterTLS(endpoints []string, certFile, certKeyFile, caFile string,
insecureSkipVerify bool) error {
return internal.AddTLS(endpoints, certFile, certKeyFile, caFile, insecureSkipVerify)
}
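
These two package-level helpers are the public entry points for etcd authentication and TLS. A minimal usage sketch, with placeholder endpoints and file paths that are not taken from this changeset:

package main

import (
	"github.com/zeromicro/go-zero/core/discov"
	"github.com/zeromicro/go-zero/core/logx"
)

func main() {
	endpoints := []string{"etcd0:2379", "etcd1:2379"} // placeholder cluster endpoints
	// register the username/password used when dialing this cluster
	discov.RegisterAccount(endpoints, "user", "pass")
	// register client cert/key and CA; the last argument toggles InsecureSkipVerify
	logx.Must(discov.RegisterTLS(endpoints, "client.pem", "client-key.pem", "ca.pem", false))
}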

View File

@@ -4,8 +4,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/discov/internal"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/stringx"
)
func TestRegisterAccount(t *testing.T) {

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
"github.com/tal-tech/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/discov/internal"
)
const (

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/discov/internal"
)
var mockLock sync.Mutex

View File

@@ -4,10 +4,14 @@ import "errors"
// EtcdConf is the config item with the given key on etcd.
type EtcdConf struct {
Hosts []string
Key string
User string `json:",optional"`
Pass string `json:",optional"`
Hosts []string
Key string
User string `json:",optional"`
Pass string `json:",optional"`
CertFile string `json:",optional"`
CertKeyFile string `json:",optional=CertFile"`
CACertFile string `json:",optional=CertFile"`
InsecureSkipVerify bool `json:",optional"`
}
// HasAccount returns if account provided.
@@ -15,6 +19,11 @@ func (c EtcdConf) HasAccount() bool {
return len(c.User) > 0 && len(c.Pass) > 0
}
// HasTLS returns if TLS CertFile/CertKeyFile/CACertFile are provided.
func (c EtcdConf) HasTLS() bool {
return len(c.CertFile) > 0 && len(c.CertKeyFile) > 0 && len(c.CACertFile) > 0
}
// Validate validates c.
func (c EtcdConf) Validate() error {
if len(c.Hosts) == 0 {
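
With the new fields above, an etcd configuration that enables TLS might be built as in the sketch below; the values are placeholders and the zRPC code that consumes it is outside this diff:

conf := discov.EtcdConf{
	Hosts:       []string{"etcd0:2379"}, // placeholder hosts
	Key:         "user.rpc",             // placeholder service key
	User:        "root",                 // optional username/password pair
	Pass:        "secret",
	CertFile:    "etc/client.pem",     // HasTLS requires CertFile, CertKeyFile
	CertKeyFile: "etc/client-key.pem", // and CACertFile to all be set
	CACertFile:  "etc/ca.pem",
	// InsecureSkipVerify: true, // testing only, as added in this change
}
if err := conf.Validate(); err != nil {
	panic(err) // placeholder error handling
}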

View File

@@ -1,17 +1,25 @@
package internal
import "sync"
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"sync"
)
var (
accounts = make(map[string]Account)
tlsConfigs = make(map[string]*tls.Config)
lock sync.RWMutex
)
// Account holds the username/password for an etcd cluster.
type Account struct {
User string
Pass string
}
var (
accounts = make(map[string]Account)
lock sync.RWMutex
)
// AddAccount adds the username/password for the given etcd cluster.
func AddAccount(endpoints []string, user, pass string) {
lock.Lock()
defer lock.Unlock()
@@ -22,6 +30,33 @@ func AddAccount(endpoints []string, user, pass string) {
}
}
// AddTLS adds the tls cert files for the given etcd cluster.
func AddTLS(endpoints []string, certFile, certKeyFile, caFile string, insecureSkipVerify bool) error {
cert, err := tls.LoadX509KeyPair(certFile, certKeyFile)
if err != nil {
return err
}
caData, err := ioutil.ReadFile(caFile)
if err != nil {
return err
}
pool := x509.NewCertPool()
pool.AppendCertsFromPEM(caData)
lock.Lock()
defer lock.Unlock()
tlsConfigs[getClusterKey(endpoints)] = &tls.Config{
Certificates: []tls.Certificate{cert},
RootCAs: pool,
InsecureSkipVerify: insecureSkipVerify,
}
return nil
}
// GetAccount gets the username/password for the given etcd cluster.
func GetAccount(endpoints []string) (Account, bool) {
lock.RLock()
defer lock.RUnlock()
@@ -29,3 +64,12 @@ func GetAccount(endpoints []string) (Account, bool) {
account, ok := accounts[getClusterKey(endpoints)]
return account, ok
}
// GetTLS gets the tls config for the given etcd cluster.
func GetTLS(endpoints []string) (*tls.Config, bool) {
lock.RLock()
defer lock.RUnlock()
cfg, ok := tlsConfigs[getClusterKey(endpoints)]
return cfg, ok
}

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/stringx"
)
func TestAccount(t *testing.T) {

View File

@@ -9,11 +9,11 @@ import (
"sync"
"time"
"github.com/tal-tech/go-zero/core/contextx"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/contextx"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/threading"
clientv3 "go.etcd.io/etcd/client/v3"
)
@@ -337,6 +337,9 @@ func DialClient(endpoints []string) (EtcdClient, error) {
cfg.Username = account.User
cfg.Password = account.Pass
}
if tlsCfg, ok := GetTLS(endpoints); ok {
cfg.TLS = tlsCfg
}
return clientv3.New(cfg)
}

View File

@@ -7,10 +7,10 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/contextx"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/contextx"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stringx"
"go.etcd.io/etcd/api/v3/mvccpb"
clientv3 "go.etcd.io/etcd/client/v3"
)

View File

@@ -1,12 +1,12 @@
package discov
import (
"github.com/tal-tech/go-zero/core/discov/internal"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/proc"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/threading"
clientv3 "go.etcd.io/etcd/client/v3"
)
@@ -145,16 +145,23 @@ func (p *Publisher) revoke(cli internal.EtcdClient) {
}
}
// WithPubEtcdAccount provides the etcd username/password.
func WithPubEtcdAccount(user, pass string) PubOption {
return func(pub *Publisher) {
internal.AddAccount(pub.endpoints, user, pass)
}
}
// WithId customizes a Publisher with the id.
func WithId(id int64) PubOption {
return func(publisher *Publisher) {
publisher.id = id
}
}
// WithPubEtcdAccount provides the etcd username/password.
func WithPubEtcdAccount(user, pass string) PubOption {
return func(pub *Publisher) {
RegisterAccount(pub.endpoints, user, pass)
}
}
// WithPubEtcdTLS provides the etcd CertFile/CertKeyFile/CACertFile.
func WithPubEtcdTLS(certFile, certKeyFile, caFile string, insecureSkipVerify bool) PubOption {
return func(pub *Publisher) {
logx.Must(RegisterTLS(pub.endpoints, certFile, certKeyFile, caFile, insecureSkipVerify))
}
}
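
A sketch of a publisher wired with these options; NewPublisher and KeepAlive are assumed from the existing discov API and are not part of this diff:

pub := discov.NewPublisher(
	[]string{"etcd0:2379"},          // placeholder endpoints
	"user.rpc", "192.168.0.10:8080", // placeholder key and value
	discov.WithPubEtcdAccount("user", "pass"),
	discov.WithPubEtcdTLS("client.pem", "client-key.pem", "ca.pem", false),
)
logx.Must(pub.KeepAlive()) // registers the value and keeps the lease alive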

View File

@@ -8,10 +8,10 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/discov/internal"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stringx"
clientv3 "go.etcd.io/etcd/client/v3"
)

View File

@@ -4,8 +4,9 @@ import (
"sync"
"sync/atomic"
"github.com/tal-tech/go-zero/core/discov/internal"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/syncx"
)
type (
@@ -58,9 +59,17 @@ func Exclusive() SubOption {
}
}
// WithSubEtcdAccount provides the etcd username/password.
func WithSubEtcdAccount(user, pass string) SubOption {
return func(sub *Subscriber) {
internal.AddAccount(sub.endpoints, user, pass)
RegisterAccount(sub.endpoints, user, pass)
}
}
// WithSubEtcdTLS provides the etcd CertFile/CertKeyFile/CACertFile.
func WithSubEtcdTLS(certFile, certKeyFile, caFile string, insecureSkipVerify bool) SubOption {
return func(sub *Subscriber) {
logx.Must(RegisterTLS(sub.endpoints, certFile, certKeyFile, caFile, insecureSkipVerify))
}
}
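
The subscriber side mirrors the publisher options; a sketch with NewSubscriber and Values assumed from the existing API:

sub, err := discov.NewSubscriber(
	[]string{"etcd0:2379"}, "user.rpc", // placeholder endpoints and key
	discov.WithSubEtcdAccount("user", "pass"),
	discov.WithSubEtcdTLS("client.pem", "client-key.pem", "ca.pem", false),
)
logx.Must(err)
_ = sub.Values() // current values registered under the key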

View File

@@ -5,8 +5,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/discov/internal"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/discov/internal"
"github.com/zeromicro/go-zero/core/stringx"
)
const (

View File

@@ -4,7 +4,7 @@ import (
"sync"
"time"
"github.com/tal-tech/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/threading"
)
// A DelayExecutor delays a tasks on given delay interval.

View File

@@ -3,8 +3,8 @@ package executors
import (
"time"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
)
// A LessExecutor is an executor to limit execution once within given time interval.

View File

@@ -5,7 +5,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/timex"
)
func TestLessExecutor_DoOrDiscard(t *testing.T) {

View File

@@ -6,11 +6,11 @@ import (
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/proc"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/threading"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/timex"
)
const idleRound = 10

View File

@@ -8,7 +8,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/timex"
)
const threshold = 10

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/fs"
)
const (

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/fs"
)
func TestSplitLineChunks(t *testing.T) {

View File

@@ -5,7 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/fs"
)
func TestRangeReader(t *testing.T) {

View File

@@ -4,7 +4,7 @@ import (
"io/ioutil"
"os"
"github.com/tal-tech/go-zero/core/hash"
"github.com/zeromicro/go-zero/core/hash"
)
// TempFileWithText creates the temporary file with the given content,

View File

@@ -1,6 +1,6 @@
package fx
import "github.com/tal-tech/go-zero/core/threading"
import "github.com/zeromicro/go-zero/core/threading"
// Parallel runs fns parallelly and waits for done.
func Parallel(fns ...func()) {
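
Only the import path changes in this file; for context, a minimal Parallel usage sketch:

import "github.com/zeromicro/go-zero/core/fx"

func runBoth() {
	fx.Parallel(
		func() { /* task one */ },
		func() { /* task two */ },
	) // returns only after both functions have finished
}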

View File

@@ -1,6 +1,6 @@
package fx
import "github.com/tal-tech/go-zero/core/errorx"
import "github.com/zeromicro/go-zero/core/errorx"
const defaultRetryTimes = 3

View File

@@ -4,9 +4,9 @@ import (
"sort"
"sync"
"github.com/tal-tech/go-zero/core/collection"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/threading"
)
const (
@@ -90,6 +90,8 @@ func Range(source <-chan interface{}) Stream {
func (s Stream) AllMach(predicate func(item interface{}) bool) bool {
for item := range s.source {
if !predicate(item) {
// make sure the former goroutine not block, and current func returns fast.
go drain(s.source)
return false
}
}
@@ -103,6 +105,8 @@ func (s Stream) AllMach(predicate func(item interface{}) bool) bool {
func (s Stream) AnyMach(predicate func(item interface{}) bool) bool {
for item := range s.source {
if predicate(item) {
// make sure the former goroutine not block, and current func returns fast.
go drain(s.source)
return true
}
}
@@ -186,8 +190,7 @@ func (s Stream) Distinct(fn KeyFunc) Stream {
// Done waits all upstreaming operations to be done.
func (s Stream) Done() {
for range s.source {
}
drain(s.source)
}
// Filter filters the items by the given FilterFunc.
@@ -199,9 +202,22 @@ func (s Stream) Filter(fn FilterFunc, opts ...Option) Stream {
}, opts...)
}
// First returns the first item, nil if no items.
func (s Stream) First() interface{} {
for item := range s.source {
// make sure the former goroutine not block, and current func returns fast.
go drain(s.source)
return item
}
return nil
}
// ForAll handles the streaming elements from the source and no later streams.
func (s Stream) ForAll(fn ForAllFunc) {
fn(s.source)
// avoid goroutine leak on fn not consuming all items.
go drain(s.source)
}
// ForEach seals the Stream with the ForEachFunc on each item, no successive operations.
@@ -246,11 +262,14 @@ func (s Stream) Head(n int64) Stream {
}
if n == 0 {
// let successive method go ASAP even we have more items to skip
// why we don't just break the loop, because if break,
// this former goroutine will block forever, which will cause goroutine leak.
close(source)
// why we don't just break the loop, and drain to consume all items.
// because if breaks, this former goroutine will block forever,
// which will cause goroutine leak.
drain(s.source)
}
}
// not enough items in s.source, but we need to let successive method to go ASAP.
if n > 0 {
close(source)
}
@@ -259,6 +278,13 @@ func (s Stream) Head(n int64) Stream {
return Range(source)
}
// Last returns the last item, or nil if no items.
func (s Stream) Last() (item interface{}) {
for item = range s.source {
}
return
}
// Map converts each item to another corresponding item, which means it's a 1:1 model.
func (s Stream) Map(fn MapFunc, opts ...Option) Stream {
return s.Walk(func(item interface{}, pipe chan<- interface{}) {
@@ -280,6 +306,21 @@ func (s Stream) Merge() Stream {
return Range(source)
}
// NoneMatch returns whether all elements of this stream don't match the provided predicate.
// May not evaluate the predicate on all elements if not necessary for determining the result.
// If the stream is empty then true is returned and the predicate is not evaluated.
func (s Stream) NoneMatch(predicate func(item interface{}) bool) bool {
for item := range s.source {
if predicate(item) {
// make sure the former goroutine not block, and current func returns fast.
go drain(s.source)
return false
}
}
return true
}
// Parallel applies the given ParallelFunc to each item concurrently with given number of workers.
func (s Stream) Parallel(fn ParallelFunc, opts ...Option) {
s.Walk(func(item interface{}, pipe chan<- interface{}) {
@@ -411,15 +452,12 @@ func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
var wg sync.WaitGroup
pool := make(chan lang.PlaceholderType, option.workers)
for {
for item := range s.source {
// important, used in another goroutine
val := item
pool <- lang.Placeholder
item, ok := <-s.source
if !ok {
<-pool
break
}
wg.Add(1)
// better to safely run caller defined method
threading.GoSafe(func() {
defer func() {
@@ -427,7 +465,7 @@ func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
<-pool
}()
fn(item, pipe)
fn(val, pipe)
})
}
@@ -439,22 +477,19 @@ func (s Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
}
func (s Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream {
pipe := make(chan interface{}, defaultWorkers)
pipe := make(chan interface{}, option.workers)
go func() {
var wg sync.WaitGroup
for {
item, ok := <-s.source
if !ok {
break
}
for item := range s.source {
// important, used in another goroutine
val := item
wg.Add(1)
// better to safely run caller defined method
threading.GoSafe(func() {
defer wg.Done()
fn(item, pipe)
fn(val, pipe)
})
}
@@ -465,14 +500,14 @@ func (s Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream {
return Range(pipe)
}
// UnlimitedWorkers lets the caller to use as many workers as the tasks.
// UnlimitedWorkers lets the caller use as many workers as the tasks.
func UnlimitedWorkers() Option {
return func(opts *rxOptions) {
opts.unlimitedWorkers = true
}
}
// WithWorkers lets the caller to customize the concurrent workers.
// WithWorkers lets the caller customize the concurrent workers.
func WithWorkers(workers int) Option {
return func(opts *rxOptions) {
if workers < minWorkers {
@@ -483,6 +518,7 @@ func WithWorkers(workers int) Option {
}
}
// buildOptions returns a rxOptions with given customizations.
func buildOptions(opts ...Option) *rxOptions {
options := newOptions()
for _, opt := range opts {
@@ -492,6 +528,13 @@ func buildOptions(opts ...Option) *rxOptions {
return options
}
// drain drains the given channel.
func drain(channel <-chan interface{}) {
for range channel {
}
}
// newOptions returns a default rxOptions.
func newOptions() *rxOptions {
return &rxOptions{
workers: defaultWorkers,
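
A short sketch exercising the stream operators added above (First, Last, NoneMatch); the values are illustrative:

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/fx"
)

func demo() {
	first := fx.Just(1, 2, 3, 4).First() // 1; the rest of the stream is drained in the background
	last := fx.Just(1, 2, 3, 4).Last()   // 4
	none := fx.Just(1, 2, 3, 4).NoneMatch(func(item interface{}) bool {
		return item.(int) > 10 // nothing exceeds 10, so NoneMatch reports true
	})
	fmt.Println(first, last, none) // 1 4 true
}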

View File

@@ -13,324 +13,494 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/stringx"
"go.uber.org/goleak"
)
func TestBuffer(t *testing.T) {
const N = 5
var count int32
var wait sync.WaitGroup
wait.Add(1)
From(func(source chan<- interface{}) {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
runCheckedTest(t, func(t *testing.T) {
const N = 5
var count int32
var wait sync.WaitGroup
wait.Add(1)
From(func(source chan<- interface{}) {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
for i := 0; i < 2*N; i++ {
select {
case source <- i:
atomic.AddInt32(&count, 1)
case <-ticker.C:
wait.Done()
return
for i := 0; i < 2*N; i++ {
select {
case source <- i:
atomic.AddInt32(&count, 1)
case <-ticker.C:
wait.Done()
return
}
}
}
}).Buffer(N).ForAll(func(pipe <-chan interface{}) {
wait.Wait()
// why N+1, because take one more to wait for sending into the channel
assert.Equal(t, int32(N+1), atomic.LoadInt32(&count))
}).Buffer(N).ForAll(func(pipe <-chan interface{}) {
wait.Wait()
// why N+1, because take one more to wait for sending into the channel
assert.Equal(t, int32(N+1), atomic.LoadInt32(&count))
})
})
}
func TestBufferNegative(t *testing.T) {
var result int
Just(1, 2, 3, 4).Buffer(-1).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4).Buffer(-1).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
})
assert.Equal(t, 10, result)
}
func TestCount(t *testing.T) {
tests := []struct {
name string
elements []interface{}
}{
{
name: "no elements with nil",
},
{
name: "no elements",
elements: []interface{}{},
},
{
name: "1 element",
elements: []interface{}{1},
},
{
name: "multiple elements",
elements: []interface{}{1, 2, 3},
},
}
runCheckedTest(t, func(t *testing.T) {
tests := []struct {
name string
elements []interface{}
}{
{
name: "no elements with nil",
},
{
name: "no elements",
elements: []interface{}{},
},
{
name: "1 element",
elements: []interface{}{1},
},
{
name: "multiple elements",
elements: []interface{}{1, 2, 3},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
val := Just(test.elements...).Count()
assert.Equal(t, len(test.elements), val)
})
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
val := Just(test.elements...).Count()
assert.Equal(t, len(test.elements), val)
})
}
})
}
func TestDone(t *testing.T) {
var count int32
Just(1, 2, 3).Walk(func(item interface{}, pipe chan<- interface{}) {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int)))
}).Done()
assert.Equal(t, int32(6), count)
runCheckedTest(t, func(t *testing.T) {
var count int32
Just(1, 2, 3).Walk(func(item interface{}, pipe chan<- interface{}) {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int)))
}).Done()
assert.Equal(t, int32(6), count)
})
}
func TestJust(t *testing.T) {
var result int
Just(1, 2, 3, 4).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
})
assert.Equal(t, 10, result)
}
func TestDistinct(t *testing.T) {
var result int
Just(4, 1, 3, 2, 3, 4).Distinct(func(item interface{}) interface{} {
return item
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
runCheckedTest(t, func(t *testing.T) {
var result int
Just(4, 1, 3, 2, 3, 4).Distinct(func(item interface{}) interface{} {
return item
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
})
assert.Equal(t, 10, result)
}
func TestFilter(t *testing.T) {
var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
return item.(int)%2 == 0
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
return item.(int)%2 == 0
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 6, result)
})
}
func TestFirst(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
assert.Nil(t, Just().First())
assert.Equal(t, "foo", Just("foo").First())
assert.Equal(t, "foo", Just("foo", "bar").First())
})
assert.Equal(t, 6, result)
}
func TestForAll(t *testing.T) {
var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
return item.(int)%2 == 0
}).ForAll(func(pipe <-chan interface{}) {
for item := range pipe {
result += item.(int)
}
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
return item.(int)%2 == 0
}).ForAll(func(pipe <-chan interface{}) {
for item := range pipe {
result += item.(int)
}
})
assert.Equal(t, 6, result)
})
assert.Equal(t, 6, result)
}
func TestGroup(t *testing.T) {
var groups [][]int
Just(10, 11, 20, 21).Group(func(item interface{}) interface{} {
v := item.(int)
return v / 10
}).ForEach(func(item interface{}) {
v := item.([]interface{})
var group []int
for _, each := range v {
group = append(group, each.(int))
}
groups = append(groups, group)
})
runCheckedTest(t, func(t *testing.T) {
var groups [][]int
Just(10, 11, 20, 21).Group(func(item interface{}) interface{} {
v := item.(int)
return v / 10
}).ForEach(func(item interface{}) {
v := item.([]interface{})
var group []int
for _, each := range v {
group = append(group, each.(int))
}
groups = append(groups, group)
})
assert.Equal(t, 2, len(groups))
for _, group := range groups {
assert.Equal(t, 2, len(group))
assert.True(t, group[0]/10 == group[1]/10)
}
assert.Equal(t, 2, len(groups))
for _, group := range groups {
assert.Equal(t, 2, len(group))
assert.True(t, group[0]/10 == group[1]/10)
}
})
}
func TestHead(t *testing.T) {
var result int
Just(1, 2, 3, 4).Head(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4).Head(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 3, result)
})
assert.Equal(t, 3, result)
}
func TestHeadZero(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4).Head(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
return nil, nil
runCheckedTest(t, func(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4).Head(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
return nil, nil
})
})
})
}
func TestHeadMore(t *testing.T) {
var result int
Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
})
}
func TestLast(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
goroutines := runtime.NumGoroutine()
assert.Nil(t, Just().Last())
assert.Equal(t, "foo", Just("foo").Last())
assert.Equal(t, "bar", Just("foo", "bar").Last())
// let scheduler schedule first
runtime.Gosched()
assert.Equal(t, goroutines, runtime.NumGoroutine())
})
assert.Equal(t, 10, result)
}
func TestMap(t *testing.T) {
log.SetOutput(ioutil.Discard)
runCheckedTest(t, func(t *testing.T) {
log.SetOutput(ioutil.Discard)
tests := []struct {
mapper MapFunc
expect int
}{
{
mapper: func(item interface{}) interface{} {
v := item.(int)
return v * v
tests := []struct {
mapper MapFunc
expect int
}{
{
mapper: func(item interface{}) interface{} {
v := item.(int)
return v * v
},
expect: 30,
},
expect: 30,
},
{
mapper: func(item interface{}) interface{} {
v := item.(int)
if v%2 == 0 {
return 0
}
return v * v
},
expect: 10,
},
{
mapper: func(item interface{}) interface{} {
v := item.(int)
if v%2 == 0 {
panic(v)
}
return v * v
},
expect: 10,
},
}
// Map(...) works even WithWorkers(0)
for i, test := range tests {
t.Run(stringx.Rand(), func(t *testing.T) {
var result int
var workers int
if i%2 == 0 {
workers = 0
} else {
workers = runtime.NumCPU()
}
From(func(source chan<- interface{}) {
for i := 1; i < 5; i++ {
source <- i
}
}).Map(test.mapper, WithWorkers(workers)).Reduce(
func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
{
mapper: func(item interface{}) interface{} {
v := item.(int)
if v%2 == 0 {
return 0
}
return result, nil
})
return v * v
},
expect: 10,
},
{
mapper: func(item interface{}) interface{} {
v := item.(int)
if v%2 == 0 {
panic(v)
}
return v * v
},
expect: 10,
},
}
assert.Equal(t, test.expect, result)
})
}
// Map(...) works even WithWorkers(0)
for i, test := range tests {
t.Run(stringx.Rand(), func(t *testing.T) {
var result int
var workers int
if i%2 == 0 {
workers = 0
} else {
workers = runtime.NumCPU()
}
From(func(source chan<- interface{}) {
for i := 1; i < 5; i++ {
source <- i
}
}).Map(test.mapper, WithWorkers(workers)).Reduce(
func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, test.expect, result)
})
}
})
}
func TestMerge(t *testing.T) {
Just(1, 2, 3, 4).Merge().ForEach(func(item interface{}) {
assert.ElementsMatch(t, []interface{}{1, 2, 3, 4}, item.([]interface{}))
runCheckedTest(t, func(t *testing.T) {
Just(1, 2, 3, 4).Merge().ForEach(func(item interface{}) {
assert.ElementsMatch(t, []interface{}{1, 2, 3, 4}, item.([]interface{}))
})
})
}
func TestParallelJust(t *testing.T) {
var count int32
Just(1, 2, 3).Parallel(func(item interface{}) {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int)))
}, UnlimitedWorkers())
assert.Equal(t, int32(6), count)
runCheckedTest(t, func(t *testing.T) {
var count int32
Just(1, 2, 3).Parallel(func(item interface{}) {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int)))
}, UnlimitedWorkers())
assert.Equal(t, int32(6), count)
})
}
func TestReverse(t *testing.T) {
Just(1, 2, 3, 4).Reverse().Merge().ForEach(func(item interface{}) {
assert.ElementsMatch(t, []interface{}{4, 3, 2, 1}, item.([]interface{}))
runCheckedTest(t, func(t *testing.T) {
Just(1, 2, 3, 4).Reverse().Merge().ForEach(func(item interface{}) {
assert.ElementsMatch(t, []interface{}{4, 3, 2, 1}, item.([]interface{}))
})
})
}
func TestSort(t *testing.T) {
var prev int
Just(5, 3, 7, 1, 9, 6, 4, 8, 2).Sort(func(a, b interface{}) bool {
return a.(int) < b.(int)
}).ForEach(func(item interface{}) {
next := item.(int)
assert.True(t, prev < next)
prev = next
runCheckedTest(t, func(t *testing.T) {
var prev int
Just(5, 3, 7, 1, 9, 6, 4, 8, 2).Sort(func(a, b interface{}) bool {
return a.(int) < b.(int)
}).ForEach(func(item interface{}) {
next := item.(int)
assert.True(t, prev < next)
prev = next
})
})
}
func TestSplit(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(0).Done()
runCheckedTest(t, func(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(0).Done()
})
var chunks [][]interface{}
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item interface{}) {
chunk := item.([]interface{})
chunks = append(chunks, chunk)
})
assert.EqualValues(t, [][]interface{}{
{1, 2, 3, 4},
{5, 6, 7, 8},
{9, 10},
}, chunks)
})
var chunks [][]interface{}
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item interface{}) {
chunk := item.([]interface{})
chunks = append(chunks, chunk)
})
assert.EqualValues(t, [][]interface{}{
{1, 2, 3, 4},
{5, 6, 7, 8},
{9, 10},
}, chunks)
}
func TestTail(t *testing.T) {
var result int
Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 7, result)
})
assert.Equal(t, 7, result)
}
func TestTailZero(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4).Tail(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
return nil, nil
runCheckedTest(t, func(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4).Tail(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
return nil, nil
})
})
})
}
func TestWalk(t *testing.T) {
var result int
Just(1, 2, 3, 4, 5).Walk(func(item interface{}, pipe chan<- interface{}) {
if item.(int)%2 != 0 {
pipe <- item
}
}, UnlimitedWorkers()).ForEach(func(item interface{}) {
result += item.(int)
runCheckedTest(t, func(t *testing.T) {
var result int
Just(1, 2, 3, 4, 5).Walk(func(item interface{}, pipe chan<- interface{}) {
if item.(int)%2 != 0 {
pipe <- item
}
}, UnlimitedWorkers()).ForEach(func(item interface{}) {
result += item.(int)
})
assert.Equal(t, 9, result)
})
}
func TestStream_AnyMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 4
}))
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 0
}))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 2
}))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 2
}))
})
}
func TestStream_AllMach(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
assetEqual(
t, true, Just(1, 2, 3).AllMach(func(item interface{}) bool {
return true
}),
)
assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool {
return false
}),
)
assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool {
return item.(int) == 1
}),
)
})
}
func TestStream_NoneMatch(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
assetEqual(
t, true, Just(1, 2, 3).NoneMatch(func(item interface{}) bool {
return false
}),
)
assetEqual(
t, false, Just(1, 2, 3).NoneMatch(func(item interface{}) bool {
return true
}),
)
assetEqual(
t, true, Just(1, 2, 3).NoneMatch(func(item interface{}) bool {
return item.(int) == 4
}),
)
})
}
func TestConcat(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
a1 := []interface{}{1, 2, 3}
a2 := []interface{}{4, 5, 6}
s1 := Just(a1...)
s2 := Just(a2...)
stream := Concat(s1, s2)
var items []interface{}
for item := range stream.source {
items = append(items, item)
}
sort.Slice(items, func(i, j int) bool {
return items[i].(int) < items[j].(int)
})
ints := make([]interface{}, 0)
ints = append(ints, a1...)
ints = append(ints, a2...)
assetEqual(t, ints, items)
})
}
func TestStream_Skip(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
assetEqual(t, 3, Just(1, 2, 3, 4).Skip(1).Count())
assetEqual(t, 1, Just(1, 2, 3, 4).Skip(3).Count())
assetEqual(t, 4, Just(1, 2, 3, 4).Skip(0).Count())
equal(t, Just(1, 2, 3, 4).Skip(3), []interface{}{4})
assert.Panics(t, func() {
Just(1, 2, 3, 4).Skip(-1)
})
})
}
func TestStream_Concat(t *testing.T) {
runCheckedTest(t, func(t *testing.T) {
stream := Just(1).Concat(Just(2), Just(3))
var items []interface{}
for item := range stream.source {
items = append(items, item)
}
sort.Slice(items, func(i, j int) bool {
return items[i].(int) < items[j].(int)
})
assetEqual(t, []interface{}{1, 2, 3}, items)
just := Just(1)
equal(t, just.Concat(just), []interface{}{1})
})
assert.Equal(t, 9, result)
}
func BenchmarkParallelMapReduce(b *testing.B) {
@@ -377,6 +547,12 @@ func BenchmarkMapReduce(b *testing.B) {
}).Map(mapper).Reduce(reducer)
}
func assetEqual(t *testing.T, except, data interface{}) {
if !reflect.DeepEqual(except, data) {
t.Errorf(" %v, want %v", data, except)
}
}
func equal(t *testing.T, stream Stream, data []interface{}) {
items := make([]interface{}, 0)
for item := range stream.source {
@@ -387,85 +563,7 @@ func equal(t *testing.T, stream Stream, data []interface{}) {
}
}
func assetEqual(t *testing.T, except, data interface{}) {
if !reflect.DeepEqual(except, data) {
t.Errorf(" %v, want %v", data, except)
}
}
func TestStream_AnyMach(t *testing.T) {
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 4
}))
assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 0
}))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 2
}))
assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
return item.(int) == 2
}))
}
func TestStream_AllMach(t *testing.T) {
assetEqual(
t, true, Just(1, 2, 3).AllMach(func(item interface{}) bool {
return true
}),
)
assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool {
return false
}),
)
assetEqual(
t, false, Just(1, 2, 3).AllMach(func(item interface{}) bool {
return item.(int) == 1
}),
)
}
func TestConcat(t *testing.T) {
a1 := []interface{}{1, 2, 3}
a2 := []interface{}{4, 5, 6}
s1 := Just(a1...)
s2 := Just(a2...)
stream := Concat(s1, s2)
var items []interface{}
for item := range stream.source {
items = append(items, item)
}
sort.Slice(items, func(i, j int) bool {
return items[i].(int) < items[j].(int)
})
ints := make([]interface{}, 0)
ints = append(ints, a1...)
ints = append(ints, a2...)
assetEqual(t, ints, items)
}
func TestStream_Skip(t *testing.T) {
assetEqual(t, 3, Just(1, 2, 3, 4).Skip(1).Count())
assetEqual(t, 1, Just(1, 2, 3, 4).Skip(3).Count())
assetEqual(t, 4, Just(1, 2, 3, 4).Skip(0).Count())
equal(t, Just(1, 2, 3, 4).Skip(3), []interface{}{4})
assert.Panics(t, func() {
Just(1, 2, 3, 4).Skip(-1)
})
}
func TestStream_Concat(t *testing.T) {
stream := Just(1).Concat(Just(2), Just(3))
var items []interface{}
for item := range stream.source {
items = append(items, item)
}
sort.Slice(items, func(i, j int) bool {
return items[i].(int) < items[j].(int)
})
assetEqual(t, []interface{}{1, 2, 3}, items)
just := Just(1)
equal(t, just.Concat(just), []interface{}{1})
func runCheckedTest(t *testing.T, fn func(t *testing.T)) {
defer goleak.VerifyNone(t)
fn(t)
}

View File

@@ -6,8 +6,8 @@ import (
"strconv"
"sync"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/mapping"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/mapping"
)
const (

View File

@@ -6,7 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/mathx"
)
const (

View File

@@ -9,8 +9,8 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/stringx"
)
func TestReadText(t *testing.T) {

View File

@@ -4,8 +4,8 @@ package lang
var Placeholder PlaceholderType
type (
// GenericType can be used to hold any type.
GenericType = interface{}
// AnyType can be used to hold any type.
AnyType = interface{}
// PlaceholderType represents a placeholder type.
PlaceholderType = struct{}
)

View File

@@ -5,12 +5,11 @@ import (
"strconv"
"time"
"github.com/tal-tech/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/core/stores/redis"
)
const (
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
periodScript = `local limit = tonumber(ARGV[1])
// to be compatible with aliyun redis, we cannot use `local key = KEYS[1]` to reuse the key
const periodScript = `local limit = tonumber(ARGV[1])
local window = tonumber(ARGV[2])
local current = redis.call("INCRBY", KEYS[1], 1)
if current == 1 then
@@ -23,8 +22,6 @@ elseif current == limit then
else
return 0
end`
zoneDiff = 3600 * 8 // GMT+8 for our services
)
const (
// Unknown means not initialized state.
@@ -104,7 +101,9 @@ func (h *PeriodLimit) Take(key string) (int, error) {
func (h *PeriodLimit) calcExpireSeconds() int {
if h.align {
unix := time.Now().Unix() + zoneDiff
now := time.Now()
_, offset := now.Zone()
unix := now.Unix() + int64(offset)
return h.period - int(unix%int64(h.period))
}
@@ -112,6 +111,8 @@ func (h *PeriodLimit) calcExpireSeconds() int {
}
// Align returns a func to customize a PeriodLimit with alignment.
// For example, if we want to limit end users with 5 sms verification messages every day,
// we need to align with the local timezone and the start of the day.
func Align() PeriodOption {
return func(l *PeriodLimit) {
l.align = true
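For orientation, here is a hedged usage sketch of the aligned limiter described by the comment above. `Take` appears in the hunk header, but the `NewPeriodLimit` signature and the `Allowed`/`HitQuota` result codes are assumptions based on the rest of the package, not shown in this diff:

```go
package example

import (
	"github.com/zeromicro/go-zero/core/limit"
	"github.com/zeromicro/go-zero/core/stores/redis"
)

// allowSms reports whether userID may receive another SMS verification message today.
// With limit.Align(), the window ends at local midnight rather than a fixed
// number of seconds after the first request.
// Assumed signature: NewPeriodLimit(period, quota int, store *redis.Redis, keyPrefix string, opts ...PeriodOption).
func allowSms(store *redis.Redis, userID string) (bool, error) {
	const secondsPerDay = 24 * 60 * 60
	l := limit.NewPeriodLimit(secondsPerDay, 5, store, "sms:", limit.Align())
	code, err := l.Take(userID)
	if err != nil {
		return false, err
	}
	// Allowed / HitQuota both mean this request is still within quota (assumed constants).
	return code == limit.Allowed || code == limit.HitQuota, nil
}
```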

View File

@@ -5,8 +5,8 @@ import (
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stores/redis"
"github.com/tal-tech/go-zero/core/stores/redis/redistest"
"github.com/zeromicro/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/core/stores/redis/redistest"
)
func TestPeriodLimit_Take(t *testing.T) {

View File

@@ -7,8 +7,8 @@ import (
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stores/redis"
xrate "golang.org/x/time/rate"
)
@@ -85,8 +85,8 @@ func (lim *TokenLimiter) Allow() bool {
}
// AllowN reports whether n events may happen at time now.
// Use this method if you intend to drop / skip events that exceed the rate rate.
// Otherwise use Reserve or Wait.
// Use this method if you intend to drop / skip events that exceed the rate.
// Otherwise, use Reserve or Wait.
func (lim *TokenLimiter) AllowN(now time.Time, n int) bool {
return lim.reserveN(now, n)
}
@@ -112,7 +112,8 @@ func (lim *TokenLimiter) reserveN(now time.Time, n int) bool {
// Lua boolean false -> r Nil bulk reply
if err == redis.Nil {
return false
} else if err != nil {
}
if err != nil {
logx.Errorf("fail to use rate limiter: %s, use in-process limiter for rescue", err)
lim.startMonitor()
return lim.rescueLimiter.AllowN(now, n)
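To make the updated `AllowN` comment concrete, a minimal sketch of the drop-on-overload pattern it describes; the `limit.TokenLimiter` type and `AllowN(now, n)` come from this file, everything else here is illustrative:

```go
package example

import (
	"time"

	"github.com/zeromicro/go-zero/core/limit"
)

// tryBatch drops the whole batch when n tokens are not available right now,
// which is the drop/skip use case AllowN is meant for; there is no waiting.
func tryBatch(l *limit.TokenLimiter, n int, handle func()) bool {
	if !l.AllowN(time.Now(), n) {
		return false // over the rate, skip this batch
	}
	handle()
	return true
}
```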

View File

@@ -6,9 +6,9 @@ import (
"github.com/alicebob/miniredis/v2"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/stores/redis"
"github.com/tal-tech/go-zero/core/stores/redis/redistest"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stores/redis"
"github.com/zeromicro/go-zero/core/stores/redis/redistest"
)
func init() {

View File

@@ -7,11 +7,11 @@ import (
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/collection"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/stat"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
)
const (

View File

@@ -8,11 +8,11 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/collection"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/mathx"
"github.com/tal-tech/go-zero/core/stat"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/collection"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/mathx"
"github.com/zeromicro/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/syncx"
)
const (

View File

@@ -3,7 +3,7 @@ package load
import (
"io"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/syncx"
)
// A ShedderGroup is a manager to manage key based shedders.

View File

@@ -4,8 +4,8 @@ import (
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/stat"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/stat"
)
type (

View File

@@ -3,10 +3,11 @@ package logx
// A LogConf is a logging config.
type LogConf struct {
ServiceName string `json:",optional"`
Mode string `json:",default=console,options=console|file|volume"`
Mode string `json:",default=console,options=[console,file,volume]"`
Encoding string `json:",default=json,options=[json,plain]"`
TimeFormat string `json:",optional"`
Path string `json:",default=logs"`
Level string `json:",default=info,options=info|error|severe"`
Level string `json:",default=info,options=[info,error,severe]"`
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
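The new `Encoding` field makes the plain (tab-separated) console output opt-in, with `json` staying the default. A hedged configuration sketch, using only fields and option values visible in the tags above (the service name is made up):

```go
package example

import "github.com/zeromicro/go-zero/core/logx"

// setupPlainLogging selects the plain encoder added in this change set.
func setupPlainLogging() error {
	return logx.SetUp(logx.LogConf{
		ServiceName: "demo",    // hypothetical service name
		Mode:        "console", // per the tag: console|file|volume
		Encoding:    "plain",   // new option; "json" remains the default
		Level:       "info",
	})
}
```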

View File

@@ -5,7 +5,7 @@ import (
"io"
"time"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/timex"
)
const durationCallerDepth = 3
@@ -79,8 +79,15 @@ func (l *durationLogger) WithDuration(duration time.Duration) Logger {
}
func (l *durationLogger) write(writer io.Writer, level string, val interface{}) {
l.Timestamp = getTimestamp()
l.Level = level
l.Content = val
outputJson(writer, l)
switch encoding {
case plainEncodingType:
writePlainAny(writer, level, val, l.Duration)
default:
outputJson(writer, &durationLogger{
Timestamp: getTimestamp(),
Level: level,
Content: val,
Duration: l.Duration,
})
}
}

View File

@@ -23,6 +23,13 @@ func TestWithDurationErrorf(t *testing.T) {
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
}
func TestWithDurationErrorv(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
WithDuration(time.Second).Errorv("foo")
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
}
func TestWithDurationInfo(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
@@ -30,6 +37,19 @@ func TestWithDurationInfo(t *testing.T) {
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
}
func TestWithDurationInfoConsole(t *testing.T) {
old := encoding
encoding = plainEncodingType
defer func() {
encoding = old
}()
var builder strings.Builder
log.SetOutput(&builder)
WithDuration(time.Second).Info("foo")
assert.True(t, strings.Contains(builder.String(), "ms"), builder.String())
}
func TestWithDurationInfof(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
@@ -37,6 +57,13 @@ func TestWithDurationInfof(t *testing.T) {
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
}
func TestWithDurationInfov(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
WithDuration(time.Second).Infov("foo")
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
}
func TestWithDurationSlow(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
@@ -50,3 +77,10 @@ func TestWithDurationSlowf(t *testing.T) {
WithDuration(time.Second).WithDuration(time.Hour).Slowf("foo")
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
}
func TestWithDurationSlowv(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
WithDuration(time.Second).WithDuration(time.Hour).Slowv("foo")
assert.True(t, strings.Contains(builder.String(), "duration"), builder.String())
}

View File

@@ -4,8 +4,8 @@ import (
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/timex"
)
type limitedExecutor struct {

View File

@@ -6,7 +6,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/timex"
)
func TestLimitedExecutor_logOrDiscard(t *testing.T) {

View File

@@ -1,6 +1,7 @@
package logx
import (
"bytes"
"encoding/json"
"errors"
"fmt"
@@ -17,9 +18,9 @@ import (
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/iox"
"github.com/tal-tech/go-zero/core/sysx"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/iox"
"github.com/zeromicro/go-zero/core/sysx"
"github.com/zeromicro/go-zero/core/timex"
)
const (
@@ -31,6 +32,15 @@ const (
SevereLevel
)
const (
jsonEncodingType = iota
plainEncodingType
jsonEncoding = "json"
plainEncoding = "plain"
plainEncodingSep = '\t'
)
const (
accessFilename = "access.log"
errorFilename = "error.log"
@@ -65,6 +75,7 @@ var (
timeFormat = "2006-01-02T15:04:05.000Z07"
writeConsole bool
logLevel uint32
encoding = jsonEncodingType
// use uint32 for atomic operations
disableStat uint32
infoLog io.WriteCloser
@@ -124,6 +135,12 @@ func SetUp(c LogConf) error {
if len(c.TimeFormat) > 0 {
timeFormat = c.TimeFormat
}
switch c.Encoding {
case plainEncoding:
encoding = plainEncodingType
default:
encoding = jsonEncodingType
}
switch c.Mode {
case consoleMode:
@@ -407,21 +424,31 @@ func infoTextSync(msg string) {
}
func outputAny(writer io.Writer, level string, val interface{}) {
info := logEntry{
Timestamp: getTimestamp(),
Level: level,
Content: val,
switch encoding {
case plainEncodingType:
writePlainAny(writer, level, val)
default:
info := logEntry{
Timestamp: getTimestamp(),
Level: level,
Content: val,
}
outputJson(writer, info)
}
outputJson(writer, info)
}
func outputText(writer io.Writer, level, msg string) {
info := logEntry{
Timestamp: getTimestamp(),
Level: level,
Content: msg,
switch encoding {
case plainEncodingType:
writePlainText(writer, level, msg)
default:
info := logEntry{
Timestamp: getTimestamp(),
Level: level,
Content: msg,
}
outputJson(writer, info)
}
outputJson(writer, info)
}
func outputError(writer io.Writer, msg string, callDepth int) {
@@ -565,6 +592,62 @@ func statSync(msg string) {
}
}
func writePlainAny(writer io.Writer, level string, val interface{}, fields ...string) {
switch v := val.(type) {
case string:
writePlainText(writer, level, v, fields...)
case error:
writePlainText(writer, level, v.Error(), fields...)
case fmt.Stringer:
writePlainText(writer, level, v.String(), fields...)
default:
var buf bytes.Buffer
buf.WriteString(getTimestamp())
buf.WriteByte(plainEncodingSep)
buf.WriteString(level)
for _, item := range fields {
buf.WriteByte(plainEncodingSep)
buf.WriteString(item)
}
buf.WriteByte(plainEncodingSep)
if err := json.NewEncoder(&buf).Encode(val); err != nil {
log.Println(err.Error())
return
}
buf.WriteByte('\n')
if atomic.LoadUint32(&initialized) == 0 || writer == nil {
log.Println(buf.String())
return
}
if _, err := writer.Write(buf.Bytes()); err != nil {
log.Println(err.Error())
}
}
}
func writePlainText(writer io.Writer, level, msg string, fields ...string) {
var buf bytes.Buffer
buf.WriteString(getTimestamp())
buf.WriteByte(plainEncodingSep)
buf.WriteString(level)
for _, item := range fields {
buf.WriteByte(plainEncodingSep)
buf.WriteString(item)
}
buf.WriteByte(plainEncodingSep)
buf.WriteString(msg)
buf.WriteByte('\n')
if atomic.LoadUint32(&initialized) == 0 || writer == nil {
log.Println(buf.String())
return
}
if _, err := writer.Write(buf.Bytes()); err != nil {
log.Println(err.Error())
}
}
type logWriter struct {
logger *log.Logger
}

View File

@@ -141,6 +141,78 @@ func TestStructedLogInfov(t *testing.T) {
})
}
func TestStructedLogInfoConsoleAny(t *testing.T) {
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
infoLog = writer
}, func(v ...interface{}) {
old := encoding
encoding = plainEncodingType
defer func() {
encoding = old
}()
Infov(v)
})
}
func TestStructedLogInfoConsoleAnyString(t *testing.T) {
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
infoLog = writer
}, func(v ...interface{}) {
old := encoding
encoding = plainEncodingType
defer func() {
encoding = old
}()
Infov(fmt.Sprint(v...))
})
}
func TestStructedLogInfoConsoleAnyError(t *testing.T) {
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
infoLog = writer
}, func(v ...interface{}) {
old := encoding
encoding = plainEncodingType
defer func() {
encoding = old
}()
Infov(errors.New(fmt.Sprint(v...)))
})
}
func TestStructedLogInfoConsoleAnyStringer(t *testing.T) {
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
infoLog = writer
}, func(v ...interface{}) {
old := encoding
encoding = plainEncodingType
defer func() {
encoding = old
}()
Infov(ValStringer{
val: fmt.Sprint(v...),
})
})
}
func TestStructedLogInfoConsoleText(t *testing.T) {
doTestStructedLogConsole(t, func(writer io.WriteCloser) {
infoLog = writer
}, func(v ...interface{}) {
old := encoding
encoding = plainEncodingType
defer func() {
encoding = old
}()
Info(fmt.Sprint(v...))
})
}
func TestStructedLogSlow(t *testing.T) {
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
slowLog = writer
@@ -432,6 +504,17 @@ func doTestStructedLog(t *testing.T, level string, setup func(writer io.WriteClo
assert.True(t, strings.Contains(val, message))
}
func doTestStructedLogConsole(t *testing.T, setup func(writer io.WriteCloser),
write func(...interface{})) {
const message = "hello there"
writer := new(mockWriter)
setup(writer)
atomic.StoreUint32(&initialized, 1)
write(message)
println(writer.String())
assert.True(t, strings.Contains(writer.String(), message))
}
func testSetLevelTwiceWithMode(t *testing.T, mode string) {
SetUp(LogConf{
Mode: mode,
@@ -456,3 +539,11 @@ func testSetLevelTwiceWithMode(t *testing.T, mode string) {
ErrorStackf(message)
assert.Equal(t, 0, writer.builder.Len())
}
type ValStringer struct {
val string
}
func (v ValStringer) String() string {
return v.val
}

View File

@@ -13,9 +13,9 @@ import (
"sync"
"time"
"github.com/tal-tech/go-zero/core/fs"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/timex"
)
const (

View File

@@ -8,7 +8,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/fs"
"github.com/zeromicro/go-zero/core/fs"
)
func TestDailyRotateRuleMarkRotated(t *testing.T) {

View File

@@ -6,7 +6,7 @@ import (
"io"
"time"
"github.com/tal-tech/go-zero/core/timex"
"github.com/zeromicro/go-zero/core/timex"
"go.opentelemetry.io/otel/trace"
)
@@ -77,12 +77,24 @@ func (l *traceLogger) WithDuration(duration time.Duration) Logger {
}
func (l *traceLogger) write(writer io.Writer, level string, val interface{}) {
l.Timestamp = getTimestamp()
l.Level = level
l.Content = val
l.Trace = traceIdFromContext(l.ctx)
l.Span = spanIdFromContext(l.ctx)
outputJson(writer, l)
traceID := traceIdFromContext(l.ctx)
spanID := spanIdFromContext(l.ctx)
switch encoding {
case plainEncodingType:
writePlainAny(writer, level, val, l.Duration, traceID, spanID)
default:
outputJson(writer, &traceLogger{
logEntry: logEntry{
Timestamp: getTimestamp(),
Level: level,
Duration: l.Duration,
Content: val,
},
Trace: traceID,
Span: spanID,
})
}
}
// WithContext sets ctx to log, for keeping tracing information.

View File

@@ -51,6 +51,10 @@ func TestTraceError(t *testing.T) {
l.WithDuration(time.Second).Errorf(testlog)
assert.True(t, strings.Contains(buf.String(), traceKey))
assert.True(t, strings.Contains(buf.String(), spanKey))
buf.Reset()
l.WithDuration(time.Second).Errorv(testlog)
assert.True(t, strings.Contains(buf.String(), traceKey))
assert.True(t, strings.Contains(buf.String(), spanKey))
}
func TestTraceInfo(t *testing.T) {
@@ -72,6 +76,41 @@ func TestTraceInfo(t *testing.T) {
l.WithDuration(time.Second).Infof(testlog)
assert.True(t, strings.Contains(buf.String(), traceKey))
assert.True(t, strings.Contains(buf.String(), spanKey))
buf.Reset()
l.WithDuration(time.Second).Infov(testlog)
assert.True(t, strings.Contains(buf.String(), traceKey))
assert.True(t, strings.Contains(buf.String(), spanKey))
}
func TestTraceInfoConsole(t *testing.T) {
old := encoding
encoding = plainEncodingType
defer func() {
encoding = old
}()
var buf mockWriter
atomic.StoreUint32(&initialized, 1)
infoLog = newLogWriter(log.New(&buf, "", flags))
otp := otel.GetTracerProvider()
tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
otel.SetTracerProvider(tp)
defer otel.SetTracerProvider(otp)
ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
l := WithContext(ctx).(*traceLogger)
SetLevel(InfoLevel)
l.WithDuration(time.Second).Info(testlog)
assert.True(t, strings.Contains(buf.String(), traceIdFromContext(ctx)))
assert.True(t, strings.Contains(buf.String(), spanIdFromContext(ctx)))
buf.Reset()
l.WithDuration(time.Second).Infof(testlog)
assert.True(t, strings.Contains(buf.String(), traceIdFromContext(ctx)))
assert.True(t, strings.Contains(buf.String(), spanIdFromContext(ctx)))
buf.Reset()
l.WithDuration(time.Second).Infov(testlog)
assert.True(t, strings.Contains(buf.String(), traceIdFromContext(ctx)))
assert.True(t, strings.Contains(buf.String(), spanIdFromContext(ctx)))
}
func TestTraceSlow(t *testing.T) {
@@ -93,6 +132,10 @@ func TestTraceSlow(t *testing.T) {
l.WithDuration(time.Second).Slowf(testlog)
assert.True(t, strings.Contains(buf.String(), traceKey))
assert.True(t, strings.Contains(buf.String(), spanKey))
buf.Reset()
l.WithDuration(time.Second).Slowv(testlog)
assert.True(t, strings.Contains(buf.String(), traceKey))
assert.True(t, strings.Contains(buf.String(), spanKey))
}
func TestTraceWithoutContext(t *testing.T) {

View File

@@ -3,7 +3,7 @@ package mapping
import (
"io"
"github.com/tal-tech/go-zero/core/jsonx"
"github.com/zeromicro/go-zero/core/jsonx"
)
const jsonTagKey = "json"
@@ -15,6 +15,11 @@ func UnmarshalJsonBytes(content []byte, v interface{}) error {
return unmarshalJsonBytes(content, v, jsonUnmarshaler)
}
// UnmarshalJsonMap unmarshals content from m into v.
func UnmarshalJsonMap(m map[string]interface{}, v interface{}) error {
return jsonUnmarshaler.Unmarshal(m, v)
}
// UnmarshalJsonReader unmarshals content from reader into v.
func UnmarshalJsonReader(reader io.Reader, v interface{}) error {
return unmarshalJsonReader(reader, v, jsonUnmarshaler)
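A hedged sketch of the new `UnmarshalJsonMap` helper in use; the struct and map below are illustrative only:

```go
package example

import "github.com/zeromicro/go-zero/core/mapping"

type person struct {
	Name string `json:"name,optional"`
	Age  int    `json:"age,default=18"`
}

// fromMap fills a struct from an already-decoded map, the case UnmarshalJsonMap
// covers without an extra encode/decode round trip.
func fromMap(m map[string]interface{}) (person, error) {
	var p person
	err := mapping.UnmarshalJsonMap(m, &p)
	return p, err
}
```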

View File

@@ -871,3 +871,50 @@ func TestUnmarshalReaderError(t *testing.T) {
assert.NotNil(t, err)
assert.True(t, strings.Contains(err.Error(), payload))
}
func TestUnmarshalMap(t *testing.T) {
t.Run("nil map and valid", func(t *testing.T) {
var m map[string]interface{}
var v struct {
Any string `json:",optional"`
}
err := UnmarshalJsonMap(m, &v)
assert.Nil(t, err)
assert.True(t, len(v.Any) == 0)
})
t.Run("empty map but not valid", func(t *testing.T) {
m := map[string]interface{}{}
var v struct {
Any string
}
err := UnmarshalJsonMap(m, &v)
assert.NotNil(t, err)
})
t.Run("empty map and valid", func(t *testing.T) {
m := map[string]interface{}{}
var v struct {
Any string `json:",optional"`
}
err := UnmarshalJsonMap(m, &v)
assert.Nil(t, err)
assert.True(t, len(v.Any) == 0)
})
t.Run("valid map", func(t *testing.T) {
m := map[string]interface{}{
"Any": "foo",
}
var v struct {
Any string
}
err := UnmarshalJsonMap(m, &v)
assert.Nil(t, err)
assert.Equal(t, "foo", v.Any)
})
}

View File

@@ -7,12 +7,11 @@ import (
"reflect"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/tal-tech/go-zero/core/jsonx"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/jsonx"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/stringx"
)
const (
@@ -25,15 +24,17 @@ var (
errValueNotSettable = errors.New("value is not settable")
errValueNotStruct = errors.New("value type is not struct")
keyUnmarshaler = NewUnmarshaler(defaultKeyName)
cacheKeys atomic.Value
cacheKeysLock sync.Mutex
durationType = reflect.TypeOf(time.Duration(0))
cacheKeys map[string][]string
cacheKeysLock sync.Mutex
defaultCache map[string]interface{}
defaultCacheLock sync.Mutex
emptyMap = map[string]interface{}{}
emptyValue = reflect.ValueOf(lang.Placeholder)
)
type (
// A Unmarshaler is used to unmarshal with given tag key.
// Unmarshaler is used to unmarshal with given tag key.
Unmarshaler struct {
key string
opts unmarshalOptions
@@ -46,12 +47,11 @@ type (
fromString bool
canonicalKey func(key string) string
}
keyCache map[string][]string
)
func init() {
cacheKeys.Store(make(keyCache))
cacheKeys = make(map[string][]string)
defaultCache = make(map[string]interface{})
}
// NewUnmarshaler returns a Unmarshaler.
@@ -388,7 +388,13 @@ func (u *Unmarshaler) processNamedFieldWithoutValue(field reflect.StructField, v
if derefedType == durationType {
return fillDurationValue(fieldKind, value, defaultValue)
}
return setValue(fieldKind, value, defaultValue)
switch fieldKind {
case reflect.Array, reflect.Slice:
return u.fillSliceWithDefault(derefedType, value, defaultValue)
default:
return setValue(fieldKind, value, defaultValue)
}
}
switch fieldKind {
@@ -502,7 +508,8 @@ func (u *Unmarshaler) fillSliceFromString(fieldType reflect.Type, value reflect.
return nil
}
func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int, baseKind reflect.Kind, value interface{}) error {
func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int,
baseKind reflect.Kind, value interface{}) error {
ithVal := slice.Index(index)
switch v := value.(type) {
case json.Number:
@@ -531,6 +538,28 @@ func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int, baseKind re
}
}
func (u *Unmarshaler) fillSliceWithDefault(derefedType reflect.Type, value reflect.Value,
defaultValue string) error {
baseFieldType := Deref(derefedType.Elem())
baseFieldKind := baseFieldType.Kind()
defaultCacheLock.Lock()
slice, ok := defaultCache[defaultValue]
defaultCacheLock.Unlock()
if !ok {
if baseFieldKind == reflect.String {
slice = parseGroupedSegments(defaultValue)
} else if err := jsonx.UnmarshalFromString(defaultValue, &slice); err != nil {
return err
}
defaultCacheLock.Lock()
defaultCache[defaultValue] = slice
defaultCacheLock.Unlock()
}
return u.fillSlice(derefedType, value, slice)
}
func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue interface{}) (reflect.Value, error) {
mapType := reflect.MapOf(keyType, elemType)
valueType := reflect.TypeOf(mapValue)
@@ -713,7 +742,9 @@ func getValueWithChainedKeys(m Valuer, keys []string) (interface{}, bool) {
if len(keys) == 1 {
v, ok := m.Value(keys[0])
return v, ok
} else if len(keys) > 1 {
}
if len(keys) > 1 {
if v, ok := m.Value(keys[0]); ok {
if nextm, ok := v.(map[string]interface{}); ok {
return getValueWithChainedKeys(MapValuer(nextm), keys[1:])
@@ -724,20 +755,6 @@ func getValueWithChainedKeys(m Valuer, keys []string) (interface{}, bool) {
return nil, false
}
func insertKeys(key string, cache []string) {
cacheKeysLock.Lock()
defer cacheKeysLock.Unlock()
keys := cacheKeys.Load().(keyCache)
// copy the contents into the new map, to guarantee the old map is immutable
newKeys := make(keyCache)
for k, v := range keys {
newKeys[k] = v
}
newKeys[key] = cache
cacheKeys.Store(newKeys)
}
func join(elem ...string) string {
var builder strings.Builder
@@ -768,15 +785,19 @@ func newTypeMismatchError(name string) error {
}
func readKeys(key string) []string {
cache := cacheKeys.Load().(keyCache)
if keys, ok := cache[key]; ok {
cacheKeysLock.Lock()
keys, ok := cacheKeys[key]
cacheKeysLock.Unlock()
if ok {
return keys
}
keys := strings.FieldsFunc(key, func(c rune) bool {
keys = strings.FieldsFunc(key, func(c rune) bool {
return c == delimiter
})
insertKeys(key, keys)
cacheKeysLock.Lock()
cacheKeys[key] = keys
cacheKeysLock.Unlock()
return keys
}

View File

@@ -8,7 +8,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/stringx"
)
// because json.Number doesn't support strconv.ParseUint(...),
@@ -198,6 +198,66 @@ func TestUnmarshalIntWithDefault(t *testing.T) {
assert.Equal(t, 1, in.Int)
}
func TestUnmarshalBoolSliceWithDefault(t *testing.T) {
type inner struct {
Bools []bool `key:"bools,default=[true,false]"`
}
var in inner
assert.Nil(t, UnmarshalKey(nil, &in))
assert.ElementsMatch(t, []bool{true, false}, in.Bools)
}
func TestUnmarshalIntSliceWithDefault(t *testing.T) {
type inner struct {
Ints []int `key:"ints,default=[1,2,3]"`
}
var in inner
assert.Nil(t, UnmarshalKey(nil, &in))
assert.ElementsMatch(t, []int{1, 2, 3}, in.Ints)
}
func TestUnmarshalIntSliceWithDefaultHasSpaces(t *testing.T) {
type inner struct {
Ints []int `key:"ints,default=[1, 2, 3]"`
}
var in inner
assert.Nil(t, UnmarshalKey(nil, &in))
assert.ElementsMatch(t, []int{1, 2, 3}, in.Ints)
}
func TestUnmarshalFloatSliceWithDefault(t *testing.T) {
type inner struct {
Floats []float32 `key:"floats,default=[1.1,2.2,3.3]"`
}
var in inner
assert.Nil(t, UnmarshalKey(nil, &in))
assert.ElementsMatch(t, []float32{1.1, 2.2, 3.3}, in.Floats)
}
func TestUnmarshalStringSliceWithDefault(t *testing.T) {
type inner struct {
Strs []string `key:"strs,default=[foo,bar,woo]"`
}
var in inner
assert.Nil(t, UnmarshalKey(nil, &in))
assert.ElementsMatch(t, []string{"foo", "bar", "woo"}, in.Strs)
}
func TestUnmarshalStringSliceWithDefaultHasSpaces(t *testing.T) {
type inner struct {
Strs []string `key:"strs,default=[foo, bar, woo]"`
}
var in inner
assert.Nil(t, UnmarshalKey(nil, &in))
assert.ElementsMatch(t, []string{"foo", "bar", "woo"}, in.Strs)
}
func TestUnmarshalUint(t *testing.T) {
type inner struct {
Uint uint `key:"uint"`
@@ -861,10 +921,12 @@ func TestUnmarshalSliceOfStruct(t *testing.T) {
func TestUnmarshalWithStringOptionsCorrect(t *testing.T) {
type inner struct {
Value string `key:"value,options=first|second"`
Foo string `key:"foo,options=[bar,baz]"`
Correct string `key:"correct,options=1|2"`
}
m := map[string]interface{}{
"value": "first",
"foo": "bar",
"correct": "2",
}
@@ -872,6 +934,7 @@ func TestUnmarshalWithStringOptionsCorrect(t *testing.T) {
ast := assert.New(t)
ast.Nil(UnmarshalKey(m, &in))
ast.Equal("first", in.Value)
ast.Equal("bar", in.Foo)
ast.Equal("2", in.Correct)
}
@@ -943,6 +1006,22 @@ func TestUnmarshalStringOptionsWithStringOptionsIncorrect(t *testing.T) {
ast.NotNil(unmarshaler.Unmarshal(m, &in))
}
func TestUnmarshalStringOptionsWithStringOptionsIncorrectGrouped(t *testing.T) {
type inner struct {
Value string `key:"value,options=[first,second]"`
Correct string `key:"correct,options=1|2"`
}
m := map[string]interface{}{
"value": "third",
"correct": "2",
}
var in inner
unmarshaler := NewUnmarshaler(defaultKeyName, WithStringValues())
ast := assert.New(t)
ast.NotNil(unmarshaler.Unmarshal(m, &in))
}
func TestUnmarshalWithStringOptionsIncorrect(t *testing.T) {
type inner struct {
Value string `key:"value,options=first|second"`
@@ -2518,3 +2597,29 @@ func TestUnmarshalJsonReaderPtrArray(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, 3, len(res.B))
}
func TestUnmarshalJsonWithoutKey(t *testing.T) {
payload := `{"A": "1", "B": "2"}`
var res struct {
A string `json:""`
B string `json:","`
}
reader := strings.NewReader(payload)
err := UnmarshalJsonReader(reader, &res)
assert.Nil(t, err)
assert.Equal(t, "1", res.A)
assert.Equal(t, "2", res.B)
}
func BenchmarkDefaultValue(b *testing.B) {
for i := 0; i < b.N; i++ {
var a struct {
Ints []int `json:"ints,default=[1,2,3]"`
Strs []string `json:"strs,default=[foo,bar,baz]"`
}
_ = UnmarshalJsonMap(nil, &a)
if len(a.Strs) != 3 || len(a.Ints) != 3 {
b.Fatal("failed")
}
}
}

View File

@@ -10,17 +10,23 @@ import (
"strings"
"sync"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/stringx"
)
const (
defaultOption = "default"
stringOption = "string"
optionalOption = "optional"
optionsOption = "options"
rangeOption = "range"
optionSeparator = "|"
equalToken = "="
defaultOption = "default"
stringOption = "string"
optionalOption = "optional"
optionsOption = "options"
rangeOption = "range"
optionSeparator = "|"
equalToken = "="
escapeChar = '\\'
leftBracket = '('
rightBracket = ')'
leftSquareBracket = '['
rightSquareBracket = ']'
segmentSeparator = ','
)
var (
@@ -118,7 +124,7 @@ func convertType(kind reflect.Kind, str string) (interface{}, error) {
}
func doParseKeyAndOptions(field reflect.StructField, value string) (string, *fieldOptions, error) {
segments := strings.Split(value, ",")
segments := parseSegments(value)
key := strings.TrimSpace(segments[0])
options := segments[1:]
@@ -198,6 +204,16 @@ func maybeNewValue(field reflect.StructField, value reflect.Value) {
}
}
func parseGroupedSegments(val string) []string {
val = strings.TrimLeftFunc(val, func(r rune) bool {
return r == leftBracket || r == leftSquareBracket
})
val = strings.TrimRightFunc(val, func(r rune) bool {
return r == rightBracket || r == rightSquareBracket
})
return parseSegments(val)
}
// don't modify returned fieldOptions, it's cached and shared among different calls.
func parseKeyAndOptions(tagName string, field reflect.StructField) (string, *fieldOptions, error) {
value := field.Tag.Get(tagName)
@@ -309,7 +325,7 @@ func parseOption(fieldOpts *fieldOptions, fieldName, option string) error {
return fmt.Errorf("field %s has wrong options", fieldName)
}
fieldOpts.Options = strings.Split(segs[1], optionSeparator)
fieldOpts.Options = parseOptions(segs[1])
case strings.HasPrefix(option, defaultOption):
segs := strings.Split(option, equalToken)
if len(segs) != 2 {
@@ -334,6 +350,69 @@ func parseOption(fieldOpts *fieldOptions, fieldName, option string) error {
return nil
}
// parseOptions parses the given options in tag.
// for example: `json:"name,options=foo|bar"` or `json:"name,options=[foo,bar]"`
func parseOptions(val string) []string {
if len(val) == 0 {
return nil
}
if val[0] == leftSquareBracket {
return parseGroupedSegments(val)
}
return strings.Split(val, optionSeparator)
}
func parseSegments(val string) []string {
var segments []string
var escaped, grouped bool
var buf strings.Builder
for _, ch := range val {
if escaped {
buf.WriteRune(ch)
escaped = false
continue
}
switch ch {
case segmentSeparator:
if grouped {
buf.WriteRune(ch)
} else {
// need to trim spaces, but we cannot ignore empty strings,
// because the first segment, which stands for the key, might be empty.
// if ignored, the later tag would be used as the key.
segments = append(segments, strings.TrimSpace(buf.String()))
buf.Reset()
}
case escapeChar:
if grouped {
buf.WriteRune(ch)
} else {
escaped = true
}
case leftBracket, leftSquareBracket:
buf.WriteRune(ch)
grouped = true
case rightBracket, rightSquareBracket:
buf.WriteRune(ch)
grouped = false
default:
buf.WriteRune(ch)
}
}
last := strings.TrimSpace(buf.String())
// ignore last empty string
if len(last) > 0 {
segments = append(segments, last)
}
return segments
}
func reprOfValue(val reflect.Value) string {
switch vt := val.Interface().(type) {
case bool:
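The grouped `[...]` syntax above lets option and default lists contain commas. A hedged example of the equivalent tag spellings, borrowing `UnmarshalKey` and the `key` tag from the tests in this diff (the struct itself is made up):

```go
package example

import "github.com/zeromicro/go-zero/core/mapping"

type settings struct {
	// pipe-separated and bracketed option lists are both accepted
	Mode   string `key:"mode,options=dev|prod"`
	Region string `key:"region,options=[us,eu,cn]"`
	// bracketed defaults allow list values, which plain comma splitting could not express
	Hosts []string `key:"hosts,default=[localhost,127.0.0.1]"`
	Ports []int    `key:"ports,default=[6379,6380]"`
}

func loadSettings() (settings, error) {
	var s settings
	// hosts and ports fall back to their bracketed defaults
	err := mapping.UnmarshalKey(map[string]interface{}{
		"mode":   "dev",
		"region": "eu",
	}, &s)
	return s, err
}
```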

View File

@@ -90,6 +90,82 @@ func TestParseKeyAndOptionWithTagAndOption(t *testing.T) {
assert.True(t, options.FromString)
}
func TestParseSegments(t *testing.T) {
tests := []struct {
input string
expect []string
}{
{
input: "",
expect: []string{},
},
{
input: ",",
expect: []string{""},
},
{
input: "foo,",
expect: []string{"foo"},
},
{
input: ",foo",
// the first empty string cannot be ignored, it's the key.
expect: []string{"", "foo"},
},
{
input: "foo",
expect: []string{"foo"},
},
{
input: "foo,bar",
expect: []string{"foo", "bar"},
},
{
input: "foo,bar,baz",
expect: []string{"foo", "bar", "baz"},
},
{
input: "foo,options=a|b",
expect: []string{"foo", "options=a|b"},
},
{
input: "foo,bar,default=[baz,qux]",
expect: []string{"foo", "bar", "default=[baz,qux]"},
},
{
input: "foo,bar,options=[baz,qux]",
expect: []string{"foo", "bar", "options=[baz,qux]"},
},
{
input: `foo\,bar,options=[baz,qux]`,
expect: []string{`foo,bar`, "options=[baz,qux]"},
},
{
input: `foo,bar,options=\[baz,qux]`,
expect: []string{"foo", "bar", "options=[baz", "qux]"},
},
{
input: `foo,bar,options=[baz\,qux]`,
expect: []string{"foo", "bar", `options=[baz\,qux]`},
},
{
input: `foo\,bar,options=[baz,qux],default=baz`,
expect: []string{`foo,bar`, "options=[baz,qux]", "default=baz"},
},
{
input: `foo\,bar,options=[baz,qux, quux],default=[qux, baz]`,
expect: []string{`foo,bar`, "options=[baz,qux, quux]", "default=[qux, baz]"},
},
}
for _, test := range tests {
test := test
t.Run(test.input, func(t *testing.T) {
assert.ElementsMatch(t, test.expect, parseSegments(test.input))
})
}
}
func TestValidatePtrWithNonPtr(t *testing.T) {
var foo string
rve := reflect.ValueOf(foo)

View File

@@ -4,7 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/stringx"
)
func TestMaxInt(t *testing.T) {

View File

@@ -2,7 +2,7 @@ package metric
import (
prom "github.com/prometheus/client_golang/prometheus"
"github.com/tal-tech/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/proc"
)
type (

View File

@@ -2,7 +2,7 @@ package metric
import (
prom "github.com/prometheus/client_golang/prometheus"
"github.com/tal-tech/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/proc"
)
type (

View File

@@ -2,7 +2,7 @@ package metric
import (
prom "github.com/prometheus/client_golang/prometheus"
"github.com/tal-tech/go-zero/core/proc"
"github.com/zeromicro/go-zero/core/proc"
)
type (

View File

@@ -1,14 +1,14 @@
package mr
import (
"context"
"errors"
"fmt"
"sync"
"github.com/tal-tech/go-zero/core/errorx"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/errorx"
"github.com/zeromicro/go-zero/core/lang"
"github.com/zeromicro/go-zero/core/threading"
)
const (
@@ -24,12 +24,12 @@ var (
)
type (
// ForEachFunc is used to do element processing, but no output.
ForEachFunc func(item interface{})
// GenerateFunc is used to let callers send elements into source.
GenerateFunc func(source chan<- interface{})
// MapFunc is used to do element processing and write the output to writer.
MapFunc func(item interface{}, writer Writer)
// VoidMapFunc is used to do element processing, but no output.
VoidMapFunc func(item interface{})
// MapperFunc is used to do element processing and write the output to writer,
// use cancel func to cancel the processing.
MapperFunc func(item interface{}, writer Writer, cancel func(error))
@@ -43,6 +43,7 @@ type (
Option func(opts *mapReduceOptions)
mapReduceOptions struct {
ctx context.Context
workers int
}
@@ -68,7 +69,6 @@ func Finish(fns ...func() error) error {
cancel(err)
}
}, func(pipe <-chan interface{}, cancel func(error)) {
drain(pipe)
}, WithWorkers(len(fns)))
}
@@ -78,7 +78,7 @@ func FinishVoid(fns ...func()) {
return
}
MapVoid(func(source chan<- interface{}) {
ForEach(func(source chan<- interface{}) {
for _, fn := range fns {
source <- fn
}
@@ -88,27 +88,35 @@ func FinishVoid(fns ...func()) {
}, WithWorkers(len(fns)))
}
// ForEach maps all elements from given generate but no output.
func ForEach(generate GenerateFunc, mapper ForEachFunc, opts ...Option) {
drain(Map(generate, func(item interface{}, writer Writer) {
mapper(item)
}, opts...))
}
// Map maps all elements generated from given generate func, and returns an output channel.
func Map(generate GenerateFunc, mapper MapFunc, opts ...Option) chan interface{} {
options := buildOptions(opts...)
source := buildSource(generate)
collector := make(chan interface{}, options.workers)
done := syncx.NewDoneChan()
done := make(chan lang.PlaceholderType)
go executeMappers(mapper, source, collector, done.Done(), options.workers)
go executeMappers(options.ctx, mapper, source, collector, done, options.workers)
return collector
}
// MapReduce maps all elements generated from given generate func,
// and reduces the output elements with given reducer.
func MapReduce(generate GenerateFunc, mapper MapperFunc, reducer ReducerFunc, opts ...Option) (interface{}, error) {
func MapReduce(generate GenerateFunc, mapper MapperFunc, reducer ReducerFunc,
opts ...Option) (interface{}, error) {
source := buildSource(generate)
return MapReduceWithSource(source, mapper, reducer, opts...)
return MapReduceChan(source, mapper, reducer, opts...)
}
// MapReduceWithSource maps all elements from source, and reduce the output elements with given reducer.
func MapReduceWithSource(source <-chan interface{}, mapper MapperFunc, reducer ReducerFunc,
// MapReduceChan maps all elements from source, and reduce the output elements with given reducer.
func MapReduceChan(source <-chan interface{}, mapper MapperFunc, reducer ReducerFunc,
opts ...Option) (interface{}, error) {
options := buildOptions(opts...)
output := make(chan interface{})
@@ -119,13 +127,13 @@ func MapReduceWithSource(source <-chan interface{}, mapper MapperFunc, reducer R
}()
collector := make(chan interface{}, options.workers)
done := syncx.NewDoneChan()
writer := newGuardedWriter(output, done.Done())
done := make(chan lang.PlaceholderType)
writer := newGuardedWriter(options.ctx, output, done)
var closeOnce sync.Once
var retErr errorx.AtomicError
finish := func() {
closeOnce.Do(func() {
done.Close()
close(done)
close(output)
})
}
@@ -154,17 +162,22 @@ func MapReduceWithSource(source <-chan interface{}, mapper MapperFunc, reducer R
reducer(collector, writer, cancel)
}()
go executeMappers(func(item interface{}, w Writer) {
go executeMappers(options.ctx, func(item interface{}, w Writer) {
mapper(item, w, cancel)
}, source, collector, done.Done(), options.workers)
}, source, collector, done, options.workers)
value, ok := <-output
if err := retErr.Load(); err != nil {
return nil, err
} else if ok {
return value, nil
} else {
return nil, ErrReduceNoOutput
select {
case <-options.ctx.Done():
cancel(context.DeadlineExceeded)
return nil, context.DeadlineExceeded
case value, ok := <-output:
if err := retErr.Load(); err != nil {
return nil, err
} else if ok {
return value, nil
} else {
return nil, ErrReduceNoOutput
}
}
}
@@ -173,18 +186,19 @@ func MapReduceWithSource(source <-chan interface{}, mapper MapperFunc, reducer R
func MapReduceVoid(generate GenerateFunc, mapper MapperFunc, reducer VoidReducerFunc, opts ...Option) error {
_, err := MapReduce(generate, mapper, func(input <-chan interface{}, writer Writer, cancel func(error)) {
reducer(input, cancel)
// We need to write a placeholder to let MapReduce continue on reducer done,
// otherwise, all goroutines are waiting. The placeholder will be discarded by MapReduce.
writer.Write(lang.Placeholder)
}, opts...)
if errors.Is(err, ErrReduceNoOutput) {
return nil
}
return err
}
// MapVoid maps all elements from given generate but no output.
func MapVoid(generate GenerateFunc, mapper VoidMapFunc, opts ...Option) {
drain(Map(generate, func(item interface{}, writer Writer) {
mapper(item)
}, opts...))
// WithContext customizes a mapreduce processing accepts a given ctx.
func WithContext(ctx context.Context) Option {
return func(opts *mapReduceOptions) {
opts.ctx = ctx
}
}
// WithWorkers customizes a mapreduce processing with given workers.
@@ -224,8 +238,8 @@ func drain(channel <-chan interface{}) {
}
}
func executeMappers(mapper MapFunc, input <-chan interface{}, collector chan<- interface{},
done <-chan lang.PlaceholderType, workers int) {
func executeMappers(ctx context.Context, mapper MapFunc, input <-chan interface{},
collector chan<- interface{}, done <-chan lang.PlaceholderType, workers int) {
var wg sync.WaitGroup
defer func() {
wg.Wait()
@@ -233,9 +247,11 @@ func executeMappers(mapper MapFunc, input <-chan interface{}, collector chan<- i
}()
pool := make(chan lang.PlaceholderType, workers)
writer := newGuardedWriter(collector, done)
writer := newGuardedWriter(ctx, collector, done)
for {
select {
case <-ctx.Done():
return
case <-done:
return
case pool <- lang.Placeholder:
@@ -261,6 +277,7 @@ func executeMappers(mapper MapFunc, input <-chan interface{}, collector chan<- i
func newOptions() *mapReduceOptions {
return &mapReduceOptions{
ctx: context.Background(),
workers: defaultWorkers,
}
}
@@ -275,12 +292,15 @@ func once(fn func(error)) func(error) {
}
type guardedWriter struct {
ctx context.Context
channel chan<- interface{}
done <-chan lang.PlaceholderType
}
func newGuardedWriter(channel chan<- interface{}, done <-chan lang.PlaceholderType) guardedWriter {
func newGuardedWriter(ctx context.Context, channel chan<- interface{},
done <-chan lang.PlaceholderType) guardedWriter {
return guardedWriter{
ctx: ctx,
channel: channel,
done: done,
}
@@ -288,6 +308,8 @@ func newGuardedWriter(channel chan<- interface{}, done <-chan lang.PlaceholderTy
func (gw guardedWriter) Write(v interface{}) {
select {
case <-gw.ctx.Done():
return
case <-gw.done:
return
default:
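`ForEach` replaces the removed `MapVoid`; a minimal usage sketch mirroring the tests that follow (the helper function is illustrative):

```go
package example

import (
	"sync/atomic"

	"github.com/zeromicro/go-zero/core/mr"
)

// countEven processes every generated item concurrently without producing output,
// which is exactly the ForEach contract (the former MapVoid behavior).
func countEven(nums []int) int {
	var count uint32
	mr.ForEach(func(source chan<- interface{}) {
		for _, n := range nums {
			source <- n
		}
	}, func(item interface{}) {
		if item.(int)%2 == 0 {
			atomic.AddUint32(&count, 1)
		}
	}, mr.WithWorkers(4))
	return int(count)
}
```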

View File

@@ -1,6 +1,7 @@
package mr
import (
"context"
"errors"
"io/ioutil"
"log"
@@ -10,8 +11,9 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/stringx"
"github.com/zeromicro/go-zero/core/syncx"
"go.uber.org/goleak"
)
var errDummy = errors.New("dummy")
@@ -21,6 +23,8 @@ func init() {
}
func TestFinish(t *testing.T) {
defer goleak.VerifyNone(t)
var total uint32
err := Finish(func() error {
atomic.AddUint32(&total, 2)
@@ -38,14 +42,20 @@ func TestFinish(t *testing.T) {
}
func TestFinishNone(t *testing.T) {
defer goleak.VerifyNone(t)
assert.Nil(t, Finish())
}
func TestFinishVoidNone(t *testing.T) {
defer goleak.VerifyNone(t)
FinishVoid()
}
func TestFinishErr(t *testing.T) {
defer goleak.VerifyNone(t)
var total uint32
err := Finish(func() error {
atomic.AddUint32(&total, 2)
@@ -62,6 +72,8 @@ func TestFinishErr(t *testing.T) {
}
func TestFinishVoid(t *testing.T) {
defer goleak.VerifyNone(t)
var total uint32
FinishVoid(func() {
atomic.AddUint32(&total, 2)
@@ -74,7 +86,57 @@ func TestFinishVoid(t *testing.T) {
assert.Equal(t, uint32(10), atomic.LoadUint32(&total))
}
func TestForEach(t *testing.T) {
const tasks = 1000
t.Run("all", func(t *testing.T) {
defer goleak.VerifyNone(t)
var count uint32
ForEach(func(source chan<- interface{}) {
for i := 0; i < tasks; i++ {
source <- i
}
}, func(item interface{}) {
atomic.AddUint32(&count, 1)
}, WithWorkers(-1))
assert.Equal(t, tasks, int(count))
})
t.Run("odd", func(t *testing.T) {
defer goleak.VerifyNone(t)
var count uint32
ForEach(func(source chan<- interface{}) {
for i := 0; i < tasks; i++ {
source <- i
}
}, func(item interface{}) {
if item.(int)%2 == 0 {
atomic.AddUint32(&count, 1)
}
})
assert.Equal(t, tasks/2, int(count))
})
t.Run("all", func(t *testing.T) {
defer goleak.VerifyNone(t)
ForEach(func(source chan<- interface{}) {
for i := 0; i < tasks; i++ {
source <- i
}
}, func(item interface{}) {
panic("foo")
})
})
}
func TestMap(t *testing.T) {
defer goleak.VerifyNone(t)
tests := []struct {
mapper MapFunc
expect int
@@ -127,6 +189,8 @@ func TestMap(t *testing.T) {
}
func TestMapReduce(t *testing.T) {
defer goleak.VerifyNone(t)
tests := []struct {
mapper MapperFunc
reducer ReducerFunc
@@ -202,7 +266,22 @@ func TestMapReduce(t *testing.T) {
}
}
func TestMapReducePanicBothMapperAndReducer(t *testing.T) {
defer goleak.VerifyNone(t)
_, _ = MapReduce(func(source chan<- interface{}) {
source <- 0
source <- 1
}, func(item interface{}, writer Writer, cancel func(error)) {
panic("foo")
}, func(pipe <-chan interface{}, writer Writer, cancel func(error)) {
panic("bar")
})
}
func TestMapReduceWithReduerWriteMoreThanOnce(t *testing.T) {
defer goleak.VerifyNone(t)
assert.Panics(t, func() {
MapReduce(func(source chan<- interface{}) {
for i := 0; i < 10; i++ {
@@ -219,6 +298,8 @@ func TestMapReduceWithReduerWriteMoreThanOnce(t *testing.T) {
}
func TestMapReduceVoid(t *testing.T) {
defer goleak.VerifyNone(t)
var value uint32
tests := []struct {
mapper MapperFunc
@@ -295,6 +376,8 @@ func TestMapReduceVoid(t *testing.T) {
}
func TestMapReduceVoidWithDelay(t *testing.T) {
defer goleak.VerifyNone(t)
var result []int
err := MapReduceVoid(func(source chan<- interface{}) {
source <- 0
@@ -318,9 +401,11 @@ func TestMapReduceVoidWithDelay(t *testing.T) {
}
func TestMapVoid(t *testing.T) {
defer goleak.VerifyNone(t)
const tasks = 1000
var count uint32
MapVoid(func(source chan<- interface{}) {
ForEach(func(source chan<- interface{}) {
for i := 0; i < tasks; i++ {
source <- i
}
@@ -332,6 +417,8 @@ func TestMapVoid(t *testing.T) {
}
func TestMapReducePanic(t *testing.T) {
defer goleak.VerifyNone(t)
v, err := MapReduce(func(source chan<- interface{}) {
source <- 0
source <- 1
@@ -349,6 +436,8 @@ func TestMapReducePanic(t *testing.T) {
}
func TestMapReduceVoidCancel(t *testing.T) {
defer goleak.VerifyNone(t)
var result []int
err := MapReduceVoid(func(source chan<- interface{}) {
source <- 0
@@ -370,6 +459,8 @@ func TestMapReduceVoidCancel(t *testing.T) {
}
func TestMapReduceVoidCancelWithRemains(t *testing.T) {
defer goleak.VerifyNone(t)
var done syncx.AtomicBool
var result []int
err := MapReduceVoid(func(source chan<- interface{}) {
@@ -395,6 +486,8 @@ func TestMapReduceVoidCancelWithRemains(t *testing.T) {
}
func TestMapReduceWithoutReducerWrite(t *testing.T) {
defer goleak.VerifyNone(t)
uids := []int{1, 2, 3}
res, err := MapReduce(func(source chan<- interface{}) {
for _, uid := range uids {
@@ -410,6 +503,54 @@ func TestMapReduceWithoutReducerWrite(t *testing.T) {
assert.Nil(t, res)
}
func TestMapReduceVoidPanicInReducer(t *testing.T) {
defer goleak.VerifyNone(t)
const message = "foo"
var done syncx.AtomicBool
err := MapReduceVoid(func(source chan<- interface{}) {
for i := 0; i < defaultWorkers*2; i++ {
source <- i
}
done.Set(true)
}, func(item interface{}, writer Writer, cancel func(error)) {
i := item.(int)
writer.Write(i)
}, func(pipe <-chan interface{}, cancel func(error)) {
panic(message)
}, WithWorkers(1))
assert.NotNil(t, err)
assert.Equal(t, message, err.Error())
assert.True(t, done.True())
}
func TestMapReduceWithContext(t *testing.T) {
defer goleak.VerifyNone(t)
var done syncx.AtomicBool
var result []int
ctx, cancel := context.WithCancel(context.Background())
err := MapReduceVoid(func(source chan<- interface{}) {
for i := 0; i < defaultWorkers*2; i++ {
source <- i
}
done.Set(true)
}, func(item interface{}, writer Writer, c func(error)) {
i := item.(int)
if i == defaultWorkers/2 {
cancel()
}
writer.Write(i)
}, func(pipe <-chan interface{}, cancel func(error)) {
for item := range pipe {
i := item.(int)
result = append(result, i)
}
}, WithContext(ctx))
assert.NotNil(t, err)
assert.Equal(t, context.DeadlineExceeded, err)
}
func BenchmarkMapReduce(b *testing.B) {
b.ReportAllocs()

core/mr/readme-cn.md (new file, 89 lines)
View File

@@ -0,0 +1,89 @@
# mapreduce
[English](readme.md) | 简体中文
## Why MapReduce is needed
In real-world business scenarios, we often need to fetch the corresponding attributes from different rpc services to assemble a complex object.
For example, to query product details:
1. product service - query product attributes
2. inventory service - query inventory attributes
3. price service - query price attributes
4. marketing service - query marketing attributes
With serial calls, the response time grows linearly with the number of rpc calls, so to improve performance we usually turn serial calls into parallel ones.
`WaitGroup` is enough for simple cases, but what if we need to validate, transform, and aggregate the data returned by the rpc calls? `WaitGroup` falls short there, and the Go standard library offers no such tool (Java provides CompletableFuture), so we implemented an in-process MapReduce concurrency utility for batch data processing, following the MapReduce architecture.
## Design ideas
Let's put ourselves in the author's shoes and sort out the possible business scenarios for such a concurrency tool:
1. Querying product details: call multiple services concurrently to combine product attributes, and end the flow immediately when any call fails.
2. Automatically recommending coupons on the product details page: verify coupons concurrently, drop the ones that fail verification, and return all the remaining ones.
Both cases boil down to processing input data and emitting cleaned-up output data. There is a classic asynchronous pattern for data processing: the producer-consumer pattern. So we can abstract the life cycle of batch data processing into roughly three phases:
<img src="https://raw.githubusercontent.com/zeromicro/zero-doc/main/doc/images/mapreduce-serial-cn.png" width="500">
1. data production: generate
2. data processing: mapper
3. data aggregation: reducer
Data production is indispensable; data processing and data aggregation are optional. Data production and processing support concurrent calls, while data aggregation is basically a pure in-memory operation that a single goroutine can handle.
Now think about how data flows between the phases. Since each phase is handled by different goroutines, it is natural to use channels for communication between them.
<img src="https://raw.githubusercontent.com/zeromicro/zero-doc/main/doc/images/mapreduce-cn.png" width="500">
How do we terminate the flow at any time?
Simply have each goroutine watch a global done `channel` and the `ctx` provided by the caller.
## A simple example
Compute the sum of squares in parallel (a deliberately simple example, just to simulate concurrency):
```go
package main
import (
"fmt"
"log"
"github.com/tal-tech/go-zero/core/mr"
)
func main() {
val, err := mr.MapReduce(func(source chan<- interface{}) {
// generator
for i := 0; i < 10; i++ {
source <- i
}
}, func(item interface{}, writer mr.Writer, cancel func(error)) {
// mapper
i := item.(int)
writer.Write(i * i)
}, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) {
// reducer
var sum int
for i := range pipe {
sum += i.(int)
}
writer.Write(sum)
})
if err != nil {
log.Fatal(err)
}
fmt.Println("result:", val)
}
```
More examples: [https://github.com/zeromicro/zero-examples/tree/main/mapreduce](https://github.com/zeromicro/zero-examples/tree/main/mapreduce)
## Give it a star!
If you are using this project or find it helpful, please **star** it. Thanks!

core/mr/readme.md (new file, 90 lines)
View File

@@ -0,0 +1,90 @@
<img align="right" width="150px" src="https://raw.githubusercontent.com/zeromicro/zero-doc/main/doc/images/go-zero.png">
# mapreduce
English | [简体中文](readme-cn.md)
## Why MapReduce is needed
In practical business scenarios, we often need to fetch the corresponding properties from different rpc services to assemble a complex object.
For example, to query product details:
1. product service - query product attributes
2. inventory service - query inventory properties
3. price service - query price attributes
4. marketing service - query marketing properties
With serial calls, the response time grows linearly with the number of rpc calls, so we generally turn serial calls into parallel ones to optimize response time.
`WaitGroup` can meet the needs in simple scenarios, but what if we need to validate, transform, and aggregate the data returned by the rpc calls? The Go standard library does not have such a tool (Java provides CompletableFuture), so we implemented an in-process MapReduce concurrency tool for batch data processing, based on the MapReduce architecture.
## Design ideas
Let's try to put ourselves in the author's shoes and sort out the possible business scenarios for the concurrency tool:
1. Querying product details: support concurrent calls to multiple services to combine product attributes, and end the whole flow immediately when any call fails.
2. Automatically recommending coupons on the product details page: verify coupons concurrently, drop the ones that fail verification, and return all the remaining ones.
Both cases boil down to processing input data and outputting cleaned data. There is a classic asynchronous pattern for this: the producer-consumer pattern. So we can abstract the life cycle of batch data processing into roughly three phases:
<img src="https://raw.githubusercontent.com/zeromicro/zero-doc/main/doc/images/mapreduce-serial-en.png" width="500">
1. data production: generate
2. data processing: mapper
3. data aggregation: reducer
Data production is an indispensable stage; data processing and data aggregation are optional. Data production and processing support concurrent calls, while data aggregation is basically a pure in-memory operation that a single goroutine can handle.
Since the different stages are handled by different goroutines, it is natural to use channels for communication between them.
<img src="https://raw.githubusercontent.com/zeromicro/zero-doc/main/doc/images/mapreduce-en.png" width="500">
How can the flow be terminated at any time?
Simply have each goroutine watch a done channel and the caller-provided context.
## A simple example
Calculate the sum of squares in parallel (a deliberately simple example, just to simulate concurrency):
```go
package main
import (
"fmt"
"log"
"github.com/tal-tech/go-zero/core/mr"
)
func main() {
val, err := mr.MapReduce(func(source chan<- interface{}) {
// generator
for i := 0; i < 10; i++ {
source <- i
}
}, func(item interface{}, writer mr.Writer, cancel func(error)) {
// mapper
i := item.(int)
writer.Write(i * i)
}, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) {
// reducer
var sum int
for i := range pipe {
sum += i.(int)
}
writer.Write(sum)
})
if err != nil {
log.Fatal(err)
}
fmt.Println("result:", val)
}
```
More examples: [https://github.com/zeromicro/zero-examples/tree/main/mapreduce](https://github.com/zeromicro/zero-examples/tree/main/mapreduce)
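Since this change set also adds context support to `mr` (the `WithContext` option in the diff above), here is a hedged sketch of cancelling a run early; it reuses the import path of the example above, and the timeout value is arbitrary:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/tal-tech/go-zero/core/mr"
)

func main() {
	// cancel the whole pipeline if it takes longer than 100ms
	ctx, cancelTimeout := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancelTimeout()

	val, err := mr.MapReduce(func(source chan<- interface{}) {
		for i := 0; i < 10; i++ {
			source <- i
		}
	}, func(item interface{}, writer mr.Writer, cancel func(error)) {
		time.Sleep(time.Second) // simulate a slow mapper
		writer.Write(item.(int) * item.(int))
	}, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) {
		var sum int
		for i := range pipe {
			sum += i.(int)
		}
		writer.Write(sum)
	}, mr.WithContext(ctx))
	// err is expected to be context.DeadlineExceeded instead of a computed sum
	fmt.Println(val, err)
}
```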
## Give a Star! ⭐
If you like or are using this project to learn or start your solution, please give it a star. Thanks!

View File

@@ -11,7 +11,7 @@ import (
"syscall"
"time"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/logx"
)
const (

View File

@@ -15,7 +15,7 @@ import (
"syscall"
"time"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/logx"
)
// DefaultMemProfileRate is the default memory profiling rate.

View File

@@ -10,7 +10,8 @@ import (
"syscall"
"time"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/threading"
)
const (
@@ -46,10 +47,10 @@ func gracefulStop(signals chan os.Signal) {
signal.Stop(signals)
logx.Info("Got signal SIGTERM, shutting down...")
wrapUpListeners.notifyListeners()
go wrapUpListeners.notifyListeners()
time.Sleep(wrapUpTime)
shutdownListeners.notifyListeners()
go shutdownListeners.notifyListeners()
time.Sleep(delayTimeBeforeForceQuit - wrapUpTime)
logx.Infof("Still alive after %v, going to force kill the process...", delayTimeBeforeForceQuit)
@@ -81,7 +82,9 @@ func (lm *listenerManager) notifyListeners() {
lm.lock.Lock()
defer lm.lock.Unlock()
group := threading.NewRoutineGroup()
for _, listener := range lm.listeners {
listener()
group.RunSafe(listener)
}
group.Wait()
}

View File

@@ -8,7 +8,7 @@ import (
"os/signal"
"syscall"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/logx"
)
const timeFormat = "0102150405"

View File

@@ -8,8 +8,8 @@ import (
"time"
"github.com/olekukonko/tablewriter"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/threading"
)
type (

View File

@@ -1,6 +1,6 @@
package prof
import "github.com/tal-tech/go-zero/core/utils"
import "github.com/zeromicro/go-zero/core/utils"
type (
// A ProfilePoint is a profile time point.

View File

@@ -3,7 +3,7 @@ package prof
import (
"testing"
"github.com/tal-tech/go-zero/core/utils"
"github.com/zeromicro/go-zero/core/utils"
)
func TestProfiler(t *testing.T) {

View File

@@ -6,9 +6,9 @@ import (
"sync"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/syncx"
"github.com/tal-tech/go-zero/core/threading"
"github.com/zeromicro/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/syncx"
"github.com/zeromicro/go-zero/core/threading"
)
var (

View File

@@ -4,7 +4,7 @@ import (
"errors"
"sync/atomic"
"github.com/tal-tech/go-zero/core/logx"
"github.com/zeromicro/go-zero/core/logx"
)
// ErrNoAvailablePusher indicates no pusher available.

Some files were not shown because too many files have changed in this diff.