Compare commits
262 Commits
Commit SHA1s:
9d528dddd6  543d590710  f1d70eb6b2  d828c3f37e  038491b7bc  cf683411ee
de5ed6a677  3dda557410  c800f6f723  0395ba1816  86f9f63b46  a7a6753118
2e80d12d6a  417a96cbf2  2d4c29ea7c  67db40ed4f  11c485a5ed  b0573af9a9
09eb53f308  11f85d1b80  0cb86c6990  57d2f22c24  fa0c364982  a6c8113419
4f5c30e083  9d0b51fa26  ba5f8045a2  3a510a9138  d3bfa16813  28409791fa
c1abe87953  f8367856e8  a72b0a689b  69a4d213a3  c28e01fed3  e8efcef108
d011316997  4d22b0c497  539215d7df  3ede597a15  01786c5e63  6aba5f74fc
3c894a3fb7  1ece3a498f  b76c7ae55d  91b10bd3b9  7e3fe77e7b  ba43214dae
ebc90720ea  785d100be9  f13e6f1149  8be0f77d96  429f85a9de  b4d1c6da2c
3c1cfd4c1e  a71a210704  769d06c8ab  cd1f8da13f  8230474667  27f553bf84
d48bff8c8b  59b9687f31  c1a8ccda11  9df6786b09  bef5bd4e4f  68acfb1891
9fd3f752d1  9c48e9ceab  bd26783b33  eda8230521  462ddbb145  496a2f341e
7109d6d635  ca72241fa3  a6bdffd225  5636bf4955  a944a7fd7e  a40fa405e4
eab77e21dd  d41163f5c1  265b1f2459  c92ea59228  afddfea093  fa4dc151ca
44202acb18  cf00786209  6a8638fc85  837a9ffa03  d28cfe5f20  022c100dc9
426b09c356  40dc21e4cf  9b114e3251  4c6234f108  3cdfcb05f1  9f5bfa0088
2d42c8fa00  10e7922597  6e34b55ba7  ed15ca04f4  295ec27e1b  d1e702e8a3
d1bfb5ef61  e43357164c  cd21c9fa74  cdd2fcbbc9  8d2db09d45  65905b914d
80e3407be1  657d27213a  8ac18a9422  d3ae9cfd49  d7f42161fd  e03229cabe
8403ed16ae  d87d203c3b  3ae6a882a7  41c980f00c  f34d81ca2c  004ee488a6
2e12cd2c99  2695c30886  c74fb988e0  e8a340c1c0  06e114e5a3  74ad681a66
e7bbc09093  1eb1450c43  9a724fe907  30e49f2939  a5407479a6  7fb5bab26b
27249e021f  d809795fec  c9db9588b7  872c50b71a  7c83155e4f  358d86b8ae
f4bb9f5635  5c6a3132eb  2bd95aa007  e8376936d5  71c0288023  9e2f07a842
24fd34413f  3f47251892  0b6bc69afa  5b9bdc8d02  ded22e296e  f0ed2370a3
6bf6cfdd01  5cc9eb0de4  f070d447ef  f6d9e19ecb  56807aabf6  861dcf2f36
c837dc21bb  96a35ecf1a  bdec5f2349  bc92b57bdb  d8905b9e9e  dec6309c55
10805577f5  a4d8286e36  84d2b64e7c  6476da4a18  79eab0ea2f  3b683fd498
d179b342b2  58874779e7  8829c31c0d  b42f3fa047  9bdadf2381  20f665ede8
0325d8e92d  2125977281  c26c187e11  4ef1859f0b  407a6cbf9c  76fc1ef460
423955c55f  db95b3f0e3  4bee60eb7f  7618139dad  6fd08027ff  b9e268aae8
4c1bb1148b  50a6bbe6b9  dfb3cb510a  519db812b4  3203f8e06b  b71ac2042a
d0f9e57022  aa68210cde  280e837c9e  f669e1226c  cd15c19250  5b35fa17de
9672298fa8  bf3ce16823  189721da16  a523ab1f93  7ea8b636d9  b2fea65faa
a1fe8bf6cd  67ee9e4391  9c1ee50497  7c842f22d0  14ec29991c  c7f5aad83a
e77747cff8  f2612db4b1  a21ff71373  fc04ad7854  fbf2eebc42  dc43430812
c6642bc2e6  bdca24dd3b  00c5734021  33f87cf1f0  69935c1ba3  1fb356f328
0b0406f41a  cc264dcf55  e024aebb66  f204729482  d20cf56a69  54d57c7d4b
28a7c9d38f  872e75e10d  af1730079e  04521e2d24  02adcccbf4  a74aaf1823
1eb2089c69  f7f3730e1a  0ee7654407  16cc990fdd  00061c2e5b  6793f7a1de
c8428a7f65  a5e1d0d0dc  8270c7deed  9f4a882a1b  cb7b7cb72e  603c93aa4a
cb8d9d413a  ff7443c6a7  b812e74d6f  089cdaa75f  476026e393  75952308f9
df0550d6dc  e481b63b21  e47079f0f4  9b2a279948  db87fd3239  598fda0c97
b0e335e7b0  efdf475da4  22a1315136  5b22823018
@@ -1,4 +1,3 @@
comment: false
ignore:
- "doc"
- "example"
- "tools"
- "tools"
12  .github/FUNDING.yml  vendored  Normal file

@@ -0,0 +1,12 @@
# These are supported funding model platforms

github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # https://gitee.com/kevwan/static/raw/master/images/sponsor.jpg
40  .github/ISSUE_TEMPLATE/bug_report.md  vendored  Normal file

@@ -0,0 +1,40 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior, if applicable:

1. The code is

```go

```

2. The error is

```

```

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Environments (please complete the following information):**
- OS: [e.g. Linux]
- go-zero version [e.g. 1.2.1]
- goctl version [e.g. 1.2.1, optional]

**More description**
Add any other context about the problem here.
20  .github/ISSUE_TEMPLATE/feature_request.md  vendored  Normal file

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
10  .github/ISSUE_TEMPLATE/question.md  vendored  Normal file

@@ -0,0 +1,10 @@
---
name: Question
about: Ask a question on using go-zero or goctl
title: ''
labels: ''
assignees: ''

---

12  .github/workflows/go.yml  vendored

@@ -15,7 +15,7 @@ jobs:
    - name: Set up Go 1.x
      uses: actions/setup-go@v2
      with:
        go-version: ^1.13
        go-version: ^1.14
      id: go

    - name: Check out code into the Go module directory

@@ -25,10 +25,14 @@ jobs:
      run: |
        go get -v -t -d ./...

    - name: Lint
      run: |
        go vet -stdmethods=false $(go list ./...)
        go install mvdan.cc/gofumpt@latest
        test -z "$(gofumpt -s -l -extra .)" || echo "Please run 'gofumpt -l -w -extra .'"

    - name: Test
      run: go test -race -coverprofile=coverage.txt -covermode=atomic ./...

    - name: Codecov
      uses: codecov/codecov-action@v1.0.6
      with:
        token: ${{secrets.CODECOV_TOKEN}}
      uses: codecov/codecov-action@v2
19  .github/workflows/issues.yml  vendored  Normal file

@@ -0,0 +1,19 @@
name: Close inactive issues
on:
  schedule:
    - cron: "30 1 * * *"

jobs:
  close-issues:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v3
        with:
          days-before-issue-stale: 30
          days-before-issue-close: 14
          stale-issue-label: "stale"
          stale-issue-message: "This issue is stale because it has been open for 30 days with no activity."
          close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
          days-before-pr-stale: -1
          days-before-pr-close: -1
          repo-token: ${{ secrets.GITHUB_TOKEN }}
19  .github/workflows/reviewdog.yml  vendored  Normal file

@@ -0,0 +1,19 @@
name: reviewdog
on: [pull_request]
jobs:
  staticcheck:
    name: runner / staticcheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: reviewdog/action-staticcheck@v1
        with:
          github_token: ${{ secrets.github_token }}
          # Change reviewdog reporter if you need [github-pr-check,github-check,github-pr-review].
          reporter: github-pr-review
          # Report all results.
          filter_mode: nofilter
          # Exit with 1 when it find at least one finding.
          fail_on_error: true
          # Set staticcheck flags
          staticcheck_flags: -checks=inherit,-SA1019,-SA1029,-SA5008
4  .gitignore  vendored

@@ -10,10 +10,14 @@
!*/
!api

# ignore
.idea
**/.DS_Store
**/logs

# for test purpose
adhoc

# gitlab ci
.cache
@@ -90,7 +90,7 @@ After that, run these local verifications before submitting pull request to predict the
fail of continuous integration.

* Format the code with `gofmt`
* Run the test with data race enabled `go test -race ./…`
* Run the test with data race enabled `go test -race ./...`

## Code Review
25  ROADMAP.md

@@ -5,17 +5,22 @@ Community and contributor involvement is vital for successfully implementing all
We hope that the items listed below will inspire further engagement from the community to keep go-zero progressing and shipping exciting and valuable features.

## 2021 Q2
- Support TLS in redis connections
- Support service discovery through K8S watch api
- Log full sql statements for easier sql problem solving
- [x] Support service discovery through K8S client api
- [x] Log full sql statements for easier sql problem solving

## 2021 Q3
- Support `goctl mock` command to start a mocking server with given `.api` file
- Adapt builtin tracing mechanism to opentracing solutions
- Support `goctl model pg` to support PostgreSQL code generation
- [x] Support `goctl model pg` to support PostgreSQL code generation
- [x] Adapt builtin tracing mechanism to opentracing solutions

## 2021 Q4
- Support `goctl doctor` command to report potential issues for given service
- Support `context` in redis related methods for timeout and tracing
- Support `context` in sql related methods for timeout and tracing
- Support `context` in mongodb related methods for timeout and tracing
- [x] Support `username/password` authentication in ETCD
- [x] Support `SSL/TLS` in `zRPC`
- [x] Support `TLS` in redis connections

## 2022
- [ ] Support `goctl mock` command to start a mocking server with given `.api` file
- [ ] Add `httpx.Client` with governance, like circuit breaker etc.
- [ ] Support `goctl doctor` command to report potential issues for given service
- [ ] Support `context` in redis related methods for timeout and tracing
- [ ] Support `context` in sql related methods for timeout and tracing
- [ ] Support `context` in mongodb related methods for timeout and tracing
@@ -34,7 +34,7 @@ type (
        expire         time.Duration
        timingWheel    *TimingWheel
        lruCache       lru
        barrier        syncx.SharedCalls
        barrier        syncx.SingleFlight
        unstableExpiry mathx.Unstable
        stats          *cacheStat
    }

@@ -46,7 +46,7 @@ func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
        data:           make(map[string]interface{}),
        expire:         expire,
        lruCache:       emptyLruCache,
        barrier:        syncx.NewSharedCalls(),
        barrier:        syncx.NewSingleFlight(),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
    }
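The hunk above renames the cache barrier from `syncx.SharedCalls` to `syncx.SingleFlight`. A minimal sketch of how such a single-flight barrier is typically used to collapse concurrent loads of the same key; the `Do` signature and the key/value shown here are assumptions based on this diff, not taken from it:

```go
package main

import (
	"fmt"

	"github.com/tal-tech/go-zero/core/syncx"
)

func main() {
	// barrier collapses concurrent calls that share a key into one execution.
	barrier := syncx.NewSingleFlight()

	val, err := barrier.Do("user:42", func() (interface{}, error) {
		// pretend this is an expensive load, e.g. a database query
		return "kevin", nil
	})
	fmt.Println(val, err)
}
```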
@@ -106,9 +106,7 @@ func (s *Set) KeysInt() []int {
    var keys []int

    for key := range s.data {
        if intKey, ok := key.(int); !ok {
            continue
        } else {
        if intKey, ok := key.(int); ok {
            keys = append(keys, intKey)
        }
    }

@@ -121,9 +119,7 @@ func (s *Set) KeysInt64() []int64 {
    var keys []int64

    for key := range s.data {
        if intKey, ok := key.(int64); !ok {
            continue
        } else {
        if intKey, ok := key.(int64); ok {
            keys = append(keys, intKey)
        }
    }

@@ -136,9 +132,7 @@ func (s *Set) KeysUint() []uint {
    var keys []uint

    for key := range s.data {
        if intKey, ok := key.(uint); !ok {
            continue
        } else {
        if intKey, ok := key.(uint); ok {
            keys = append(keys, intKey)
        }
    }

@@ -151,9 +145,7 @@ func (s *Set) KeysUint64() []uint64 {
    var keys []uint64

    for key := range s.data {
        if intKey, ok := key.(uint64); !ok {
            continue
        } else {
        if intKey, ok := key.(uint64); ok {
            keys = append(keys, intKey)
        }
    }

@@ -166,9 +158,7 @@ func (s *Set) KeysStr() []string {
    var keys []string

    for key := range s.data {
        if strKey, ok := key.(string); !ok {
            continue
        } else {
        if strKey, ok := key.(string); ok {
            keys = append(keys, strKey)
        }
    }
@@ -47,9 +47,11 @@ func TestUnmarshalContextWithMissing(t *testing.T) {
        Name string `ctx:"name"`
        Age  int    `ctx:"age"`
    }
    type name string
    const PersonNameKey name = "name"

    ctx := context.Background()
    ctx = context.WithValue(ctx, "name", "kevin")
    ctx = context.WithValue(ctx, PersonNameKey, "kevin")

    var person Person
    err := For(ctx, &person)
@@ -9,7 +9,9 @@ import (
)

func TestContextCancel(t *testing.T) {
    c := context.WithValue(context.Background(), "key", "value")
    type key string
    var nameKey key = "name"
    c := context.WithValue(context.Background(), nameKey, "value")
    c1, cancel := context.WithCancel(c)
    o := ValueOnlyFrom(c1)
    c2, cancel2 := context.WithCancel(o)
7  core/discov/accountregistry.go  Normal file

@@ -0,0 +1,7 @@
package discov

import "github.com/tal-tech/go-zero/core/discov/internal"

func RegisterAccount(endpoints []string, user, pass string) {
    internal.AddAccount(endpoints, user, pass)
}
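A minimal sketch of how the new `discov.RegisterAccount` might be wired in before anything dials an authenticated etcd cluster; the endpoints and credentials here are made-up placeholders:

```go
package main

import "github.com/tal-tech/go-zero/core/discov"

func main() {
	endpoints := []string{"127.0.0.1:2379"} // placeholder etcd endpoints

	// Register the credentials once; later connections to these endpoints pick them up.
	discov.RegisterAccount(endpoints, "etcd-user", "etcd-pass")

	// ... create publishers/subscribers against the same endpoints afterwards.
}
```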
21  core/discov/accountregistry_test.go  Normal file

@@ -0,0 +1,21 @@
package discov

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/tal-tech/go-zero/core/discov/internal"
    "github.com/tal-tech/go-zero/core/stringx"
)

func TestRegisterAccount(t *testing.T) {
    endpoints := []string{
        "localhost:2379",
    }
    user := "foo" + stringx.Rand()
    RegisterAccount(endpoints, user, "bar")
    account, ok := internal.GetAccount(endpoints)
    assert.True(t, ok)
    assert.Equal(t, user, account.User)
    assert.Equal(t, "bar", account.Pass)
}
@@ -6,6 +6,13 @@ import "errors"

type EtcdConf struct {
    Hosts []string
    Key   string
    User  string `json:",optional"`
    Pass  string `json:",optional"`
}

// HasAccount returns if account provided.
func (c EtcdConf) HasAccount() bool {
    return len(c.User) > 0 && len(c.Pass) > 0
}

// Validate validates c.
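With the new optional `User`/`Pass` fields, an `EtcdConf` can carry etcd credentials directly. A hypothetical construction in Go, with placeholder values, showing how `HasAccount` reports whether credentials were supplied:

```go
package main

import (
	"fmt"

	"github.com/tal-tech/go-zero/core/discov"
)

func main() {
	conf := discov.EtcdConf{
		Hosts: []string{"127.0.0.1:2379"}, // placeholder etcd hosts
		Key:   "user.rpc",                 // placeholder service key
		User:  "etcd-user",                // optional, only for authenticated clusters
		Pass:  "etcd-pass",                // optional
	}
	fmt.Println(conf.HasAccount()) // true when both User and Pass are set
}
```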
@@ -44,3 +44,39 @@ func TestConfig(t *testing.T) {
        }
    }
}

func TestEtcdConf_HasAccount(t *testing.T) {
    tests := []struct {
        EtcdConf
        hasAccount bool
    }{
        {
            EtcdConf: EtcdConf{
                Hosts: []string{"any"},
                Key:   "key",
            },
            hasAccount: false,
        },
        {
            EtcdConf: EtcdConf{
                Hosts: []string{"any"},
                Key:   "key",
                User:  "foo",
            },
            hasAccount: false,
        },
        {
            EtcdConf: EtcdConf{
                Hosts: []string{"any"},
                Key:   "key",
                User:  "foo",
                Pass:  "bar",
            },
            hasAccount: true,
        },
    }

    for _, test := range tests {
        assert.Equal(t, test.hasAccount, test.EtcdConf.HasAccount())
    }
}
31  core/discov/internal/accountmanager.go  Normal file

@@ -0,0 +1,31 @@
package internal

import "sync"

type Account struct {
    User string
    Pass string
}

var (
    accounts = make(map[string]Account)
    lock     sync.RWMutex
)

func AddAccount(endpoints []string, user, pass string) {
    lock.Lock()
    defer lock.Unlock()

    accounts[getClusterKey(endpoints)] = Account{
        User: user,
        Pass: pass,
    }
}

func GetAccount(endpoints []string) (Account, bool) {
    lock.RLock()
    defer lock.RUnlock()

    account, ok := accounts[getClusterKey(endpoints)]
    return account, ok
}
34  core/discov/internal/accountmanager_test.go  Normal file

@@ -0,0 +1,34 @@
package internal

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/tal-tech/go-zero/core/stringx"
)

func TestAccount(t *testing.T) {
    endpoints := []string{
        "192.168.0.2:2379",
        "192.168.0.3:2379",
        "192.168.0.4:2379",
    }
    username := "foo" + stringx.Rand()
    password := "bar"
    anotherPassword := "any"

    _, ok := GetAccount(endpoints)
    assert.False(t, ok)

    AddAccount(endpoints, username, password)
    account, ok := GetAccount(endpoints)
    assert.True(t, ok)
    assert.Equal(t, username, account.User)
    assert.Equal(t, password, account.Pass)

    AddAccount(endpoints, username, anotherPassword)
    account, ok = GetAccount(endpoints)
    assert.True(t, ok)
    assert.Equal(t, username, account.User)
    assert.Equal(t, anotherPassword, account.Pass)
}
@@ -37,25 +37,35 @@ func GetRegistry() *Registry {

// GetConn returns an etcd client connection associated with given endpoints.
func (r *Registry) GetConn(endpoints []string) (EtcdClient, error) {
    return r.getCluster(endpoints).getClient()
    c, _ := r.getCluster(endpoints)
    return c.getClient()
}

// Monitor monitors the key on given etcd endpoints, notify with the given UpdateListener.
func (r *Registry) Monitor(endpoints []string, key string, l UpdateListener) error {
    return r.getCluster(endpoints).monitor(key, l)
    c, exists := r.getCluster(endpoints)
    // if exists, the existing values should be updated to the listener.
    if exists {
        kvs := c.getCurrent(key)
        for _, kv := range kvs {
            l.OnAdd(kv)
        }
    }

    return c.monitor(key, l)
}

func (r *Registry) getCluster(endpoints []string) *cluster {
func (r *Registry) getCluster(endpoints []string) (c *cluster, exists bool) {
    clusterKey := getClusterKey(endpoints)
    r.lock.Lock()
    defer r.lock.Unlock()
    c, ok := r.clusters[clusterKey]
    if !ok {
    c, exists = r.clusters[clusterKey]
    if !exists {
        c = newCluster(endpoints)
        r.clusters[clusterKey] = c
    }

    return c
    return
}

type cluster struct {

@@ -94,6 +104,21 @@ func (c *cluster) getClient() (EtcdClient, error) {
    return val.(EtcdClient), nil
}

func (c *cluster) getCurrent(key string) []KV {
    c.lock.Lock()
    defer c.lock.Unlock()

    var kvs []KV
    for k, v := range c.values[key] {
        kvs = append(kvs, KV{
            Key: k,
            Val: v,
        })
    }

    return kvs
}

func (c *cluster) handleChanges(key string, kvs []KV) {
    var add []KV
    var remove []KV

@@ -197,14 +222,12 @@ func (c *cluster) load(cli EtcdClient, key string) {
    }

    var kvs []KV
    c.lock.Lock()
    for _, ev := range resp.Kvs {
        kvs = append(kvs, KV{
            Key: string(ev.Key),
            Val: string(ev.Value),
        })
    }
    c.lock.Unlock()

    c.handleChanges(key, kvs)
}

@@ -302,14 +325,20 @@ func (c *cluster) watchConnState(cli EtcdClient) {

// DialClient dials an etcd cluster with given endpoints.
func DialClient(endpoints []string) (EtcdClient, error) {
    return clientv3.New(clientv3.Config{
    cfg := clientv3.Config{
        Endpoints:            endpoints,
        AutoSyncInterval:     autoSyncInterval,
        DialTimeout:          DialTimeout,
        DialKeepAliveTime:    dialKeepAliveTime,
        DialKeepAliveTimeout: DialTimeout,
        RejectOldCluster:     true,
    })
    }
    if account, ok := GetAccount(endpoints); ok {
        cfg.Username = account.User
        cfg.Password = account.Pass
    }

    return clientv3.New(cfg)
}

func getClusterKey(endpoints []string) string {
@@ -33,9 +33,10 @@ func setMockClient(cli EtcdClient) func() {
}

func TestGetCluster(t *testing.T) {
    c1 := GetRegistry().getCluster([]string{"first"})
    c2 := GetRegistry().getCluster([]string{"second"})
    c3 := GetRegistry().getCluster([]string{"first"})
    AddAccount([]string{"first"}, "foo", "bar")
    c1, _ := GetRegistry().getCluster([]string{"first"})
    c2, _ := GetRegistry().getCluster([]string{"second"})
    c3, _ := GetRegistry().getCluster([]string{"first"})
    assert.Equal(t, c1, c3)
    assert.NotEqual(t, c1, c2)
}
@@ -6,9 +6,10 @@ package internal

import (
    context "context"
    reflect "reflect"

    gomock "github.com/golang/mock/gomock"
    connectivity "google.golang.org/grpc/connectivity"
    reflect "reflect"
)

// MocketcdConn is a mock of etcdConn interface
@@ -5,8 +5,9 @@
package internal

import (
    gomock "github.com/golang/mock/gomock"
    reflect "reflect"

    gomock "github.com/golang/mock/gomock"
)

// MockUpdateListener is a mock of UpdateListener interface
@@ -11,8 +11,8 @@ import (
)

type (
    // PublisherOption defines the method to customize a Publisher.
    PublisherOption func(client *Publisher)
    // PubOption defines the method to customize a Publisher.
    PubOption func(client *Publisher)

    // A Publisher can be used to publish the value to an etcd cluster on the given key.
    Publisher struct {

@@ -32,7 +32,7 @@ type (
// endpoints is the hosts of the etcd cluster.
// key:value are a pair to be published.
// opts are used to customize the Publisher.
func NewPublisher(endpoints []string, key, value string, opts ...PublisherOption) *Publisher {
func NewPublisher(endpoints []string, key, value string, opts ...PubOption) *Publisher {
    publisher := &Publisher{
        endpoints: endpoints,
        key:       key,

@@ -145,8 +145,15 @@ func (p *Publisher) revoke(cli internal.EtcdClient) {
    }
}

// WithPubEtcdAccount provides the etcd username/password.
func WithPubEtcdAccount(user, pass string) PubOption {
    return func(pub *Publisher) {
        internal.AddAccount(pub.endpoints, user, pass)
    }
}

// WithId customizes a Publisher with the id.
func WithId(id int64) PublisherOption {
func WithId(id int64) PubOption {
    return func(publisher *Publisher) {
        publisher.id = id
    }
@@ -11,6 +11,7 @@ import (
    "github.com/tal-tech/go-zero/core/discov/internal"
    "github.com/tal-tech/go-zero/core/lang"
    "github.com/tal-tech/go-zero/core/logx"
    "github.com/tal-tech/go-zero/core/stringx"
    clientv3 "go.etcd.io/etcd/client/v3"
)

@@ -30,7 +31,8 @@ func TestPublisher_register(t *testing.T) {
        ID: id,
    }, nil)
    cli.EXPECT().Put(gomock.Any(), makeEtcdKey("thekey", id), "thevalue", gomock.Any())
    pub := NewPublisher(nil, "thekey", "thevalue")
    pub := NewPublisher(nil, "thekey", "thevalue",
        WithPubEtcdAccount(stringx.Rand(), "bar"))
    _, err := pub.register(cli)
    assert.Nil(t, err)
}
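A short, hypothetical usage sketch of the renamed `PubOption` and the new `WithPubEtcdAccount`; the endpoints, key, value, and credentials are placeholders, and `KeepAlive`/`Stop` are assumed from the existing Publisher API rather than shown in this diff:

```go
package main

import "github.com/tal-tech/go-zero/core/discov"

func main() {
	pub := discov.NewPublisher(
		[]string{"127.0.0.1:2379"}, // etcd endpoints (placeholder)
		"services/user.rpc",        // key to publish under (placeholder)
		"10.0.0.5:8080",            // value, e.g. this instance's address
		discov.WithPubEtcdAccount("etcd-user", "etcd-pass"),
	)
	if err := pub.KeepAlive(); err != nil {
		panic(err)
	}
	defer pub.Stop()
}
```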
@@ -9,16 +9,14 @@ import (
)

type (
    subOptions struct {
        exclusive bool
    }

    // SubOption defines the method to customize a Subscriber.
    SubOption func(opts *subOptions)
    SubOption func(sub *Subscriber)

    // A Subscriber is used to subscribe the given key on a etcd cluster.
    Subscriber struct {
        items *container
        endpoints []string
        exclusive bool
        items     *container
    }
)

@@ -27,14 +25,14 @@ type (
// key is the key to subscribe.
// opts are used to customize the Subscriber.
func NewSubscriber(endpoints []string, key string, opts ...SubOption) (*Subscriber, error) {
    var subOpts subOptions
    for _, opt := range opts {
        opt(&subOpts)
    }

    sub := &Subscriber{
        items: newContainer(subOpts.exclusive),
        endpoints: endpoints,
    }
    for _, opt := range opts {
        opt(sub)
    }
    sub.items = newContainer(sub.exclusive)

    if err := internal.GetRegistry().Monitor(endpoints, key, sub.items); err != nil {
        return nil, err
    }

@@ -55,8 +53,14 @@ func (s *Subscriber) Values() []string {
// Exclusive means that key value can only be 1:1,
// which means later added value will remove the keys associated with the same value previously.
func Exclusive() SubOption {
    return func(opts *subOptions) {
        opts.exclusive = true
    return func(sub *Subscriber) {
        sub.exclusive = true
    }
}

func WithSubEtcdAccount(user, pass string) SubOption {
    return func(sub *Subscriber) {
        internal.AddAccount(sub.endpoints, user, pass)
    }
}
@@ -6,6 +6,7 @@ import (

    "github.com/stretchr/testify/assert"
    "github.com/tal-tech/go-zero/core/discov/internal"
    "github.com/tal-tech/go-zero/core/stringx"
)

const (

@@ -201,11 +202,9 @@ func TestContainer(t *testing.T) {
}

func TestSubscriber(t *testing.T) {
    var opt subOptions
    Exclusive()(&opt)

    sub := new(Subscriber)
    sub.items = newContainer(opt.exclusive)
    Exclusive()(sub)
    sub.items = newContainer(sub.exclusive)
    var count int32
    sub.AddListener(func() {
        atomic.AddInt32(&count, 1)

@@ -214,3 +213,15 @@ func TestSubscriber(t *testing.T) {
    assert.Empty(t, sub.Values())
    assert.Equal(t, int32(1), atomic.LoadInt32(&count))
}

func TestWithSubEtcdAccount(t *testing.T) {
    endpoints := []string{"localhost:2379"}
    user := stringx.Rand()
    WithSubEtcdAccount(user, "bar")(&Subscriber{
        endpoints: endpoints,
    })
    account, ok := internal.GetAccount(endpoints)
    assert.True(t, ok)
    assert.Equal(t, user, account.User)
    assert.Equal(t, "bar", account.Pass)
}
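Since `SubOption` now operates on the `Subscriber` itself, account registration and the exclusive flag compose as ordinary options. A hedged sketch with placeholder endpoints, key, and credentials; `Values` is taken from the existing Subscriber API shown above:

```go
package main

import (
	"fmt"

	"github.com/tal-tech/go-zero/core/discov"
)

func main() {
	sub, err := discov.NewSubscriber(
		[]string{"127.0.0.1:2379"}, // etcd endpoints (placeholder)
		"services/user.rpc",        // key to watch (placeholder)
		discov.Exclusive(),
		discov.WithSubEtcdAccount("etcd-user", "etcd-pass"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(sub.Values()) // currently published values for the key
}
```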
@@ -9,7 +9,9 @@ type AtomicError struct {

// Set sets the error.
func (ae *AtomicError) Set(err error) {
    ae.err.Store(err)
    if err != nil {
        ae.err.Store(err)
    }
}

// Load returns the error.
@@ -17,6 +17,15 @@ func TestAtomicError(t *testing.T) {
    assert.Equal(t, errDummy, err.Load())
}

func TestAtomicErrorSetNil(t *testing.T) {
    var (
        errNil error
        err    AtomicError
    )
    err.Set(errNil)
    assert.Equal(t, errNil, err.Load())
}

func TestAtomicErrorNil(t *testing.T) {
    var err AtomicError
    assert.Nil(t, err.Load())
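The guard in `Set` matters because `sync/atomic.Value` panics when asked to store a nil interface value; skipping the store makes `Set(nil)` a harmless no-op, which is what the new test exercises. A small sketch of the resulting behavior, assuming `AtomicError` lives in the core/errorx package:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/tal-tech/go-zero/core/errorx"
)

func main() {
	var ae errorx.AtomicError

	ae.Set(nil)            // no-op now; storing a nil error into atomic.Value would panic
	fmt.Println(ae.Load()) // <nil>

	ae.Set(errors.New("boom"))
	fmt.Println(ae.Load()) // boom
}
```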
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package fs
@@ -1,3 +1,4 @@
//go:build linux || darwin
// +build linux darwin

package fs
@@ -395,16 +395,16 @@ func assetEqual(t *testing.T, except, data interface{}) {

func TestStream_AnyMach(t *testing.T) {
    assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
        return 4 == item.(int)
        return item.(int) == 4
    }))
    assetEqual(t, false, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
        return 0 == item.(int)
        return item.(int) == 0
    }))
    assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
        return 2 == item.(int)
        return item.(int) == 2
    }))
    assetEqual(t, true, Just(1, 2, 3).AnyMach(func(item interface{}) bool {
        return 2 == item.(int)
        return item.(int) == 2
    }))
}
@@ -2,6 +2,9 @@ package fx

import (
    "context"
    "fmt"
    "runtime/debug"
    "strings"
    "time"
)

@@ -30,7 +33,8 @@ func DoWithTimeout(fn func() error, timeout time.Duration, opts ...DoOption) err
    go func() {
        defer func() {
            if p := recover(); p != nil {
                panicChan <- p
                // attach call stack to avoid missing in different goroutine
                panicChan <- fmt.Sprintf("%+v\n\n%s", p, strings.TrimSpace(string(debug.Stack())))
            }
        }()
        done <- fn()
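With this change, a panic inside the worker goroutine is forwarded to the caller together with the stack of the goroutine that actually panicked, instead of just the bare panic value. A hedged sketch of what a caller might observe; the failing function body is made up, and the re-panic behavior is assumed from how `fx.DoWithTimeout` drains its panic channel:

```go
package main

import (
	"fmt"
	"time"

	"github.com/tal-tech/go-zero/core/fx"
)

func main() {
	defer func() {
		if p := recover(); p != nil {
			// p now carries both the panic value and the worker goroutine's stack.
			fmt.Println(p)
		}
	}()

	_ = fx.DoWithTimeout(func() error {
		panic("something broke") // hypothetical failing work
	}, time.Second)
}
```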
@@ -74,12 +74,12 @@ func TestConsistentHashIncrementalTransfer(t *testing.T) {
    laterCh := create()
    laterCh.AddWithWeight(node, 10*(i+1))

    for i := 0; i < requestSize; i++ {
        key, ok := laterCh.Get(requestSize + i)
    for j := 0; j < requestSize; j++ {
        key, ok := laterCh.Get(requestSize + j)
        assert.True(t, ok)
        assert.NotNil(t, key)
        value := key.(string)
        assert.True(t, value == keys[i] || value == node)
        assert.True(t, value == keys[j] || value == node)
    }
}
}
@@ -20,6 +20,11 @@ func TestMd5(t *testing.T) {
    assert.Equal(t, md5Digest, actual)
}

func TestMd5Hex(t *testing.T) {
    actual := Md5Hex([]byte(text))
    assert.Equal(t, md5Digest, actual)
}

func BenchmarkHashFnv(b *testing.B) {
    for i := 0; i < b.N; i++ {
        h := fnv.New32()
@@ -5,6 +5,7 @@ import (
    "testing"
    "time"

    "github.com/globalsign/mgo/bson"
    "github.com/stretchr/testify/assert"
)

@@ -106,3 +107,20 @@ func TestMilliTime_UnmarshalJSON(t *testing.T) {
        })
    }
}

func TestUnmarshalWithError(t *testing.T) {
    var mt MilliTime
    assert.NotNil(t, mt.UnmarshalJSON([]byte("hello")))
}

func TestSetBSON(t *testing.T) {
    data, err := bson.Marshal(time.Now())
    assert.Nil(t, err)

    var raw bson.Raw
    assert.Nil(t, bson.Unmarshal(data, &raw))

    var mt MilliTime
    assert.Nil(t, mt.SetBSON(raw))
    assert.NotNil(t, mt.SetBSON(bson.Raw{}))
}
@@ -24,7 +24,7 @@ func TestTokenLimit_Rescue(t *testing.T) {
        rate  = 5
        burst = 10
    )
    l := NewTokenLimiter(rate, burst, redis.NewRedis(s.Addr(), redis.NodeType), "tokenlimit")
    l := NewTokenLimiter(rate, burst, redis.New(s.Addr()), "tokenlimit")
    s.Close()

    var allowed int
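The test now builds its Redis client with `redis.New(addr)` instead of the older `redis.NewRedis(addr, redis.NodeType)` constructor. A hypothetical, minimal use of the token limiter with the newer constructor; the address and key are placeholders, and `Allow` is assumed from the existing TokenLimiter API rather than shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/tal-tech/go-zero/core/limit"
	"github.com/tal-tech/go-zero/core/stores/redis"
)

func main() {
	store := redis.New("127.0.0.1:6379") // placeholder Redis address
	limiter := limit.NewTokenLimiter(5, 10, store, "api-rate-limit")

	if limiter.Allow() {
		fmt.Println("request allowed")
	} else {
		fmt.Println("request throttled")
	}
}
```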
@@ -31,6 +31,8 @@ var (

    // default to be enabled
    enabled = syncx.ForAtomicBool(true)
    // default to be enabled
    logEnabled = syncx.ForAtomicBool(true)
    // make it a variable for unit test
    systemOverloadChecker = func(cpuThreshold int64) bool {
        return stat.CpuUsage() >= cpuThreshold

@@ -80,6 +82,11 @@ func Disable() {
    enabled.Set(false)
}

// DisableLog disables the stat logs for load shedding.
func DisableLog() {
    logEnabled.Set(false)
}

// NewAdaptiveShedder returns an adaptive shedder.
// opts can be used to customize the Shedder.
func NewAdaptiveShedder(opts ...ShedderOption) Shedder {
@@ -25,6 +25,7 @@ func init() {
}

func TestAdaptiveShedder(t *testing.T) {
    DisableLog()
    shedder := NewAdaptiveShedder(WithWindow(bucketDuration), WithBuckets(buckets), WithCpuThreshold(100))
    var wg sync.WaitGroup
    var drop int64
@@ -48,6 +48,25 @@ func (s *SheddingStat) IncrementDrop() {
    atomic.AddInt64(&s.drop, 1)
}

func (s *SheddingStat) loop(c <-chan time.Time) {
    for range c {
        st := s.reset()

        if !logEnabled.True() {
            continue
        }

        c := stat.CpuUsage()
        if st.Drop == 0 {
            logx.Statf("(%s) shedding_stat [1m], cpu: %d, total: %d, pass: %d, drop: %d",
                s.name, c, st.Total, st.Pass, st.Drop)
        } else {
            logx.Statf("(%s) shedding_stat_drop [1m], cpu: %d, total: %d, pass: %d, drop: %d",
                s.name, c, st.Total, st.Pass, st.Drop)
        }
    }
}

func (s *SheddingStat) reset() snapshot {
    return snapshot{
        Total: atomic.SwapInt64(&s.total, 0),

@@ -59,15 +78,6 @@ func (s *SheddingStat) reset() snapshot {

func (s *SheddingStat) run() {
    ticker := time.NewTicker(time.Minute)
    defer ticker.Stop()
    for range ticker.C {
        c := stat.CpuUsage()
        st := s.reset()
        if st.Drop == 0 {
            logx.Statf("(%s) shedding_stat [1m], cpu: %d, total: %d, pass: %d, drop: %d",
                s.name, c, st.Total, st.Pass, st.Drop)
        } else {
            logx.Statf("(%s) shedding_stat_drop [1m], cpu: %d, total: %d, pass: %d, drop: %d",
                s.name, c, st.Total, st.Pass, st.Drop)
        }
    }

    s.loop(ticker.C)
}
@@ -2,6 +2,7 @@ package load

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

@@ -22,3 +23,32 @@ func TestSheddingStat(t *testing.T) {
    assert.Equal(t, int64(5), result.Pass)
    assert.Equal(t, int64(7), result.Drop)
}

func TestLoopTrue(t *testing.T) {
    ch := make(chan time.Time, 1)
    ch <- time.Now()
    close(ch)
    st := new(SheddingStat)
    logEnabled.Set(true)
    st.loop(ch)
}

func TestLoopTrueAndDrop(t *testing.T) {
    ch := make(chan time.Time, 1)
    ch <- time.Now()
    close(ch)
    st := new(SheddingStat)
    st.IncrementDrop()
    logEnabled.Set(true)
    st.loop(ch)
}

func TestLoopFalseAndDrop(t *testing.T) {
    ch := make(chan time.Time, 1)
    ch <- time.Now()
    close(ch)
    st := new(SheddingStat)
    st.IncrementDrop()
    logEnabled.Set(false)
    st.loop(ch)
}
@@ -20,49 +20,67 @@ func WithDuration(d time.Duration) Logger {
}

func (l *durationLogger) Error(v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
    }
}

func (l *durationLogger) Errorf(format string, v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
    }
}

func (l *durationLogger) Errorv(v interface{}) {
    if shallLog(ErrorLevel) {
        l.write(errorLog, levelError, v)
    }
}

func (l *durationLogger) Info(v ...interface{}) {
    if shouldLog(InfoLevel) {
    if shallLog(InfoLevel) {
        l.write(infoLog, levelInfo, fmt.Sprint(v...))
    }
}

func (l *durationLogger) Infof(format string, v ...interface{}) {
    if shouldLog(InfoLevel) {
    if shallLog(InfoLevel) {
        l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
    }
}

func (l *durationLogger) Infov(v interface{}) {
    if shallLog(InfoLevel) {
        l.write(infoLog, levelInfo, v)
    }
}

func (l *durationLogger) Slow(v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(slowLog, levelSlow, fmt.Sprint(v...))
    }
}

func (l *durationLogger) Slowf(format string, v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
    }
}

func (l *durationLogger) Slowv(v interface{}) {
    if shallLog(ErrorLevel) {
        l.write(slowLog, levelSlow, v)
    }
}

func (l *durationLogger) WithDuration(duration time.Duration) Logger {
    l.Duration = timex.ReprOfDuration(duration)
    return l
}

func (l *durationLogger) write(writer io.Writer, level, content string) {
func (l *durationLogger) write(writer io.Writer, level string, val interface{}) {
    l.Timestamp = getTimestamp()
    l.Level = level
    l.Content = content
    outputJson(writer, logEntry(*l))
    l.Content = val
    outputJson(writer, l)
}
62  core/logx/limitedexecutor_test.go  Normal file

@@ -0,0 +1,62 @@
package logx

import (
    "sync/atomic"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/tal-tech/go-zero/core/timex"
)

func TestLimitedExecutor_logOrDiscard(t *testing.T) {
    tests := []struct {
        name      string
        threshold time.Duration
        lastTime  time.Duration
        discarded uint32
        executed  bool
    }{
        {
            name:     "nil executor",
            executed: true,
        },
        {
            name:      "regular",
            threshold: time.Hour,
            lastTime:  timex.Now(),
            discarded: 10,
            executed:  false,
        },
        {
            name:      "slow",
            threshold: time.Duration(1),
            lastTime:  -1000,
            discarded: 10,
            executed:  true,
        },
    }

    for _, test := range tests {
        test := test
        t.Run(test.name, func(t *testing.T) {
            t.Parallel()

            executor := newLimitedExecutor(0)
            executor.threshold = test.threshold
            executor.discarded = test.discarded
            executor.lastTime.Set(test.lastTime)

            var run int32
            executor.logOrDiscard(func() {
                atomic.AddInt32(&run, 1)
            })
            if test.executed {
                assert.Equal(t, int32(1), atomic.LoadInt32(&run))
            } else {
                assert.Equal(t, int32(0), atomic.LoadInt32(&run))
                assert.Equal(t, test.discarded+1, atomic.LoadUint32(&executor.discarded))
            }
        })
    }
}
@@ -65,12 +65,14 @@ var (
    timeFormat   = "2006-01-02T15:04:05.000Z07"
    writeConsole bool
    logLevel     uint32
    infoLog   io.WriteCloser
    errorLog  io.WriteCloser
    severeLog io.WriteCloser
    slowLog   io.WriteCloser
    statLog   io.WriteCloser
    stackLog  io.Writer
    // use uint32 for atomic operations
    disableStat uint32
    infoLog     io.WriteCloser
    errorLog    io.WriteCloser
    severeLog   io.WriteCloser
    slowLog     io.WriteCloser
    statLog     io.WriteCloser
    stackLog    io.Writer

    once        sync.Once
    initialized uint32

@@ -79,10 +81,10 @@ var (

type (
    logEntry struct {
        Timestamp string `json:"@timestamp"`
        Level     string `json:"level"`
        Duration  string `json:"duration,omitempty"`
        Content   string `json:"content"`
        Timestamp string      `json:"@timestamp"`
        Level     string      `json:"level"`
        Duration  string      `json:"duration,omitempty"`
        Content   interface{} `json:"content"`
    }

    logOptions struct {

@@ -98,10 +100,13 @@ type (
    Logger interface {
        Error(...interface{})
        Errorf(string, ...interface{})
        Errorv(interface{})
        Info(...interface{})
        Infof(string, ...interface{})
        Infov(interface{})
        Slow(...interface{})
        Slowf(string, ...interface{})
        Slowv(interface{})
        WithDuration(time.Duration) Logger
    }
)

@@ -133,7 +138,7 @@ func SetUp(c LogConf) error {

// Alert alerts v in alert level, and the message is written to error log.
func Alert(v string) {
    output(errorLog, levelAlert, v)
    outputText(errorLog, levelAlert, v)
}

// Close closes the logging.

@@ -195,24 +200,29 @@ func Disable() {
    })
}

// DisableStat disables the stat logs.
func DisableStat() {
    atomic.StoreUint32(&disableStat, 1)
}

// Error writes v into error log.
func Error(v ...interface{}) {
    ErrorCaller(1, v...)
}

// Errorf writes v with format into error log.
func Errorf(format string, v ...interface{}) {
    ErrorCallerf(1, format, v...)
}

// ErrorCaller writes v with context into error log.
func ErrorCaller(callDepth int, v ...interface{}) {
    errorSync(fmt.Sprint(v...), callDepth+callerInnerDepth)
    errorTextSync(fmt.Sprint(v...), callDepth+callerInnerDepth)
}

// ErrorCallerf writes v with context in format into error log.
func ErrorCallerf(callDepth int, format string, v ...interface{}) {
    errorSync(fmt.Sprintf(format, v...), callDepth+callerInnerDepth)
    errorTextSync(fmt.Errorf(format, v...).Error(), callDepth+callerInnerDepth)
}

// Errorf writes v with format into error log.
func Errorf(format string, v ...interface{}) {
    ErrorCallerf(1, format, v...)
}

// ErrorStack writes v along with call stack into error log.

@@ -227,14 +237,25 @@ func ErrorStackf(format string, v ...interface{}) {
    stackSync(fmt.Sprintf(format, v...))
}

// Errorv writes v into error log with json content.
// No call stack attached, because not elegant to pack the messages.
func Errorv(v interface{}) {
    errorAnySync(v)
}

// Info writes v into access log.
func Info(v ...interface{}) {
    infoSync(fmt.Sprint(v...))
    infoTextSync(fmt.Sprint(v...))
}

// Infof writes v with format into access log.
func Infof(format string, v ...interface{}) {
    infoSync(fmt.Sprintf(format, v...))
    infoTextSync(fmt.Sprintf(format, v...))
}

// Infov writes v into access log with json content.
func Infov(v interface{}) {
    infoAnySync(v)
}

// Must checks if err is nil, otherwise logs the err and exits.

@@ -242,7 +263,7 @@ func Must(err error) {
    if err != nil {
        msg := formatWithCaller(err.Error(), 3)
        log.Print(msg)
        output(severeLog, levelFatal, msg)
        outputText(severeLog, levelFatal, msg)
        os.Exit(1)
    }
}

@@ -264,12 +285,17 @@ func Severef(format string, v ...interface{}) {

// Slow writes v into slow log.
func Slow(v ...interface{}) {
    slowSync(fmt.Sprint(v...))
    slowTextSync(fmt.Sprint(v...))
}

// Slowf writes v with format into slow log.
func Slowf(format string, v ...interface{}) {
    slowSync(fmt.Sprintf(format, v...))
    slowTextSync(fmt.Sprintf(format, v...))
}

// Slowv writes v into slow log with json content.
func Slowv(v interface{}) {
    slowAnySync(v)
}

// Stat writes v into stat log.

@@ -312,8 +338,14 @@ func createOutput(path string) (io.WriteCloser, error) {
        options.gzipEnabled), options.gzipEnabled)
}

func errorSync(msg string, callDepth int) {
    if shouldLog(ErrorLevel) {
func errorAnySync(v interface{}) {
    if shallLog(ErrorLevel) {
        outputAny(errorLog, levelError, v)
    }
}

func errorTextSync(msg string, callDepth int) {
    if shallLog(ErrorLevel) {
        outputError(errorLog, msg, callDepth)
    }
}

@@ -362,13 +394,28 @@ func handleOptions(opts []LogOption) {
    }
}

func infoSync(msg string) {
    if shouldLog(InfoLevel) {
        output(infoLog, levelInfo, msg)
func infoAnySync(val interface{}) {
    if shallLog(InfoLevel) {
        outputAny(infoLog, levelInfo, val)
    }
}

func output(writer io.Writer, level, msg string) {
func infoTextSync(msg string) {
    if shallLog(InfoLevel) {
        outputText(infoLog, levelInfo, msg)
    }
}

func outputAny(writer io.Writer, level string, val interface{}) {
    info := logEntry{
        Timestamp: getTimestamp(),
        Level:     level,
        Content:   val,
    }
    outputJson(writer, info)
}

func outputText(writer io.Writer, level, msg string) {
    info := logEntry{
        Timestamp: getTimestamp(),
        Level:     level,

@@ -379,7 +426,7 @@ func output(writer io.Writer, level, msg string) {

func outputError(writer io.Writer, msg string, callDepth int) {
    content := formatWithCaller(msg, callDepth)
    output(writer, levelError, content)
    outputText(writer, levelError, content)
}

func outputJson(writer io.Writer, info interface{}) {

@@ -481,30 +528,40 @@ func setupWithVolume(c LogConf) error {
}

func severeSync(msg string) {
    if shouldLog(SevereLevel) {
        output(severeLog, levelSevere, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
    if shallLog(SevereLevel) {
        outputText(severeLog, levelSevere, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
    }
}

func shouldLog(level uint32) bool {
func shallLog(level uint32) bool {
    return atomic.LoadUint32(&logLevel) <= level
}

func slowSync(msg string) {
    if shouldLog(ErrorLevel) {
        output(slowLog, levelSlow, msg)
func shallLogStat() bool {
    return atomic.LoadUint32(&disableStat) == 0
}

func slowAnySync(v interface{}) {
    if shallLog(ErrorLevel) {
        outputAny(slowLog, levelSlow, v)
    }
}

func slowTextSync(msg string) {
    if shallLog(ErrorLevel) {
        outputText(slowLog, levelSlow, msg)
    }
}

func stackSync(msg string) {
    if shouldLog(ErrorLevel) {
        output(stackLog, levelError, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
    if shallLog(ErrorLevel) {
        outputText(stackLog, levelError, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
    }
}

func statSync(msg string) {
    if shouldLog(InfoLevel) {
        output(statLog, levelStat, msg)
    if shallLogStat() && shallLog(InfoLevel) {
        outputText(statLog, levelStat, msg)
    }
}
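The new `*v` functions accept an arbitrary value and place it in the JSON `content` field as-is, instead of forcing everything through `fmt.Sprint`, and stat logs can now be switched off independently of the log level. A hedged sketch of how these additions might be used; the map fields are placeholders:

```go
package main

import "github.com/tal-tech/go-zero/core/logx"

func main() {
	// Content becomes a JSON object instead of a flat string.
	logx.Infov(map[string]interface{}{
		"event": "user_login", // placeholder fields
		"uid":   42,
	})

	// Stat logs can be disabled entirely, e.g. in tests or quiet environments.
	logx.DisableStat()
}
```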
@@ -2,6 +2,7 @@ package logx

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"

@@ -92,6 +93,30 @@ func TestStructedLogAlert(t *testing.T) {
    })
}

func TestStructedLogError(t *testing.T) {
    doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
        errorLog = writer
    }, func(v ...interface{}) {
        Error(v...)
    })
}

func TestStructedLogErrorf(t *testing.T) {
    doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
        errorLog = writer
    }, func(v ...interface{}) {
        Errorf("%s", fmt.Sprint(v...))
    })
}

func TestStructedLogErrorv(t *testing.T) {
    doTestStructedLog(t, levelError, func(writer io.WriteCloser) {
        errorLog = writer
    }, func(v ...interface{}) {
        Errorv(fmt.Sprint(v...))
    })
}

func TestStructedLogInfo(t *testing.T) {
    doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
        infoLog = writer

@@ -100,6 +125,22 @@ func TestStructedLogInfo(t *testing.T) {
    })
}

func TestStructedLogInfof(t *testing.T) {
    doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
        infoLog = writer
    }, func(v ...interface{}) {
        Infof("%s", fmt.Sprint(v...))
    })
}

func TestStructedLogInfov(t *testing.T) {
    doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
        infoLog = writer
    }, func(v ...interface{}) {
        Infov(fmt.Sprint(v...))
    })
}

func TestStructedLogSlow(t *testing.T) {
    doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
        slowLog = writer

@@ -116,6 +157,14 @@ func TestStructedLogSlowf(t *testing.T) {
    })
}

func TestStructedLogSlowv(t *testing.T) {
    doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
        slowLog = writer
    }, func(v ...interface{}) {
        Slowv(fmt.Sprint(v...))
    })
}

func TestStructedLogStat(t *testing.T) {
    doTestStructedLog(t, levelStat, func(writer io.WriteCloser) {
        statLog = writer

@@ -194,6 +243,16 @@ func TestSetLevelWithDuration(t *testing.T) {
    assert.Equal(t, 0, writer.builder.Len())
}

func TestErrorfWithWrappedError(t *testing.T) {
    SetLevel(ErrorLevel)
    const message = "there"
    writer := new(mockWriter)
    errorLog = writer
    atomic.StoreUint32(&initialized, 1)
    Errorf("hello %w", errors.New(message))
    assert.True(t, strings.Contains(writer.builder.String(), "hello there"))
}

func TestMustNil(t *testing.T) {
    Must(nil)
}

@@ -246,6 +305,17 @@ func TestDisable(t *testing.T) {
    assert.Nil(t, Close())
}

func TestDisableStat(t *testing.T) {
    DisableStat()

    const message = "hello there"
    writer := new(mockWriter)
    statLog = writer
    atomic.StoreUint32(&initialized, 1)
    Stat(message)
    assert.Equal(t, 0, writer.builder.Len())
}

func TestWithGzip(t *testing.T) {
    fn := WithGzip()
    var opt logOptions

@@ -357,7 +427,9 @@ func doTestStructedLog(t *testing.T, level string, setup func(writer io.WriteClo
        t.Error(err)
    }
    assert.Equal(t, level, entry.Level)
    assert.True(t, strings.Contains(entry.Content, message))
    val, ok := entry.Content.(string)
    assert.True(t, ok)
    assert.True(t, strings.Contains(val, message))
}

func testSetLevelTwiceWithMode(t *testing.T, mode string) {
@@ -47,7 +47,6 @@ type (
        done      chan lang.PlaceholderType
        rule      RotateRule
        compress  bool
        keepDays  int
        // can't use threading.RoutineGroup because of cycle import
        waitGroup sync.WaitGroup
        closeOnce sync.Once
@@ -3,6 +3,7 @@ package logx
import (
    "os"
    "path/filepath"
    "syscall"
    "testing"
    "time"

@@ -97,7 +98,13 @@ func TestRotateLoggerRotate(t *testing.T) {
        }()
    }
    err = logger.rotate()
    assert.Nil(t, err)
    switch v := err.(type) {
    case *os.LinkError:
        // avoid rename error on docker container
        assert.Equal(t, syscall.EXDEV, v.Err)
    default:
        assert.Nil(t, err)
    }
}

func TestRotateLoggerWrite(t *testing.T) {
@@ -44,5 +44,5 @@ func captureOutput(f func()) string {
func getContent(jsonStr string) string {
    var entry logEntry
    json.Unmarshal([]byte(jsonStr), &entry)
    return entry.Content
    return entry.Content.(string)
}
@@ -7,7 +7,7 @@ import (
    "time"

    "github.com/tal-tech/go-zero/core/timex"
    "github.com/tal-tech/go-zero/core/trace/tracespec"
    "go.opentelemetry.io/otel/trace"
)

type traceLogger struct {

@@ -18,50 +18,68 @@ type traceLogger struct {
}

func (l *traceLogger) Error(v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), durationCallerDepth))
    }
}

func (l *traceLogger) Errorf(format string, v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), durationCallerDepth))
    }
}

func (l *traceLogger) Errorv(v interface{}) {
    if shallLog(ErrorLevel) {
        l.write(errorLog, levelError, v)
    }
}

func (l *traceLogger) Info(v ...interface{}) {
    if shouldLog(InfoLevel) {
    if shallLog(InfoLevel) {
        l.write(infoLog, levelInfo, fmt.Sprint(v...))
    }
}

func (l *traceLogger) Infof(format string, v ...interface{}) {
    if shouldLog(InfoLevel) {
    if shallLog(InfoLevel) {
        l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
    }
}

func (l *traceLogger) Infov(v interface{}) {
    if shallLog(InfoLevel) {
        l.write(infoLog, levelInfo, v)
    }
}
func (l *traceLogger) Slow(v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(slowLog, levelSlow, fmt.Sprint(v...))
    }
}

func (l *traceLogger) Slowf(format string, v ...interface{}) {
    if shouldLog(ErrorLevel) {
    if shallLog(ErrorLevel) {
        l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
    }
}

func (l *traceLogger) Slowv(v interface{}) {
    if shallLog(ErrorLevel) {
        l.write(slowLog, levelSlow, v)
    }
}

func (l *traceLogger) WithDuration(duration time.Duration) Logger {
    l.Duration = timex.ReprOfDuration(duration)
    return l
}

func (l *traceLogger) write(writer io.Writer, level, content string) {
func (l *traceLogger) write(writer io.Writer, level string, val interface{}) {
    l.Timestamp = getTimestamp()
    l.Level = level
    l.Content = content
    l.Content = val
    l.Trace = traceIdFromContext(l.ctx)
    l.Span = spanIdFromContext(l.ctx)
    outputJson(writer, l)

@@ -75,19 +93,19 @@ func WithContext(ctx context.Context) Logger {
}

func spanIdFromContext(ctx context.Context) string {
    t, ok := ctx.Value(tracespec.TracingKey).(tracespec.Trace)
    if !ok {
        return ""
    spanCtx := trace.SpanContextFromContext(ctx)
    if spanCtx.HasSpanID() {
        return spanCtx.SpanID().String()
    }

    return t.SpanId()
    return ""
}

func traceIdFromContext(ctx context.Context) string {
    t, ok := ctx.Value(tracespec.TracingKey).(tracespec.Trace)
    if !ok {
        return ""
    spanCtx := trace.SpanContextFromContext(ctx)
    if spanCtx.HasTraceID() {
        return spanCtx.TraceID().String()
    }

    return t.TraceId()
    return ""
}
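The rewritten spanIdFromContext and traceIdFromContext above pull the IDs from the OpenTelemetry span context instead of the old tracespec value, so any context carrying an active otel span shows up in the log output. A minimal usage sketch, built only from import paths and calls that already appear in this diff; the tracer name "example" and span name "handle-request" are placeholders:

package main

import (
    "context"

    "github.com/tal-tech/go-zero/core/logx"
    "go.opentelemetry.io/otel"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    // Register a tracer provider so spans carry valid IDs (mirrors the test setup below).
    tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
    otel.SetTracerProvider(tp)

    // Start a span; the returned context carries the span.
    ctx, span := tp.Tracer("example").Start(context.Background(), "handle-request")
    defer span.End()

    // logx.WithContext reads the trace and span IDs from ctx,
    // so the emitted JSON entry includes "trace" and "span" fields.
    logx.WithContext(ctx).Info("request handled")
}

The tests below exercise the same path via tp.Tracer("foo").Start.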
@@ -9,71 +9,90 @@ import (
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/tal-tech/go-zero/core/trace/tracespec"
    "go.opentelemetry.io/otel"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

const (
    mockTraceID = "mock-trace-id"
    mockSpanID = "mock-span-id"
    traceKey = "trace"
    spanKey = "span"
)

var mock tracespec.Trace = new(mockTrace)

func TestTraceLog(t *testing.T) {
    var buf mockWriter
    atomic.StoreUint32(&initialized, 1)
    ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
    otp := otel.GetTracerProvider()
    tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
    otel.SetTracerProvider(tp)
    defer otel.SetTracerProvider(otp)

    ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
    WithContext(ctx).(*traceLogger).write(&buf, levelInfo, testlog)
    assert.True(t, strings.Contains(buf.String(), mockTraceID))
    assert.True(t, strings.Contains(buf.String(), mockSpanID))
    assert.True(t, strings.Contains(buf.String(), traceKey))
    assert.True(t, strings.Contains(buf.String(), spanKey))
}
func TestTraceError(t *testing.T) {
    var buf mockWriter
    atomic.StoreUint32(&initialized, 1)
    errorLog = newLogWriter(log.New(&buf, "", flags))
    ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
    otp := otel.GetTracerProvider()
    tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
    otel.SetTracerProvider(tp)
    defer otel.SetTracerProvider(otp)

    ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
    l := WithContext(ctx).(*traceLogger)
    SetLevel(InfoLevel)
    l.WithDuration(time.Second).Error(testlog)
    assert.True(t, strings.Contains(buf.String(), mockTraceID))
    assert.True(t, strings.Contains(buf.String(), mockSpanID))
    assert.True(t, strings.Contains(buf.String(), traceKey))
    assert.True(t, strings.Contains(buf.String(), spanKey))
    buf.Reset()
    l.WithDuration(time.Second).Errorf(testlog)
    assert.True(t, strings.Contains(buf.String(), mockTraceID))
    assert.True(t, strings.Contains(buf.String(), mockSpanID))
    assert.True(t, strings.Contains(buf.String(), traceKey))
    assert.True(t, strings.Contains(buf.String(), spanKey))
}

func TestTraceInfo(t *testing.T) {
    var buf mockWriter
    atomic.StoreUint32(&initialized, 1)
    infoLog = newLogWriter(log.New(&buf, "", flags))
    ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
    otp := otel.GetTracerProvider()
    tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
    otel.SetTracerProvider(tp)
    defer otel.SetTracerProvider(otp)

    ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
    l := WithContext(ctx).(*traceLogger)
    SetLevel(InfoLevel)
    l.WithDuration(time.Second).Info(testlog)
    assert.True(t, strings.Contains(buf.String(), mockTraceID))
    assert.True(t, strings.Contains(buf.String(), mockSpanID))
    assert.True(t, strings.Contains(buf.String(), traceKey))
    assert.True(t, strings.Contains(buf.String(), spanKey))
    buf.Reset()
    l.WithDuration(time.Second).Infof(testlog)
    assert.True(t, strings.Contains(buf.String(), mockTraceID))
    assert.True(t, strings.Contains(buf.String(), mockSpanID))
    assert.True(t, strings.Contains(buf.String(), traceKey))
    assert.True(t, strings.Contains(buf.String(), spanKey))
}

func TestTraceSlow(t *testing.T) {
    var buf mockWriter
    atomic.StoreUint32(&initialized, 1)
    slowLog = newLogWriter(log.New(&buf, "", flags))
    ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
    otp := otel.GetTracerProvider()
    tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample()))
    otel.SetTracerProvider(tp)
    defer otel.SetTracerProvider(otp)

    ctx, _ := tp.Tracer("foo").Start(context.Background(), "bar")
    l := WithContext(ctx).(*traceLogger)
    SetLevel(InfoLevel)
    l.WithDuration(time.Second).Slow(testlog)
    assert.True(t, strings.Contains(buf.String(), mockTraceID))
    assert.True(t, strings.Contains(buf.String(), mockSpanID))
    assert.True(t, strings.Contains(buf.String(), traceKey))
    assert.True(t, strings.Contains(buf.String(), spanKey))
    buf.Reset()
    l.WithDuration(time.Second).Slowf(testlog)
    assert.True(t, strings.Contains(buf.String(), mockTraceID))
    assert.True(t, strings.Contains(buf.String(), mockSpanID))
    assert.True(t, strings.Contains(buf.String(), traceKey))
    assert.True(t, strings.Contains(buf.String(), spanKey))
}
func TestTraceWithoutContext(t *testing.T) {
@@ -83,34 +102,10 @@ func TestTraceWithoutContext(t *testing.T) {
    l := WithContext(context.Background()).(*traceLogger)
    SetLevel(InfoLevel)
    l.WithDuration(time.Second).Info(testlog)
    assert.False(t, strings.Contains(buf.String(), mockTraceID))
    assert.False(t, strings.Contains(buf.String(), mockSpanID))
    assert.False(t, strings.Contains(buf.String(), traceKey))
    assert.False(t, strings.Contains(buf.String(), spanKey))
    buf.Reset()
    l.WithDuration(time.Second).Infof(testlog)
    assert.False(t, strings.Contains(buf.String(), mockTraceID))
    assert.False(t, strings.Contains(buf.String(), mockSpanID))
}

type mockTrace struct{}

func (t mockTrace) TraceId() string {
    return mockTraceID
}

func (t mockTrace) SpanId() string {
    return mockSpanID
}

func (t mockTrace) Finish() {
}

func (t mockTrace) Fork(ctx context.Context, serviceName, operationName string) (context.Context, tracespec.Trace) {
    return nil, nil
}

func (t mockTrace) Follow(ctx context.Context, serviceName, operationName string) (context.Context, tracespec.Trace) {
    return nil, nil
}

func (t mockTrace) Visit(fn func(key, val string) bool) {
    assert.False(t, strings.Contains(buf.String(), traceKey))
    assert.False(t, strings.Contains(buf.String(), spanKey))
}
@@ -43,7 +43,8 @@ type (
    UnmarshalOption func(*unmarshalOptions)

    unmarshalOptions struct {
        fromString bool
        fromString bool
        canonicalKey func(key string) string
    }

    keyCache map[string][]string

@@ -206,6 +207,8 @@ func (u *Unmarshaler) processFieldNotFromString(field reflect.StructField, value
    switch {
    case valueKind == reflect.Map && typeKind == reflect.Struct:
        return u.processFieldStruct(field, value, mapValue, fullName)
    case valueKind == reflect.Map && typeKind == reflect.Map:
        return u.fillMap(field, value, mapValue)
    case valueKind == reflect.String && typeKind == reflect.Slice:
        return u.fillSliceFromString(fieldType, value, mapValue)
    case valueKind == reflect.String && derefedFieldType == durationType:

@@ -229,7 +232,7 @@ func (u *Unmarshaler) processFieldPrimitive(field reflect.StructField, value ref
    default:
        switch v := mapValue.(type) {
        case json.Number:
            return u.processFieldPrimitiveWithJsonNumber(field, value, v, opts, fullName)
            return u.processFieldPrimitiveWithJSONNumber(field, value, v, opts, fullName)
        default:
            if typeKind == valueKind {
                if err := validateValueInOptions(opts.options(), mapValue); err != nil {

@@ -244,7 +247,7 @@ func (u *Unmarshaler) processFieldPrimitive(field reflect.StructField, value ref
    return newTypeMismatchError(fullName)
}

func (u *Unmarshaler) processFieldPrimitiveWithJsonNumber(field reflect.StructField, value reflect.Value,
func (u *Unmarshaler) processFieldPrimitiveWithJSONNumber(field reflect.StructField, value reflect.Value,
    v json.Number, opts *fieldOptionsWithContext, fullName string) error {
    fieldType := field.Type
    fieldKind := fieldType.Kind()

@@ -323,7 +326,11 @@ func (u *Unmarshaler) processNamedField(field reflect.StructField, value reflect
    }

    fullName = join(fullName, key)
    mapValue, hasValue := getValue(m, key)
    canonicalKey := key
    if u.opts.canonicalKey != nil {
        canonicalKey = u.opts.canonicalKey(key)
    }
    mapValue, hasValue := getValue(m, canonicalKey)
    if hasValue {
        return u.processNamedFieldWithValue(field, value, mapValue, key, opts, fullName)
    }
@@ -513,14 +520,14 @@ func (u *Unmarshaler) fillSliceValue(slice reflect.Value, index int, baseKind re
        target.Set(reflect.ValueOf(value))
        ithVal.Set(target.Addr())
        return nil
    } else {
        if ithVal.Kind() != reflect.TypeOf(value).Kind() {
            return errTypeMismatch
        }

        ithVal.Set(reflect.ValueOf(value))
        return nil
    }

    if ithVal.Kind() != reflect.TypeOf(value).Kind() {
        return errTypeMismatch
    }

    ithVal.Set(reflect.ValueOf(value))
    return nil
    }
}

@@ -579,6 +586,8 @@ func (u *Unmarshaler) generateMap(keyType, elemType reflect.Type, mapValue inter
        targetValue.SetMapIndex(key, innerValue)
    default:
        switch v := keythData.(type) {
        case bool:
            targetValue.SetMapIndex(key, reflect.ValueOf(v))
        case string:
            targetValue.SetMapIndex(key, reflect.ValueOf(v))
        case json.Number:

@@ -621,6 +630,13 @@ func WithStringValues() UnmarshalOption {
    }
}

// WithCanonicalKeyFunc customizes a Unmarshaler with Canonical Key func
func WithCanonicalKeyFunc(f func(string) string) UnmarshalOption {
    return func(opt *unmarshalOptions) {
        opt.canonicalKey = f
    }
}

func fillDurationValue(fieldKind reflect.Kind, value reflect.Value, dur string) error {
    d, err := time.ParseDuration(dur)
    if err != nil {
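The new WithCanonicalKeyFunc option above lets callers normalize the struct-side key before it is looked up in the source map (see the canonicalKey branch in processNamedField). A rough sketch of how it could be used, assuming the package's NewUnmarshaler constructor and Unmarshal method; the "header" tag name and the textproto canonicalization are illustrative, not taken from this diff:

package main

import (
    "fmt"
    "net/textproto"

    "github.com/tal-tech/go-zero/core/mapping"
)

func main() {
    // Struct tags are written in lower case, while the source map is keyed by
    // canonical MIME header names; the option bridges the two.
    u := mapping.NewUnmarshaler("header", mapping.WithCanonicalKeyFunc(textproto.CanonicalMIMEHeaderKey))

    input := map[string]interface{}{
        "Content-Type": "application/json",
    }

    var v struct {
        ContentType string `header:"content-type"`
    }
    if err := u.Unmarshal(input, &v); err != nil {
        panic(err)
    }
    fmt.Println(v.ContentType)
}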
@@ -209,6 +209,12 @@ func TestRepr(t *testing.T) {
        newMockPtr(),
        "mockptr",
    },
    {
        &mockOpacity{
            val: 1,
        },
        "{1}",
    },
    {
        true,
        "true",
@@ -6,6 +6,7 @@ import (
    "testing"

    "github.com/stretchr/testify/assert"
    "k8s.io/utils/io"
)

func TestUnmarshalYamlBytes(t *testing.T) {

@@ -18,6 +19,22 @@ func TestUnmarshalYamlBytes(t *testing.T) {
    assert.Equal(t, "liao", c.Name)
}

func TestUnmarshalYamlBytesErrorInput(t *testing.T) {
    var c struct {
        Name string
    }
    content := []byte(`liao`)
    assert.NotNil(t, UnmarshalYamlBytes(content, &c))
}

func TestUnmarshalYamlBytesEmptyInput(t *testing.T) {
    var c struct {
        Name string
    }
    content := []byte(``)
    assert.NotNil(t, UnmarshalYamlBytes(content, &c))
}

func TestUnmarshalYamlBytesOptional(t *testing.T) {
    var c struct {
        Name string

@@ -918,3 +935,82 @@ func TestUnmarshalYamlReaderError(t *testing.T) {
    err := UnmarshalYamlReader(reader, &v)
    assert.NotNil(t, err)
}
func TestUnmarshalYamlBadReader(t *testing.T) {
    var v struct {
        Any string
    }

    err := UnmarshalYamlReader(new(badReader), &v)
    assert.NotNil(t, err)
}

func TestUnmarshalYamlMapBool(t *testing.T) {
    text := `machine:
  node1: true
  node2: true
  node3: true
`
    var v struct {
        Machine map[string]bool `json:"machine,optional"`
    }
    reader := strings.NewReader(text)
    assert.Nil(t, UnmarshalYamlReader(reader, &v))
    assert.True(t, v.Machine["node1"])
    assert.True(t, v.Machine["node2"])
    assert.True(t, v.Machine["node3"])
}

func TestUnmarshalYamlMapInt(t *testing.T) {
    text := `machine:
  node1: 1
  node2: 2
  node3: 3
`
    var v struct {
        Machine map[string]int `json:"machine,optional"`
    }
    reader := strings.NewReader(text)
    assert.Nil(t, UnmarshalYamlReader(reader, &v))
    assert.Equal(t, 1, v.Machine["node1"])
    assert.Equal(t, 2, v.Machine["node2"])
    assert.Equal(t, 3, v.Machine["node3"])
}
func TestUnmarshalYamlMapByte(t *testing.T) {
    text := `machine:
  node1: 1
  node2: 2
  node3: 3
`
    var v struct {
        Machine map[string]byte `json:"machine,optional"`
    }
    reader := strings.NewReader(text)
    assert.Nil(t, UnmarshalYamlReader(reader, &v))
    assert.Equal(t, byte(1), v.Machine["node1"])
    assert.Equal(t, byte(2), v.Machine["node2"])
    assert.Equal(t, byte(3), v.Machine["node3"])
}

func TestUnmarshalYamlMapRune(t *testing.T) {
    text := `machine:
  node1: 1
  node2: 2
  node3: 3
`
    var v struct {
        Machine map[string]rune `json:"machine,optional"`
    }
    reader := strings.NewReader(text)
    assert.Nil(t, UnmarshalYamlReader(reader, &v))
    assert.Equal(t, rune(1), v.Machine["node1"])
    assert.Equal(t, rune(2), v.Machine["node2"])
    assert.Equal(t, rune(3), v.Machine["node3"])
}

type badReader struct{}

func (b *badReader) Read(p []byte) (n int, err error) {
    return 0, io.ErrLimitReached
}
@@ -112,6 +112,12 @@ func MapReduceWithSource(source <-chan interface{}, mapper MapperFunc, reducer R
    opts ...Option) (interface{}, error) {
    options := buildOptions(opts...)
    output := make(chan interface{})
    defer func() {
        for range output {
            panic("more than one element written in reducer")
        }
    }()

    collector := make(chan interface{}, options.workers)
    done := syncx.NewDoneChan()
    writer := newGuardedWriter(output, done.Done())
@@ -202,6 +202,22 @@ func TestMapReduce(t *testing.T) {
    }
}

func TestMapReduceWithReduerWriteMoreThanOnce(t *testing.T) {
    assert.Panics(t, func() {
        MapReduce(func(source chan<- interface{}) {
            for i := 0; i < 10; i++ {
                source <- i
            }
        }, func(item interface{}, writer Writer, cancel func(error)) {
            writer.Write(item)
        }, func(pipe <-chan interface{}, writer Writer, cancel func(error)) {
            drain(pipe)
            writer.Write("one")
            writer.Write("two")
        })
    })
}
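The deferred drain loop added above turns a reducer that writes more than once into an explicit panic, which the test exercises. For contrast, a correct reducer writes exactly once; a small self-contained sketch using the same MapReduce signature shown in the test (the mr package path is assumed from the rest of this diff):

package main

import (
    "fmt"

    "github.com/tal-tech/go-zero/core/mr"
)

func main() {
    result, err := mr.MapReduce(func(source chan<- interface{}) {
        for i := 1; i <= 10; i++ {
            source <- i
        }
    }, func(item interface{}, writer mr.Writer, cancel func(error)) {
        n := item.(int)
        writer.Write(n * n) // each mapper writes one mapped value
    }, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) {
        sum := 0
        for v := range pipe {
            sum += v.(int)
        }
        writer.Write(sum) // the reducer must write exactly once
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(result) // 385
}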

func TestMapReduceVoid(t *testing.T) {
    var value uint32
    tests := []struct {
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package proc

@@ -1,3 +1,4 @@
//go:build linux || darwin
// +build linux darwin

package proc

@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package proc

@@ -1,3 +1,4 @@
//go:build linux || darwin
// +build linux darwin

package proc

@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package proc

@@ -14,5 +15,5 @@ func AddWrapUpListener(fn func()) func() {
    return fn
}

func SetTimeoutToForceQuit(duration time.Duration) {
func SetTimeToForceQuit(duration time.Duration) {
}

@@ -1,3 +1,4 @@
//go:build linux || darwin
// +build linux darwin

package proc
core/proc/signals+polyfill.go (Normal file, 10 lines)

@@ -0,0 +1,10 @@
//go:build windows
// +build windows

package proc

import "context"

func Done() <-chan struct{} {
    return context.Background().Done()
}
@@ -1,3 +1,4 @@
//go:build linux || darwin
// +build linux darwin

package proc

@@ -12,6 +13,8 @@ import (

const timeFormat = "0102150405"

var done = make(chan struct{})

func init() {
    go func() {
        var profiler Stopper

@@ -33,6 +36,13 @@ func init() {
                profiler = nil
            }
        case syscall.SIGTERM:
            select {
            case <-done:
                // already closed
            default:
                close(done)
            }

            gracefulStop(signals)
        default:
            logx.Error("Got unregistered signal:", v)

@@ -40,3 +50,8 @@ func init() {
        }
    }()
}

// Done returns the channel that notifies the process quitting.
func Done() <-chan struct{} {
    return done
}
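With Done() exported above, long-running goroutines can observe SIGTERM-driven shutdown without wiring their own signal handler. A minimal consumer sketch; the ticker interval and log lines are illustrative:

package main

import (
    "fmt"
    "time"

    "github.com/tal-tech/go-zero/core/proc"
)

func main() {
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()

    for {
        select {
        case <-proc.Done():
            // closed once the process receives SIGTERM (see the init handler above)
            fmt.Println("shutting down")
            return
        case <-ticker.C:
            fmt.Println("working")
        }
    }
}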
core/proc/signals_test.go (Normal file, 16 lines)

@@ -0,0 +1,16 @@
package proc

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestDone(t *testing.T) {
    select {
    case <-Done():
        assert.Fail(t, "should run")
    default:
    }
    assert.NotNil(t, Done())
}
@@ -23,11 +23,11 @@ func Enabled() bool {

// StartAgent starts a prometheus agent.
func StartAgent(c Config) {
    once.Do(func() {
        if len(c.Host) == 0 {
            return
        }
    if len(c.Host) == 0 {
        return
    }

    once.Do(func() {
        enabled.Set(true)
        threading.GoSafe(func() {
            http.Handle(c.Path, promhttp.Handler())
@@ -1,3 +1,4 @@
//go:build debug
// +build debug

package search

@@ -7,6 +7,7 @@ import (
    "github.com/tal-tech/go-zero/core/logx"
    "github.com/tal-tech/go-zero/core/prometheus"
    "github.com/tal-tech/go-zero/core/stat"
    "github.com/tal-tech/go-zero/core/trace"
)

const (

@@ -29,6 +30,7 @@ type ServiceConf struct {
    Mode string `json:",default=pro,options=dev|test|rt|pre|pro"`
    MetricsUrl string `json:",optional"`
    Prometheus prometheus.Config `json:",optional"`
    Telemetry trace.Config `json:",optional"`
}

// MustSetUp sets up the service, exits on error.

@@ -49,6 +51,12 @@ func (sc ServiceConf) SetUp() error {

    sc.initMode()
    prometheus.StartAgent(sc.Prometheus)

    if len(sc.Telemetry.Name) == 0 {
        sc.Telemetry.Name = sc.Name
    }
    trace.StartAgent(sc.Telemetry)

    if len(sc.MetricsUrl) > 0 {
        stat.SetReportWriter(stat.NewRemoteWriter(sc.MetricsUrl))
    }
@@ -26,6 +26,7 @@ type (
    }

    // A ServiceGroup is a group of services.
    // Attention: the starting order of the added services is not guaranteed.
    ServiceGroup struct {
        services []Service
        stopOnce func()

@@ -41,7 +42,8 @@ func NewServiceGroup() *ServiceGroup {

// Add adds service into sg.
func (sg *ServiceGroup) Add(service Service) {
    sg.services = append(sg.services, service)
    // push front, stop with reverse order.
    sg.services = append([]Service{service}, sg.services...)
}

// Start starts the ServiceGroup.
@@ -1,3 +1,4 @@
//go:build !linux
// +build !linux

package stat

@@ -1,3 +1,4 @@
//go:build linux
// +build linux

package stat

@@ -1,3 +1,4 @@
//go:build linux
// +build linux

package stat

@@ -7,7 +7,9 @@ import (
)

func TestRefreshCpu(t *testing.T) {
    assert.True(t, RefreshCpu() >= 0)
    assert.NotPanics(t, func() {
        RefreshCpu()
    })
}

func BenchmarkRefreshCpu(b *testing.B) {

@@ -1,3 +1,4 @@
//go:build !linux
// +build !linux

package internal

@@ -38,7 +38,9 @@ func init() {
                atomic.StoreInt64(&cpuUsage, usage)
            })
        case <-allTicker.C:
            printUsage()
            if logEnabled.True() {
                printUsage()
            }
        }
    }
    }()
core/stores/builder/builder.go (Normal file, 63 lines)

@@ -0,0 +1,63 @@
package builder

import (
    "fmt"
    "reflect"
    "strings"
)

const dbTag = "db"

// RawFieldNames converts golang struct field into slice string.
func RawFieldNames(in interface{}, postgresSql ...bool) []string {
    out := make([]string, 0)
    v := reflect.ValueOf(in)
    if v.Kind() == reflect.Ptr {
        v = v.Elem()
    }

    var pg bool
    if len(postgresSql) > 0 {
        pg = postgresSql[0]
    }

    // we only accept structs
    if v.Kind() != reflect.Struct {
        panic(fmt.Errorf("ToMap only accepts structs; got %T", v))
    }

    typ := v.Type()
    for i := 0; i < v.NumField(); i++ {
        // gets us a StructField
        fi := typ.Field(i)
        if tagv := fi.Tag.Get(dbTag); tagv != "" {
            if pg {
                out = append(out, tagv)
            } else {
                out = append(out, fmt.Sprintf("`%s`", tagv))
            }
        } else {
            if pg {
                out = append(out, fi.Name)
            } else {
                out = append(out, fmt.Sprintf("`%s`", fi.Name))
            }
        }
    }

    return out
}

// PostgreSqlJoin concatenates the given elements into a string.
func PostgreSqlJoin(elems []string) string {
    b := new(strings.Builder)
    for index, e := range elems {
        b.WriteString(fmt.Sprintf("%s = $%d, ", e, index+2))
    }

    if b.Len() == 0 {
        return b.String()
    }

    return b.String()[0 : b.Len()-2]
}
core/stores/builder/builder_test.go (Normal file, 24 lines)

@@ -0,0 +1,24 @@
package builder

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

type mockedUser struct {
    ID string `db:"id" json:"id,omitempty"`
    UserName string `db:"user_name" json:"userName,omitempty"`
    Sex int `db:"sex" json:"sex,omitempty"`
    UUID string `db:"uuid" uuid:"uuid,omitempty"`
    Age int `db:"age" json:"age"`
}

func TestFieldNames(t *testing.T) {
    t.Run("new", func(t *testing.T) {
        var u mockedUser
        out := RawFieldNames(&u)
        expected := []string{"`id`", "`user_name`", "`sex`", "`uuid`", "`age`"}
        assert.Equal(t, expected, out)
    })
}
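The new builder helpers are typically combined with strings.Join to assemble column lists for generated models. A small sketch built only from the functions added above; the table name and struct are illustrative:

package main

import (
    "fmt"
    "strings"

    "github.com/tal-tech/go-zero/core/stores/builder"
)

type User struct {
    ID   string `db:"id"`
    Name string `db:"name"`
    Age  int    `db:"age"`
}

func main() {
    // MySQL flavor: backtick-quoted column names.
    fields := builder.RawFieldNames(&User{})
    query := fmt.Sprintf("SELECT %s FROM users WHERE id = ?", strings.Join(fields, ", "))
    fmt.Println(query) // SELECT `id`, `name`, `age` FROM users WHERE id = ?

    // PostgreSQL flavor: unquoted names and $n placeholders starting at $2,
    // leaving $1 free for the key in an UPDATE statement.
    pgFields := builder.RawFieldNames(&User{}, true)
    set := builder.PostgreSqlJoin(pgFields)
    fmt.Println(set) // id = $2, name = $3, age = $4
}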
core/stores/cache/cache.go (vendored, 2 lines)

@@ -29,7 +29,7 @@ type (
)

// New returns a Cache.
func New(c ClusterConf, barrier syncx.SharedCalls, st *Stat, errNotFound error,
func New(c ClusterConf, barrier syncx.SingleFlight, st *Stat, errNotFound error,
    opts ...Option) Cache {
    if len(c) == 0 || TotalWeights(c) <= 0 {
        log.Fatal("no cache nodes")
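With the barrier parameter switched to syncx.SingleFlight, constructing a cache now looks like the updated tests below. A rough construction sketch; the NodeConf/RedisConf field names and the local Redis address are assumptions, not taken from this diff:

package main

import (
    "errors"

    "github.com/tal-tech/go-zero/core/stores/cache"
    "github.com/tal-tech/go-zero/core/stores/redis"
    "github.com/tal-tech/go-zero/core/syncx"
)

var errNotFound = errors.New("record not found")

func main() {
    // Cluster config layout assumed from the tests below; adjust to the actual
    // NodeConf definition if it differs.
    conf := cache.ClusterConf{
        {
            RedisConf: redis.RedisConf{
                Host: "localhost:6379",
                Type: redis.NodeType,
            },
            Weight: 100,
        },
    }

    c := cache.New(conf, syncx.NewSingleFlight(), cache.NewStat("demo"), errNotFound)
    _ = c.Set("key", "value") // errors ignored here; a real caller would check them
}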
core/stores/cache/cache_test.go (vendored, 6 lines)

@@ -23,6 +23,7 @@ type mockedNode struct {

func (mc *mockedNode) Del(keys ...string) error {
    var be errorx.BatchError

    for _, key := range keys {
        if _, ok := mc.vals[key]; !ok {
            be.Add(mc.errNotFound)

@@ -30,6 +31,7 @@ func (mc *mockedNode) Del(keys ...string) error {
            delete(mc.vals, key)
        }
    }

    return be.Err()
}

@@ -102,7 +104,7 @@ func TestCache_SetDel(t *testing.T) {
            Weight: 100,
        },
    }
    c := New(conf, syncx.NewSharedCalls(), NewStat("mock"), errPlaceholder)
    c := New(conf, syncx.NewSingleFlight(), NewStat("mock"), errPlaceholder)
    for i := 0; i < total; i++ {
        if i%2 == 0 {
            assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))

@@ -140,7 +142,7 @@ func TestCache_OneNode(t *testing.T) {
            Weight: 100,
        },
    }
    c := New(conf, syncx.NewSharedCalls(), NewStat("mock"), errPlaceholder)
    c := New(conf, syncx.NewSingleFlight(), NewStat("mock"), errPlaceholder)
    for i := 0; i < total; i++ {
        if i%2 == 0 {
            assert.Nil(t, c.Set(fmt.Sprintf("key/%d", i), i))
core/stores/cache/cachenode.go (vendored, 19 lines)

@@ -29,7 +29,7 @@ type cacheNode struct {
    rds *redis.Redis
    expiry time.Duration
    notFoundExpiry time.Duration
    barrier syncx.SharedCalls
    barrier syncx.SingleFlight
    r *rand.Rand
    lock *sync.Mutex
    unstableExpiry mathx.Unstable

@@ -43,7 +43,7 @@ type cacheNode struct {
// st is used to stat the cache.
// errNotFound defines the error that returned on cache not found.
// opts are the options that customize the cacheNode.
func NewNode(rds *redis.Redis, barrier syncx.SharedCalls, st *Stat,
func NewNode(rds *redis.Redis, barrier syncx.SingleFlight, st *Stat,
    errNotFound error, opts ...Option) Cache {
    o := newOptions(opts...)
    return cacheNode{

@@ -65,9 +65,18 @@ func (c cacheNode) Del(keys ...string) error {
        return nil
    }

    if _, err := c.rds.Del(keys...); err != nil {
        logx.Errorf("failed to clear cache with keys: %q, error: %v", formatKeys(keys), err)
        c.asyncRetryDelCache(keys...)
    if len(keys) > 1 && c.rds.Type == redis.ClusterType {
        for _, key := range keys {
            if _, err := c.rds.Del(key); err != nil {
                logx.Errorf("failed to clear cache with key: %q, error: %v", key, err)
                c.asyncRetryDelCache(key)
            }
        }
    } else {
        if _, err := c.rds.Del(keys...); err != nil {
            logx.Errorf("failed to clear cache with keys: %q, error: %v", formatKeys(keys), err)
            c.asyncRetryDelCache(keys...)
        }
    }

    return nil
core/stores/cache/cachenode_test.go (vendored, 30 lines)

@@ -29,6 +29,7 @@ func init() {
func TestCacheNode_DelCache(t *testing.T) {
    store, clean, err := redistest.CreateRedis()
    assert.Nil(t, err)
    store.Type = redis.ClusterType
    defer clean()

    cn := cacheNode{

@@ -49,13 +50,30 @@ func TestCacheNode_DelCache(t *testing.T) {
    assert.Nil(t, cn.Del("first", "second"))
}

func TestCacheNode_DelCacheWithErrors(t *testing.T) {
    store, clean, err := redistest.CreateRedis()
    assert.Nil(t, err)
    store.Type = redis.ClusterType
    clean()

    cn := cacheNode{
        rds: store,
        r: rand.New(rand.NewSource(time.Now().UnixNano())),
        lock: new(sync.Mutex),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
        stat: NewStat("any"),
        errNotFound: errTestNotFound,
    }
    assert.Nil(t, cn.Del("third", "fourth"))
}

func TestCacheNode_InvalidCache(t *testing.T) {
    s, err := miniredis.Run()
    assert.Nil(t, err)
    defer s.Close()

    cn := cacheNode{
        rds: redis.NewRedis(s.Addr(), redis.NodeType),
        rds: redis.New(s.Addr()),
        r: rand.New(rand.NewSource(time.Now().UnixNano())),
        lock: new(sync.Mutex),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
@@ -78,7 +96,7 @@ func TestCacheNode_Take(t *testing.T) {
    cn := cacheNode{
        rds: store,
        r: rand.New(rand.NewSource(time.Now().UnixNano())),
        barrier: syncx.NewSharedCalls(),
        barrier: syncx.NewSingleFlight(),
        lock: new(sync.Mutex),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
        stat: NewStat("any"),

@@ -105,7 +123,7 @@ func TestCacheNode_TakeNotFound(t *testing.T) {
    cn := cacheNode{
        rds: store,
        r: rand.New(rand.NewSource(time.Now().UnixNano())),
        barrier: syncx.NewSharedCalls(),
        barrier: syncx.NewSingleFlight(),
        lock: new(sync.Mutex),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
        stat: NewStat("any"),

@@ -144,7 +162,7 @@ func TestCacheNode_TakeWithExpire(t *testing.T) {
    cn := cacheNode{
        rds: store,
        r: rand.New(rand.NewSource(time.Now().UnixNano())),
        barrier: syncx.NewSharedCalls(),
        barrier: syncx.NewSingleFlight(),
        lock: new(sync.Mutex),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
        stat: NewStat("any"),

@@ -171,7 +189,7 @@ func TestCacheNode_String(t *testing.T) {
    cn := cacheNode{
        rds: store,
        r: rand.New(rand.NewSource(time.Now().UnixNano())),
        barrier: syncx.NewSharedCalls(),
        barrier: syncx.NewSingleFlight(),
        lock: new(sync.Mutex),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
        stat: NewStat("any"),

@@ -188,7 +206,7 @@ func TestCacheValueWithBigInt(t *testing.T) {
    cn := cacheNode{
        rds: store,
        r: rand.New(rand.NewSource(time.Now().UnixNano())),
        barrier: syncx.NewSharedCalls(),
        barrier: syncx.NewSingleFlight(),
        lock: new(sync.Mutex),
        unstableExpiry: mathx.NewUnstable(expiryDeviation),
        stat: NewStat("any"),
@@ -1,7 +1,7 @@
package clickhouse

import (
    // imports the driver.
    // imports the driver, don't remove this comment, golint requires.
    _ "github.com/ClickHouse/clickhouse-go"
    "github.com/tal-tech/go-zero/core/stores/sqlx"
)
@@ -667,10 +667,12 @@ func TestRedis_HyperLogLog(t *testing.T) {
    assert.NotNil(t, err)

    runOnCluster(t, func(cluster Store) {
        _, err := cluster.Pfadd("key")
        assert.NotNil(t, err)
        _, err = cluster.Pfcount("key")
        assert.NotNil(t, err)
        ok, err := cluster.Pfadd("key", "value")
        assert.Nil(t, err)
        assert.True(t, ok)
        val, err := cluster.Pfcount("key")
        assert.Nil(t, err)
        assert.Equal(t, int64(1), val)
    })
}
@@ -11,7 +11,7 @@ import (
    "github.com/tal-tech/go-zero/core/timex"
)

const slowThreshold = time.Millisecond * 500
const defaultSlowThreshold = time.Millisecond * 500

// ErrNotFound is an alias of mgo.ErrNotFound.
var ErrNotFound = mgo.ErrNotFound

@@ -203,7 +203,7 @@ func (c *decoratedCollection) logDuration(method string, duration time.Duration,
    if e != nil {
        logx.Error(err)
    } else if err != nil {
        if duration > slowThreshold {
        if duration > slowThreshold.Load() {
            logx.WithDuration(duration).Slowf("[MONGO] mongo(%s) - slowcall - %s - fail(%s) - %s",
                c.name, method, err.Error(), string(content))
        } else {

@@ -211,7 +211,7 @@ func (c *decoratedCollection) logDuration(method string, duration time.Duration,
                c.name, method, err.Error(), string(content))
        }
    } else {
        if duration > slowThreshold {
        if duration > slowThreshold.Load() {
            logx.WithDuration(duration).Slowf("[MONGO] mongo(%s) - slowcall - %s - ok - %s",
                c.name, method, string(content))
        } else {
@@ -5,9 +5,10 @@
package internal

import (
    reflect "reflect"

    mgo "github.com/globalsign/mgo"
    gomock "github.com/golang/mock/gomock"
    reflect "reflect"
)

// MockMgoCollection is a mock of MgoCollection interface

@@ -5,9 +5,10 @@
package mongo

import (
    reflect "reflect"

    bson "github.com/globalsign/mgo/bson"
    gomock "github.com/golang/mock/gomock"
    reflect "reflect"
)

// MockIter is a mock of Iter interface
@@ -8,23 +8,14 @@ import (
    "github.com/tal-tech/go-zero/core/breaker"
)

type (
    options struct {
        timeout time.Duration
    }

    // Option defines the method to customize a mongo model.
    Option func(opts *options)

    // A Model is a mongo model.
    Model struct {
        session *concurrentSession
        db *mgo.Database
        collection string
        brk breaker.Breaker
        opts []Option
    }
)
// A Model is a mongo model.
type Model struct {
    session *concurrentSession
    db *mgo.Database
    collection string
    brk breaker.Breaker
    opts []Option
}

// MustNewModel returns a Model, exits on errors.
func MustNewModel(url, collection string, opts ...Option) *Model {
core/stores/mongo/model_test.go (Normal file, 14 lines)

@@ -0,0 +1,14 @@
package mongo

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestWithTimeout(t *testing.T) {
    o := defaultOptions()
    WithTimeout(time.Second)(o)
    assert.Equal(t, time.Second, o.timeout)
}
core/stores/mongo/options.go (Normal file, 29 lines)

@@ -0,0 +1,29 @@
package mongo

import (
    "time"

    "github.com/tal-tech/go-zero/core/syncx"
)

var slowThreshold = syncx.ForAtomicDuration(defaultSlowThreshold)

type (
    options struct {
        timeout time.Duration
    }

    // Option defines the method to customize a mongo model.
    Option func(opts *options)
)

// SetSlowThreshold sets the slow threshold.
func SetSlowThreshold(threshold time.Duration) {
    slowThreshold.Set(threshold)
}

func defaultOptions() *options {
    return &options{
        timeout: defaultTimeout,
    }
}
core/stores/mongo/options_test.go (Normal file, 14 lines)

@@ -0,0 +1,14 @@
package mongo

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestSetSlowThreshold(t *testing.T) {
    assert.Equal(t, defaultSlowThreshold, slowThreshold.Load())
    SetSlowThreshold(time.Second)
    assert.Equal(t, time.Second, slowThreshold.Load())
}
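The new options.go above adds a package-level, atomically stored slow threshold and a defaultOptions helper; WithTimeout (exercised in model_test.go) bounds session acquisition. A usage sketch, assuming a reachable MongoDB instance and that the dial URL format accepted by MustNewModel is the usual mgo one:

package main

import (
    "time"

    "github.com/tal-tech/go-zero/core/stores/mongo"
)

func main() {
    // Raise the slow-call threshold from the 500ms default defined above.
    mongo.SetSlowThreshold(time.Second)

    // WithTimeout bounds how long takeSession may wait for a pooled session.
    // MustNewModel exits the process if the URL cannot be dialed.
    model := mongo.MustNewModel("localhost:27017/demo", "users", mongo.WithTimeout(3*time.Second))
    _ = model
}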
@@ -57,9 +57,7 @@ func (cs *concurrentSession) putSession(session *mgo.Session) {
}

func (cs *concurrentSession) takeSession(opts ...Option) (*mgo.Session, error) {
    o := &options{
        timeout: defaultTimeout,
    }
    o := defaultOptions()
    for _, opt := range opts {
        opt(o)
    }
@@ -11,8 +11,8 @@ var (
    // ErrNotFound is an alias of mgo.ErrNotFound.
    ErrNotFound = mgo.ErrNotFound

    // can't use one SharedCalls per conn, because multiple conns may share the same cache key.
    sharedCalls = syncx.NewSharedCalls()
    // can't use one SingleFlight per conn, because multiple conns may share the same cache key.
    sharedCalls = syncx.NewSingleFlight()
    stats = cache.NewStat("mongoc")
)

@@ -120,7 +120,7 @@ func TestStatCacheFails(t *testing.T) {
    log.SetOutput(ioutil.Discard)
    defer log.SetOutput(os.Stdout)

    r := redis.NewRedis("localhost:59999", redis.NodeType)
    r := redis.New("localhost:59999")
    cach := cache.NewNode(r, sharedCalls, stats, mgo.ErrNotFound)
    c := newCollection(dummyConn{}, cach)
@@ -1,7 +1,7 @@
package postgres

import (
    // imports the driver.
    // imports the driver, don't remove this comment, golint requires.
    _ "github.com/lib/pq"
    "github.com/tal-tech/go-zero/core/stores/sqlx"
)
@@ -17,7 +17,7 @@ type (
    Host string
    Type string `json:",default=node,options=node|cluster"`
    Pass string `json:",optional"`
    Tls bool `json:",default=false,options=true|false"`
    Tls bool `json:",optional"`
}

// A RedisKeyConf is a redis config with key.
@@ -9,13 +9,13 @@ import (
    "github.com/tal-tech/go-zero/core/timex"
)

func process(proc func(red.Cmder) error) func(red.Cmder) error {
func checkDuration(proc func(red.Cmder) error) func(red.Cmder) error {
    return func(cmd red.Cmder) error {
        start := timex.Now()

        defer func() {
            duration := timex.Since(start)
            if duration > slowThreshold {
            if duration > slowThreshold.Load() {
                var buf strings.Builder
                for i, arg := range cmd.Args() {
                    if i > 0 {
@@ -9,6 +9,7 @@ import (
    red "github.com/go-redis/redis"
    "github.com/tal-tech/go-zero/core/breaker"
    "github.com/tal-tech/go-zero/core/mapping"
    "github.com/tal-tech/go-zero/core/syncx"
)

const (

@@ -21,12 +22,14 @@ const (

    blockingQueryTimeout = 5 * time.Second
    readWriteTimeout = 2 * time.Second

    slowThreshold = time.Millisecond * 100
    defaultSlowThreshold = time.Millisecond * 100
)

// ErrNilNode is an error that indicates a nil redis node.
var ErrNilNode = errors.New("nil redis node")
var (
    // ErrNilNode is an error that indicates a nil redis node.
    ErrNilNode = errors.New("nil redis node")
    slowThreshold = syncx.ForAtomicDuration(defaultSlowThreshold)
)

type (
    // Option defines the method to customize a Redis.

@@ -71,6 +74,8 @@ type (
    IntCmd = red.IntCmd
    // FloatCmd is an alias of redis.FloatCmd.
    FloatCmd = red.FloatCmd
    // StringCmd is an alias of redis.StringCmd.
    StringCmd = red.StringCmd
)

// New returns a Redis with given options.

@@ -88,6 +93,7 @@ func New(addr string, opts ...Option) *Redis {
    return r
}

// Deprecated: use New instead, will be removed in v2.
// NewRedis returns a Redis.
func NewRedis(redisAddr, redisType string, redisPass ...string) *Redis {
    var opts []Option

@@ -1755,6 +1761,11 @@ func Cluster() Option {
    }
}

// SetSlowThreshold sets the slow threshold.
func SetSlowThreshold(threshold time.Duration) {
    slowThreshold.Set(threshold)
}

// WithPass customizes the given Redis with given password.
func WithPass(pass string) Option {
    return func(r *Redis) {
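The redis.go changes above make the slow-call threshold configurable at runtime and steer callers from the deprecated NewRedis toward New with options. A brief sketch combining the pieces added in this hunk; the address and password are placeholders:

package main

import (
    "fmt"
    "time"

    "github.com/tal-tech/go-zero/core/stores/redis"
)

func main() {
    // Raise or lower the slow-log threshold (defaults to 100ms per the constant above).
    redis.SetSlowThreshold(50 * time.Millisecond)

    // New replaces the deprecated NewRedis; WithPass and Cluster are the options shown in this diff.
    rds := redis.New("localhost:6379", redis.WithPass("secret"))

    // Ping reports reachability, as exercised in the tests below.
    fmt.Println("ping:", rds.Ping())
}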
@@ -11,6 +11,7 @@ import (
    "github.com/alicebob/miniredis/v2"
    red "github.com/go-redis/redis"
    "github.com/stretchr/testify/assert"
    "github.com/tal-tech/go-zero/core/stringx"
)

func TestRedis_Exists(t *testing.T) {

@@ -186,7 +187,7 @@ func TestRedis_Hscan(t *testing.T) {
    key := "hash:test"
    fieldsAndValues := make(map[string]string)
    for i := 0; i < 1550; i++ {
        fieldsAndValues["filed_"+strconv.Itoa(i)] = randomStr(i)
        fieldsAndValues["filed_"+strconv.Itoa(i)] = stringx.Randn(i)
    }
    err := client.Hmset(key, fieldsAndValues)
    assert.Nil(t, err)

@@ -256,13 +257,24 @@ func TestRedis_Keys(t *testing.T) {
func TestRedis_HyperLogLog(t *testing.T) {
    runOnRedis(t, func(client *Redis) {
        client.Ping()
        r := New(client.Addr, badType())
        _, err := r.Pfadd("key1")
        assert.NotNil(t, err)
        _, err = r.Pfcount("*")
        assert.NotNil(t, err)
        err = r.Pfmerge("*")
        assert.NotNil(t, err)
        r := New(client.Addr)
        ok, err := r.Pfadd("key1", "val1")
        assert.Nil(t, err)
        assert.True(t, ok)
        val, err := r.Pfcount("key1")
        assert.Nil(t, err)
        assert.Equal(t, int64(1), val)
        ok, err = r.Pfadd("key2", "val2")
        assert.Nil(t, err)
        assert.True(t, ok)
        val, err = r.Pfcount("key2")
        assert.Nil(t, err)
        assert.Equal(t, int64(1), val)
        err = r.Pfmerge("key1", "key2")
        assert.Nil(t, err)
        val, err = r.Pfcount("key1")
        assert.Nil(t, err)
        assert.Equal(t, int64(2), val)
    })
}
@@ -539,7 +551,7 @@ func TestRedis_Sscan(t *testing.T) {
    key := "list"
    var list []string
    for i := 0; i < 1550; i++ {
        list = append(list, randomStr(i))
        list = append(list, stringx.Randn(i))
    }
    lens, err := client.Sadd(key, list)
    assert.Nil(t, err)

@@ -963,7 +975,7 @@ func TestRedis_Pipelined(t *testing.T) {
func TestRedisString(t *testing.T) {
    runOnRedis(t, func(client *Redis) {
        client.Ping()
        _, err := getRedis(NewRedis(client.Addr, ClusterType))
        _, err := getRedis(New(client.Addr, Cluster()))
        assert.Nil(t, err)
        assert.Equal(t, client.Addr, client.String())
        assert.NotNil(t, New(client.Addr, badType()).Ping())

@@ -1073,9 +1085,15 @@ func TestRedisGeo(t *testing.T) {
    })
}

func TestSetSlowThreshold(t *testing.T) {
    assert.Equal(t, defaultSlowThreshold, slowThreshold.Load())
    SetSlowThreshold(time.Second)
    assert.Equal(t, time.Second, slowThreshold.Load())
}

func TestRedis_WithPass(t *testing.T) {
    runOnRedis(t, func(client *Redis) {
        err := NewRedis(client.Addr, NodeType, "any").Ping()
        err := New(client.Addr, WithPass("any")).Ping()
        assert.NotNil(t, err)
    })
}

@@ -1095,7 +1113,7 @@ func runOnRedis(t *testing.T, fn func(client *Redis)) {
            client.Close()
        }
    }()
    fn(NewRedis(s.Addr(), NodeType))
    fn(New(s.Addr()))
}

func runOnRedisTLS(t *testing.T, fn func(client *Redis)) {
@@ -10,10 +10,10 @@ import (
func TestBlockingNode(t *testing.T) {
    r, err := miniredis.Run()
    assert.Nil(t, err)
    node, err := CreateBlockingNode(NewRedis(r.Addr(), NodeType))
    node, err := CreateBlockingNode(New(r.Addr()))
    assert.Nil(t, err)
    node.Close()
    node, err = CreateBlockingNode(NewRedis(r.Addr(), ClusterType))
    node, err = CreateBlockingNode(New(r.Addr(), Cluster()))
    assert.Nil(t, err)
    node.Close()
}
Some files were not shown because too many files have changed in this diff.