Compare commits

...

45 Commits

Author SHA1 Message Date
kevin
1fd2ef9347 make tests faster 2020-10-21 21:43:41 +08:00
kevin
efffb40fa3 update wechat info 2020-10-21 20:26:35 +08:00
kevin
9c8f31cf83 can only specify one origin in cors 2020-10-21 16:47:49 +08:00
kevin
96cb7af728 make tests faster 2020-10-21 15:18:22 +08:00
Keson
41964f9d52 gozero template (#147)
* model/rpc generate code from template cache

* delete unused(deprecated) code

* support template init|update|clean|revert

* model: return the execute result for insert and update operation

* // deprecated: containsAny

* add template test

* add default buildVersion

* update build version
2020-10-21 14:59:35 +08:00
kevin
fe0d0687f5 support cors in rest server 2020-10-21 14:10:36 +08:00
kingxt
1c1e4bca86 optimized generator formatted code (#148)
* rebase upstream

* rebase

* trim no need line

* trim no need line

* trim no need line

* update doc

* remove update

* remove no need

* remove no need

* goctl add jwt support

* goctl add jwt support

* goctl add jwt support

* goctl support import

* goctl support import

* support return ()

* revert

* refactor and rename folder to group

* remove no need

* add anonymous annotation

* optimized

* rename

* rename

* update test

* api add middleware support: usage:

@server(
    middleware: M1, M2
)

* api add middleware support: usage:

@server(
    middleware: M1, M2
)

* simple logic

* optimized

* optimized generator formatted code

* optimized generator formatted code

* add more test

Co-authored-by: kingxt <dream4kingxt@163.com>
2020-10-20 19:43:20 +08:00
kevin
1abe21aa2a export WithUnaryClientInterceptor 2020-10-20 18:03:05 +08:00
kevin
cee170f3e9 fix zrpc client interceptor calling problem 2020-10-20 17:57:41 +08:00
kevin
907efd92c9 let balancer to be customizable 2020-10-20 17:01:53 +08:00
kevin
737cd4751a rename NewPatRouter to NewRouter 2020-10-20 14:23:21 +08:00
kevin
dfe6e88529 use goctl template to generate all kinds of templates 2020-10-19 23:13:18 +08:00
kingxt
85a815bea0 fix name typo and format with newline (#143)
* rebase upstream

* rebase

* trim no need line

* trim no need line

* trim no need line

* update doc

* remove update

* remove no need

* remove no need

* goctl add jwt support

* goctl add jwt support

* goctl add jwt support

* goctl support import

* goctl support import

* support return ()

* revert

* refactor and rename folder to group

* remove no need

* add anonymous annotation

* optimized

* rename

* rename

* update test

* api add middleware support: usage:

@server(
    middleware: M1, M2
)

* api add middleware support: usage:

@server(
    middleware: M1, M2
)

* simple logic

* optimized

* bugs fix for name typo and format with newline

Co-authored-by: kingxt <dream4kingxt@163.com>
2020-10-19 21:05:00 +08:00
kingxt
aa3c391919 api add middleware support (#140)
* rebase upstream

* rebase

* trim no need line

* trim no need line

* trim no need line

* update doc

* remove update

* remove no need

* remove no need

* goctl add jwt support

* goctl add jwt support

* goctl add jwt support

* goctl support import

* goctl support import

* support return ()

* revert

* refactor and rename folder to group

* remove no need

* add anonymous annotation

* optimized

* rename

* rename

* update test

* api add middleware support: usage:

@server(
    middleware: M1, M2
)

* api add middleware support: usage:

@server(
    middleware: M1, M2
)

* simple logic

* should reverse middlewares

* optimized

* optimized

* rename

Co-authored-by: kingxt <dream4kingxt@163.com>
2020-10-19 18:34:10 +08:00
kevin
c9b0ac1ee4 add more tests 2020-10-19 15:49:11 +08:00
mywaystay
33faab61a3 add redis Zrevrank (#137)
* update goctl rpc template log print url

* add redis Zrevrank

Co-authored-by: zhangkai <zhangkai@laoyuegou.com>
2020-10-19 15:30:19 +08:00
kevin
81bf122fa4 update breaker doc 2020-10-17 22:58:30 +08:00
firefantasy
a14bd309a9 to correct breaker interface annotation (#136) 2020-10-17 22:55:36 +08:00
kevin
ea7e410145 update doc 2020-10-17 19:25:30 +08:00
kevin
e81358e7fa update doc 2020-10-17 19:20:01 +08:00
kevin
695ea69bfc add logx.Alert 2020-10-17 19:11:01 +08:00
kevin
d2ed14002c add fx.Split 2020-10-17 12:51:46 +08:00
kingxt
1d9c4a4c4b add anonymous annotation (#134)
* rebase upstream

* rebase

* trim no need line

* trim no need line

* trim no need line

* update doc

* remove update

* remove no need

* remove no need

* goctl add jwt support

* goctl add jwt support

* goctl add jwt support

* goctl support import

* goctl support import

* support return ()

* revert

* refactor and rename folder to group

* remove no need

* add anonymous annotation

* optimized

* rename

* rename

* update test

* optimized new command

Co-authored-by: kingxt <dream4kingxt@163.com>
2020-10-16 19:35:18 +08:00
mywaystay
7e83895c6e update goctl rpc template log print url (#133) 2020-10-16 16:21:22 +08:00
kingxt
dc0534573c print more message when parse error (#131)
* rebase upstream

* rebase

* trim no need line

* trim no need line

* trim no need line

* update doc

* remove update

* remove no need

* remove no need

* goctl add jwt support

* goctl add jwt support

* goctl add jwt support

* goctl support import

* goctl support import

* support return ()

* revert

* refactor and rename folder to group

* print more error info when parse error

* remove no need

* refactor

Co-authored-by: kingxt <dream4kingxt@163.com>
2020-10-16 15:56:29 +08:00
kevin
fe3739b7f3 fix golint issues 2020-10-16 11:13:55 +08:00
kevin
94645481b1 fix golint issues 2020-10-16 10:50:43 +08:00
sjatsh
338caf9927 delete goctl rpc main tpl no use import (#130) 2020-10-16 10:44:04 +08:00
kevin
9cc979960f update doc 2020-10-15 17:39:49 +08:00
kevin
f904710811 support api templates 2020-10-15 16:36:49 +08:00
kevin
8291eabc2c assert len > 0 2020-10-15 14:25:10 +08:00
codingfanlt
901fadb5d3 fix: fx/fn.Head func will forever block when n is less than 1 (#128)
* fix fx/Stream Head func will forever block when n is less than 1

* update test case

* update test case
2020-10-15 14:10:37 +08:00
kevin
c824e9e118 fail fast when rolling window size is zero 2020-10-15 11:40:31 +08:00
codingfanlt
6f49639f80 fix syncx/barrier test case (#123) 2020-10-13 19:29:20 +08:00
Keson
7d4a548d29 fix: template cache key (#121) 2020-10-12 14:34:11 +08:00
kevin
936dd67008 simplify code generation 2020-10-12 11:39:50 +08:00
super_mario
84cc41df42 stop rpc server when main function exit (#120)
add defer s.Stop() to mainTemplate, in order to stop rpc server when main function exit
2020-10-12 11:37:43 +08:00
kevin
da1a93e932 faster the tests 2020-10-11 22:07:50 +08:00
Keson
7e61555d42 Gozero sqlgen patch (#119)
* merge upstream

* optimize insert logic

* reactor functions
2020-10-11 21:55:44 +08:00
kevin
7a134ec64d update readme 2020-10-11 20:13:03 +08:00
kevin
d123b00e73 add qq qrcode 2020-10-11 20:02:06 +08:00
kevin
20d53add46 update readme 2020-10-11 19:42:40 +08:00
kevin
a1b141d31a make tests faster 2020-10-10 18:22:49 +08:00
Keson
0a9c427443 Goctl rpc patch (#117)
* remove mock generation

* add: proto project import

* update document

* remove mock generation

* add: proto project import

* update document

* remove NL

* update document

* optimize code

* add test

* add test
2020-10-10 16:19:46 +08:00
kevin
c32759d735 make tests race-free 2020-10-10 15:36:07 +08:00
206 changed files with 3845 additions and 5388 deletions

View File

@@ -37,7 +37,6 @@ type (
BloomFilter struct {
bits uint
maps uint
bitSet BitSetProvider
}
)

View File

@@ -2,18 +2,18 @@ package bloom
import (
"testing"
"time"
"github.com/alicebob/miniredis"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/stores/redis"
)
func TestRedisBitSet_New_Set_Test(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error("Miniredis could not start")
}
defer s.Close()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
store := redis.NewRedis(s.Addr(), redis.NodeType)
bitSet := newRedisBitSet(store, "test_key", 1024)
@@ -46,11 +46,9 @@ func TestRedisBitSet_New_Set_Test(t *testing.T) {
}
func TestRedisBitSet_Add(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error("Miniredis could not start")
}
defer s.Close()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
store := redis.NewRedis(s.Addr(), redis.NodeType)
filter := New(store, "test_key", 64)
@@ -60,3 +58,22 @@ func TestRedisBitSet_Add(t *testing.T) {
assert.Nil(t, err)
assert.True(t, ok)
}
func createMiniRedis() (r *miniredis.Miniredis, clean func(), err error) {
r, err = miniredis.Run()
if err != nil {
return nil, nil, err
}
return r, func() {
ch := make(chan lang.PlaceholderType)
go func() {
r.Close()
close(ch)
}()
select {
case <-ch:
case <-time.After(time.Second):
}
}, nil
}

View File

@@ -25,7 +25,7 @@ type (
Acceptable func(err error) bool
Breaker interface {
// Name returns the name of the netflixBreaker.
// Name returns the name of the Breaker.
Name() string
// Allow checks if the request is allowed.
@@ -34,34 +34,34 @@ type (
// If not allow, ErrServiceUnavailable will be returned.
Allow() (Promise, error)
// Do runs the given request if the netflixBreaker accepts it.
// Do returns an error instantly if the netflixBreaker rejects the request.
// If a panic occurs in the request, the netflixBreaker handles it as an error
// Do runs the given request if the Breaker accepts it.
// Do returns an error instantly if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again.
Do(req func() error) error
// DoWithAcceptable runs the given request if the netflixBreaker accepts it.
// Do returns an error instantly if the netflixBreaker rejects the request.
// If a panic occurs in the request, the netflixBreaker handles it as an error
// DoWithAcceptable runs the given request if the Breaker accepts it.
// DoWithAcceptable returns an error instantly if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again.
// acceptable checks if it's a successful call, even if the err is not nil.
DoWithAcceptable(req func() error, acceptable Acceptable) error
// DoWithFallback runs the given request if the netflixBreaker accepts it.
// DoWithFallback runs the fallback if the netflixBreaker rejects the request.
// If a panic occurs in the request, the netflixBreaker handles it as an error
// DoWithFallback runs the given request if the Breaker accepts it.
// DoWithFallback runs the fallback if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again.
DoWithFallback(req func() error, fallback func(err error) error) error
// DoWithFallbackAcceptable runs the given request if the netflixBreaker accepts it.
// DoWithFallback runs the fallback if the netflixBreaker rejects the request.
// If a panic occurs in the request, the netflixBreaker handles it as an error
// DoWithFallbackAcceptable runs the given request if the Breaker accepts it.
// DoWithFallbackAcceptable runs the fallback if the Breaker rejects the request.
// If a panic occurs in the request, the Breaker handles it as an error
// and causes the same panic again.
// acceptable checks if it's a successful call, even if the err is not nil.
DoWithFallbackAcceptable(req func() error, fallback func(err error) error, acceptable Acceptable) error
}
BreakerOption func(breaker *circuitBreaker)
Option func(breaker *circuitBreaker)
Promise interface {
Accept()
@@ -89,7 +89,7 @@ type (
}
)
func NewBreaker(opts ...BreakerOption) Breaker {
func NewBreaker(opts ...Option) Breaker {
var b circuitBreaker
for _, opt := range opts {
opt(&b)
@@ -127,7 +127,7 @@ func (cb *circuitBreaker) Name() string {
return cb.name
}
func WithName(name string) BreakerOption {
func WithName(name string) Option {
return func(b *circuitBreaker) {
b.name = name
}

View File

@@ -2,7 +2,6 @@ package breaker
import (
"errors"
"math"
"math/rand"
"testing"
"time"
@@ -157,7 +156,7 @@ func TestGoogleBreakerSelfProtection(t *testing.T) {
t.Run("total request > 100, total < 2 * success", func(t *testing.T) {
b := getGoogleBreaker()
size := rand.Intn(10000)
accepts := int(math.Ceil(float64(size))) + 1
accepts := size + 1
markSuccess(b, accepts)
markFailed(b, size-accepts)
assert.Nil(t, b.accept())

View File

@@ -6,6 +6,8 @@ import (
"io"
)
const unzipLimit = 100 * 1024 * 1024 // 100MB
func Gzip(bs []byte) []byte {
var b bytes.Buffer
@@ -24,8 +26,7 @@ func Gunzip(bs []byte) ([]byte, error) {
defer r.Close()
var c bytes.Buffer
_, err = io.Copy(&c, r)
if err != nil {
if _, err = io.Copy(&c, io.LimitReader(r, unzipLimit)); err != nil {
return nil, err
}

View File

@@ -30,8 +30,7 @@ FstHGSkUYFLe+nl1dEKHbD+/Zt95L757J3xGTrwoTc7KCTxbrgn+stn0w52BNjj/
kIE2ko4lbh/v8Fl14AyVR9msfKtKOnhe5FCT72mdtApr+qvzcC3q9hfXwkyQU32p
v7q5UimZ205iKSBmgQIDAQAB
-----END PUBLIC KEY-----`
testBody = `this is the content`
encryptedBody = `49e7bc15640e5d927fd3f129b749536d0755baf03a0f35fc914ff1b7b8ce659e5fe3a598442eb908c5995e28bacd3d76e4420bb05b6bfc177040f66c6976f680f7123505d626ab96a9db1151f45c93bc0262db9087b9fb6801715f76f902e644a20029262858f05b0d10540842204346ac1d6d8f29cc5d47dab79af75d922ef2`
testBody = `this is the content`
)
func TestCryption(t *testing.T) {

View File

@@ -29,7 +29,6 @@ type (
name string
lock sync.Mutex
data map[string]interface{}
evicts *list.List
expire time.Duration
timingWheel *TimingWheel
lruCache lru
@@ -278,18 +277,15 @@ func (cs *cacheStat) statLoop() {
ticker := time.NewTicker(statInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
hit := atomic.SwapUint64(&cs.hit, 0)
miss := atomic.SwapUint64(&cs.miss, 0)
total := hit + miss
if total == 0 {
continue
}
percent := 100 * float32(hit) / float32(total)
logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
cs.name, total, percent, cs.sizeCallback(), hit, miss)
for range ticker.C {
hit := atomic.SwapUint64(&cs.hit, 0)
miss := atomic.SwapUint64(&cs.miss, 0)
total := hit + miss
if total == 0 {
continue
}
percent := 100 * float32(hit) / float32(total)
logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
cs.name, total, percent, cs.sizeCallback(), hit, miss)
}
}

View File

@@ -6,6 +6,10 @@ type Ring struct {
}
func NewRing(n int) *Ring {
if n < 1 {
panic("n should be greater than 0")
}
return &Ring{
elements: make([]interface{}, n),
}

View File

@@ -6,6 +6,12 @@ import (
"github.com/stretchr/testify/assert"
)
func TestNewRing(t *testing.T) {
assert.Panics(t, func() {
NewRing(0)
})
}
func TestRingLess(t *testing.T) {
ring := NewRing(5)
for i := 0; i < 3; i++ {

View File

@@ -22,6 +22,10 @@ type (
)
func NewRollingWindow(size int, interval time.Duration, opts ...RollingWindowOption) *RollingWindow {
if size < 1 {
panic("size must be greater than 0")
}
w := &RollingWindow{
size: size,
win: newWindow(size),

View File

@@ -11,6 +11,13 @@ import (
const duration = time.Millisecond * 50
func TestNewRollingWindow(t *testing.T) {
assert.NotNil(t, NewRollingWindow(10, time.Second))
assert.Panics(t, func() {
NewRollingWindow(0, time.Second)
})
}
func TestRollingWindowAdd(t *testing.T) {
const size = 3
r := NewRollingWindow(size, duration)
@@ -81,7 +88,7 @@ func TestRollingWindowReduce(t *testing.T) {
for _, test := range tests {
t.Run(stringx.Rand(), func(t *testing.T) {
r := test.win
for x := 0; x < size; x = x + 1 {
for x := 0; x < size; x++ {
for i := 0; i <= x; i++ {
r.Add(float64(i))
}

View File

@@ -10,8 +10,10 @@ import (
func TestShrinkDeadlineLess(t *testing.T) {
deadline := time.Now().Add(time.Second)
ctx, _ := context.WithDeadline(context.Background(), deadline)
ctx, _ = ShrinkDeadline(ctx, time.Minute)
ctx, cancel := context.WithDeadline(context.Background(), deadline)
defer cancel()
ctx, cancel = ShrinkDeadline(ctx, time.Minute)
defer cancel()
dl, ok := ctx.Deadline()
assert.True(t, ok)
assert.Equal(t, deadline, dl)
@@ -19,8 +21,10 @@ func TestShrinkDeadlineLess(t *testing.T) {
func TestShrinkDeadlineMore(t *testing.T) {
deadline := time.Now().Add(time.Minute)
ctx, _ := context.WithDeadline(context.Background(), deadline)
ctx, _ = ShrinkDeadline(ctx, time.Second)
ctx, cancel := context.WithDeadline(context.Background(), deadline)
defer cancel()
ctx, cancel = ShrinkDeadline(ctx, time.Second)
defer cancel()
dl, ok := ctx.Deadline()
assert.True(t, ok)
assert.True(t, dl.Before(deadline))

View File

@@ -12,7 +12,8 @@ func TestContextCancel(t *testing.T) {
c := context.WithValue(context.Background(), "key", "value")
c1, cancel := context.WithCancel(c)
o := ValueOnlyFrom(c1)
c2, _ := context.WithCancel(o)
c2, cancel2 := context.WithCancel(o)
defer cancel2()
contexts := []context.Context{c1, c2}
for _, c := range contexts {
@@ -35,7 +36,8 @@ func TestContextCancel(t *testing.T) {
}
func TestContextDeadline(t *testing.T) {
c, _ := context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
c, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
cancel()
o := ValueOnlyFrom(c)
select {
case <-time.After(100 * time.Millisecond):
@@ -43,9 +45,11 @@ func TestContextDeadline(t *testing.T) {
t.Fatal("ValueOnlyContext: context should not have timed out")
}
c, _ = context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
c, cancel = context.WithDeadline(context.Background(), time.Now().Add(10*time.Millisecond))
cancel()
o = ValueOnlyFrom(c)
c, _ = context.WithDeadline(o, time.Now().Add(20*time.Millisecond))
c, cancel = context.WithDeadline(o, time.Now().Add(20*time.Millisecond))
defer cancel()
select {
case <-time.After(100 * time.Millisecond):
t.Fatal("ValueOnlyContext+Deadline: context should have timed out")

View File

@@ -8,7 +8,7 @@ import (
)
const (
indexOfKey = iota
_ = iota
indexOfId
)

core/errorx/callchain.go (new file, 11 lines)
View File

@@ -0,0 +1,11 @@
package errorx
func Chain(fns ...func() error) error {
for _, fn := range fns {
if err := fn(); err != nil {
return err
}
}
return nil
}

View File

@@ -0,0 +1,27 @@
package errorx
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
func TestChain(t *testing.T) {
var errDummy = errors.New("dummy")
assert.Nil(t, Chain(func() error {
return nil
}, func() error {
return nil
}))
assert.Equal(t, errDummy, Chain(func() error {
return errDummy
}, func() error {
return nil
}))
assert.Equal(t, errDummy, Chain(func() error {
return nil
}, func() error {
return errDummy
}))
}

View File

@@ -86,9 +86,7 @@ func TestBuldExecutorFlushSlowTasks(t *testing.T) {
time.Sleep(time.Millisecond * 100)
lock.Lock()
defer lock.Unlock()
for _, i := range tasks {
result = append(result, i)
}
result = append(result, tasks...)
}, WithBulkTasks(1000))
for i := 0; i < total; i++ {
assert.Nil(t, exec.Add(i))

View File

@@ -68,6 +68,7 @@ func Range(source <-chan interface{}) Stream {
}
// Buffer buffers the items into a queue with size n.
// It can balance the producer and the consumer if their processing throughput don't match.
func (p Stream) Buffer(n int) Stream {
if n < 0 {
n = 0
@@ -159,6 +160,10 @@ func (p Stream) Group(fn KeyFunc) Stream {
}
func (p Stream) Head(n int64) Stream {
if n < 1 {
panic("n must be greater than 0")
}
source := make(chan interface{})
go func() {
@@ -243,7 +248,37 @@ func (p Stream) Sort(less LessFunc) Stream {
return Just(items...)
}
// Split splits the elements into chunk with size up to n,
// might be less than n on tailing elements.
func (p Stream) Split(n int) Stream {
if n < 1 {
panic("n should be greater than 0")
}
source := make(chan interface{})
go func() {
var chunk []interface{}
for item := range p.source {
chunk = append(chunk, item)
if len(chunk) == n {
source <- chunk
chunk = nil
}
}
if chunk != nil {
source <- chunk
}
close(source)
}()
return Range(source)
}
func (p Stream) Tail(n int64) Stream {
if n < 1 {
panic("n should be greater than 0")
}
source := make(chan interface{})
go func() {

View File

@@ -169,6 +169,14 @@ func TestHead(t *testing.T) {
assert.Equal(t, 3, result)
}
func TestHeadZero(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4).Head(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
return nil, nil
})
})
}
func TestHeadMore(t *testing.T) {
var result int
Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
@@ -275,6 +283,22 @@ func TestSort(t *testing.T) {
})
}
func TestSplit(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(0).Done()
})
var chunks [][]interface{}
Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item interface{}) {
chunk := item.([]interface{})
chunks = append(chunks, chunk)
})
assert.EqualValues(t, [][]interface{}{
{1, 2, 3, 4},
{5, 6, 7, 8},
{9, 10},
}, chunks)
}
func TestTail(t *testing.T) {
var result int
Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
@@ -286,6 +310,14 @@ func TestTail(t *testing.T) {
assert.Equal(t, 7, result)
}
func TestTailZero(t *testing.T) {
assert.Panics(t, func() {
Just(1, 2, 3, 4).Tail(0).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
return nil, nil
})
})
}
func TestWalk(t *testing.T) {
var result int
Just(1, 2, 3, 4, 5).Walk(func(item interface{}, pipe chan<- interface{}) {

View File

@@ -153,13 +153,10 @@ func (lim *TokenLimiter) waitForRedis() {
lim.rescueLock.Unlock()
}()
for {
select {
case <-ticker.C:
if lim.store.Ping() {
atomic.StoreUint32(&lim.redisAlive, 1)
return
}
for range ticker.C {
if lim.store.Ping() {
atomic.StoreUint32(&lim.redisAlive, 1)
return
}
}
}

View File

@@ -43,6 +43,7 @@ const (
consoleMode = "console"
volumeMode = "volume"
levelAlert = "alert"
levelInfo = "info"
levelError = "error"
levelSevere = "severe"
@@ -121,6 +122,10 @@ func SetUp(c LogConf) error {
}
}
func Alert(v string) {
output(errorLog, levelAlert, v)
}
func Close() error {
if writeConsole {
return nil

View File

@@ -84,6 +84,14 @@ func TestFileLineConsoleMode(t *testing.T) {
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
}
func TestStructedLogAlert(t *testing.T) {
doTestStructedLog(t, levelAlert, func(writer io.WriteCloser) {
errorLog = writer
}, func(v ...interface{}) {
Alert(fmt.Sprint(v...))
})
}
func TestStructedLogInfo(t *testing.T) {
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
infoLog = writer
@@ -229,8 +237,10 @@ func TestSetup(t *testing.T) {
func TestDisable(t *testing.T) {
Disable()
WithKeepDays(1)
WithGzip()
var opt logOptions
WithKeepDays(1)(&opt)
WithGzip()(&opt)
assert.Nil(t, Close())
writeConsole = false
assert.Nil(t, Close())

View File

@@ -15,7 +15,7 @@ const testlog = "Stay hungry, stay foolish."
func TestCollectSysLog(t *testing.T) {
CollectSysLog()
content := getContent(captureOutput(func() {
log.Printf(testlog)
log.Print(testlog)
}))
assert.True(t, strings.Contains(content, testlog))
}

View File

@@ -29,11 +29,11 @@ func TestTraceLog(t *testing.T) {
func TestTraceError(t *testing.T) {
var buf mockWriter
atomic.StoreUint32(&initialized, 1)
errorLog = newLogWriter(log.New(&buf, "", flags))
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
l := WithContext(ctx).(*traceLogger)
SetLevel(InfoLevel)
atomic.StoreUint32(&initialized, 1)
errorLog = newLogWriter(log.New(&buf, "", flags))
l.WithDuration(time.Second).Error(testlog)
assert.True(t, strings.Contains(buf.String(), mockTraceId))
assert.True(t, strings.Contains(buf.String(), mockSpanId))
@@ -45,11 +45,11 @@ func TestTraceError(t *testing.T) {
func TestTraceInfo(t *testing.T) {
var buf mockWriter
atomic.StoreUint32(&initialized, 1)
infoLog = newLogWriter(log.New(&buf, "", flags))
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
l := WithContext(ctx).(*traceLogger)
SetLevel(InfoLevel)
atomic.StoreUint32(&initialized, 1)
infoLog = newLogWriter(log.New(&buf, "", flags))
l.WithDuration(time.Second).Info(testlog)
assert.True(t, strings.Contains(buf.String(), mockTraceId))
assert.True(t, strings.Contains(buf.String(), mockSpanId))
@@ -61,11 +61,11 @@ func TestTraceInfo(t *testing.T) {
func TestTraceSlow(t *testing.T) {
var buf mockWriter
atomic.StoreUint32(&initialized, 1)
slowLog = newLogWriter(log.New(&buf, "", flags))
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
l := WithContext(ctx).(*traceLogger)
SetLevel(InfoLevel)
atomic.StoreUint32(&initialized, 1)
slowLog = newLogWriter(log.New(&buf, "", flags))
l.WithDuration(time.Second).Slow(testlog)
assert.True(t, strings.Contains(buf.String(), mockTraceId))
assert.True(t, strings.Contains(buf.String(), mockSpanId))
@@ -77,10 +77,10 @@ func TestTraceSlow(t *testing.T) {
func TestTraceWithoutContext(t *testing.T) {
var buf mockWriter
l := WithContext(context.Background()).(*traceLogger)
SetLevel(InfoLevel)
atomic.StoreUint32(&initialized, 1)
infoLog = newLogWriter(log.New(&buf, "", flags))
l := WithContext(context.Background()).(*traceLogger)
SetLevel(InfoLevel)
l.WithDuration(time.Second).Info(testlog)
assert.False(t, strings.Contains(buf.String(), mockTraceId))
assert.False(t, strings.Contains(buf.String(), mockSpanId))

View File

@@ -31,6 +31,7 @@ func TestMaxInt(t *testing.T) {
}
for _, each := range cases {
each := each
t.Run(stringx.Rand(), func(t *testing.T) {
actual := MaxInt(each.a, each.b)
assert.Equal(t, each.expect, actual)

View File

@@ -26,10 +26,6 @@ var started uint32
// Profile represents an active profiling session.
type Profile struct {
// path holds the base path where various profiling files are written.
// If blank, the base path will be generated by ioutil.TempDir.
path string
// closers holds cleanup functions that run after each profile
closers []func()

View File

@@ -27,7 +27,7 @@ func newMockedService(multiplier int) *mockedService {
func (s *mockedService) Start() {
mutex.Lock()
number = number * s.multiplier
number *= s.multiplier
mutex.Unlock()
done <- struct{}{}
<-s.quit

View File

@@ -11,6 +11,7 @@ import (
"time"
"github.com/tal-tech/go-zero/core/executors"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/proc"
"github.com/tal-tech/go-zero/core/sysx"
"github.com/tal-tech/go-zero/core/timex"
@@ -23,7 +24,7 @@ const (
)
var (
reporter func(string)
reporter = logx.Alert
lock sync.RWMutex
lessExecutor = executors.NewLessExecutor(time.Minute * 5)
dropped int32

View File

@@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/alicebob/miniredis"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/errorx"
"github.com/tal-tech/go-zero/core/hash"
@@ -76,12 +75,12 @@ func (mc *mockedNode) TakeWithExpire(v interface{}, key string, query func(v int
func TestCache_SetDel(t *testing.T) {
const total = 1000
r1 := miniredis.NewMiniRedis()
assert.Nil(t, r1.Start())
defer r1.Close()
r2 := miniredis.NewMiniRedis()
assert.Nil(t, r2.Start())
defer r2.Close()
r1, clean1, err := createMiniRedis()
assert.Nil(t, err)
defer clean1()
r2, clean2, err := createMiniRedis()
assert.Nil(t, err)
defer clean2()
conf := ClusterConf{
{
RedisConf: redis.RedisConf{
@@ -124,9 +123,9 @@ func TestCache_SetDel(t *testing.T) {
func TestCache_OneNode(t *testing.T) {
const total = 1000
r := miniredis.NewMiniRedis()
assert.Nil(t, r.Start())
defer r.Close()
r, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
conf := ClusterConf{
{
RedisConf: redis.RedisConf{

View File

@@ -26,9 +26,9 @@ func init() {
}
func TestCacheNode_DelCache(t *testing.T) {
s, err := miniredis.Run()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer s.Close()
defer clean()
cn := cacheNode{
rds: redis.NewRedis(s.Addr(), redis.NodeType),
@@ -49,9 +49,9 @@ func TestCacheNode_DelCache(t *testing.T) {
}
func TestCacheNode_InvalidCache(t *testing.T) {
s, err := miniredis.Run()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer s.Close()
defer clean()
cn := cacheNode{
rds: redis.NewRedis(s.Addr(), redis.NodeType),
@@ -70,9 +70,9 @@ func TestCacheNode_InvalidCache(t *testing.T) {
}
func TestCacheNode_Take(t *testing.T) {
s, err := miniredis.Run()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer s.Close()
defer clean()
cn := cacheNode{
rds: redis.NewRedis(s.Addr(), redis.NodeType),
@@ -97,9 +97,9 @@ func TestCacheNode_Take(t *testing.T) {
}
func TestCacheNode_TakeNotFound(t *testing.T) {
s, err := miniredis.Run()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer s.Close()
defer clean()
cn := cacheNode{
rds: redis.NewRedis(s.Addr(), redis.NodeType),
@@ -136,9 +136,9 @@ func TestCacheNode_TakeNotFound(t *testing.T) {
}
func TestCacheNode_TakeWithExpire(t *testing.T) {
s, err := miniredis.Run()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer s.Close()
defer clean()
cn := cacheNode{
rds: redis.NewRedis(s.Addr(), redis.NodeType),
@@ -163,9 +163,9 @@ func TestCacheNode_TakeWithExpire(t *testing.T) {
}
func TestCacheNode_String(t *testing.T) {
s, err := miniredis.Run()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer s.Close()
defer clean()
cn := cacheNode{
rds: redis.NewRedis(s.Addr(), redis.NodeType),
@@ -180,11 +180,9 @@ func TestCacheNode_String(t *testing.T) {
}
func TestCacheValueWithBigInt(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
defer s.Close()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
cn := cacheNode{
rds: redis.NewRedis(s.Addr(), redis.NodeType),

View File

@@ -48,20 +48,17 @@ func (cs *CacheStat) statLoop() {
ticker := time.NewTicker(statInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
total := atomic.SwapUint64(&cs.Total, 0)
if total == 0 {
continue
}
hit := atomic.SwapUint64(&cs.Hit, 0)
percent := 100 * float32(hit) / float32(total)
miss := atomic.SwapUint64(&cs.Miss, 0)
dbf := atomic.SwapUint64(&cs.DbFails, 0)
logx.Statf("dbcache(%s) - qpm: %d, hit_ratio: %.1f%%, hit: %d, miss: %d, db_fails: %d",
cs.name, total, percent, hit, miss, dbf)
for range ticker.C {
total := atomic.SwapUint64(&cs.Total, 0)
if total == 0 {
continue
}
hit := atomic.SwapUint64(&cs.Hit, 0)
percent := 100 * float32(hit) / float32(total)
miss := atomic.SwapUint64(&cs.Miss, 0)
dbf := atomic.SwapUint64(&cs.DbFails, 0)
logx.Statf("dbcache(%s) - qpm: %d, hit_ratio: %.1f%%, hit: %d, miss: %d, db_fails: %d",
cs.name, total, percent, hit, miss, dbf)
}
}

View File

@@ -2,8 +2,11 @@ package cache
import (
"testing"
"time"
"github.com/alicebob/miniredis"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/lang"
)
func TestFormatKeys(t *testing.T) {
@@ -24,3 +27,22 @@ func TestTotalWeights(t *testing.T) {
})
assert.Equal(t, 1, val)
}
func createMiniRedis() (r *miniredis.Miniredis, clean func(), err error) {
r, err = miniredis.Run()
if err != nil {
return nil, nil, err
}
return r, func() {
ch := make(chan lang.PlaceholderType)
go func() {
r.Close()
close(ch)
}()
select {
case <-ch:
case <-time.After(time.Second):
}
}, nil
}

View File

@@ -73,6 +73,7 @@ type (
ZrevrangebyscoreWithScores(key string, start, stop int64) ([]redis.Pair, error)
ZrevrangebyscoreWithScoresAndLimit(key string, start, stop int64, page, size int) ([]redis.Pair, error)
Zscore(key string, value string) (int64, error)
Zrevrank(key, field string) (int64, error)
}
clusterStore struct {
@@ -635,6 +636,15 @@ func (cs clusterStore) ZrevrangebyscoreWithScoresAndLimit(key string, start, sto
return node.ZrevrangebyscoreWithScoresAndLimit(key, start, stop, page, size)
}
func (cs clusterStore) Zrevrank(key, field string) (int64, error) {
node, err := cs.getRedis(key)
if err != nil {
return 0, err
}
return node.Zrevrank(key, field)
}
func (cs clusterStore) Zscore(key string, value string) (int64, error) {
node, err := cs.getRedis(key)
if err != nil {

View File

@@ -516,6 +516,8 @@ func TestRedis_SortedSet(t *testing.T) {
assert.NotNil(t, err)
_, err = store.ZrevrangebyscoreWithScoresAndLimit("key", 5, 8, 1, 1)
assert.NotNil(t, err)
_, err = store.Zrevrank("key", "value")
assert.NotNil(t, err)
_, err = store.Zadds("key", redis.Pair{
Key: "value2",
Score: 6,
@@ -640,6 +642,9 @@ func TestRedis_SortedSet(t *testing.T) {
Score: 5,
},
}, pairs)
rank, err = client.Zrevrank("key", "value1")
assert.Nil(t, err)
assert.Equal(t, int64(1), rank)
val, err = client.Zadds("key", redis.Pair{
Key: "value2",
Score: 6,

View File

@@ -1273,6 +1273,20 @@ func (s *Redis) ZrevrangebyscoreWithScoresAndLimit(key string, start, stop int64
return
}
func (s *Redis) Zrevrank(key string, field string) (val int64, err error) {
err = s.brk.DoWithAcceptable(func() error {
conn, err := getRedis(s)
if err != nil {
return err
}
val, err = conn.ZRevRank(key, field).Result()
return err
}, acceptable)
return
}
func (s *Redis) String() string {
return s.Addr
}

View File

@@ -584,6 +584,9 @@ func TestRedis_SortedSet(t *testing.T) {
rank, err := client.Zrank("key", "value2")
assert.Nil(t, err)
assert.Equal(t, int64(1), rank)
rank, err = client.Zrevrank("key", "value1")
assert.Nil(t, err)
assert.Equal(t, int64(2), rank)
_, err = NewRedis(client.Addr, "").Zrank("key", "value4")
assert.NotNil(t, err)
_, err = client.Zrank("key", "value4")
@@ -710,6 +713,8 @@ func TestRedis_SortedSet(t *testing.T) {
pairs, err = client.ZrevrangebyscoreWithScoresAndLimit("key", 5, 8, 1, 0)
assert.Nil(t, err)
assert.Equal(t, 0, len(pairs))
_, err = NewRedis(client.Addr, "").Zrevrank("key", "value")
assert.NotNil(t, err)
})
}

View File

@@ -16,6 +16,7 @@ import (
"github.com/alicebob/miniredis"
"github.com/stretchr/testify/assert"
"github.com/tal-tech/go-zero/core/lang"
"github.com/tal-tech/go-zero/core/logx"
"github.com/tal-tech/go-zero/core/stat"
"github.com/tal-tech/go-zero/core/stores/cache"
@@ -30,10 +31,9 @@ func init() {
func TestCachedConn_GetCache(t *testing.T) {
resetStats()
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
r := redis.NewRedis(s.Addr(), redis.NodeType)
c := NewNodeConn(dummySqlConn{}, r, cache.WithExpiry(time.Second*10))
@@ -48,10 +48,9 @@ func TestCachedConn_GetCache(t *testing.T) {
func TestStat(t *testing.T) {
resetStats()
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
r := redis.NewRedis(s.Addr(), redis.NodeType)
c := NewNodeConn(dummySqlConn{}, r, cache.WithExpiry(time.Second*10))
@@ -73,10 +72,9 @@ func TestStat(t *testing.T) {
func TestCachedConn_QueryRowIndex_NoCache(t *testing.T) {
resetStats()
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
r := redis.NewRedis(s.Addr(), redis.NodeType)
c := NewConn(dummySqlConn{}, cache.CacheConf{
@@ -124,10 +122,9 @@ func TestCachedConn_QueryRowIndex_NoCache(t *testing.T) {
func TestCachedConn_QueryRowIndex_HasCache(t *testing.T) {
resetStats()
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
r := redis.NewRedis(s.Addr(), redis.NodeType)
c := NewNodeConn(dummySqlConn{}, r, cache.WithExpiry(time.Second*10),
@@ -213,11 +210,9 @@ func TestCachedConn_QueryRowIndex_HasCache_IntPrimary(t *testing.T) {
},
}
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
defer s.Close()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
@@ -261,12 +256,9 @@ func TestCachedConn_QueryRowIndex_HasWrongCache(t *testing.T) {
for k, v := range caches {
t.Run(k+"/"+v, func(t *testing.T) {
resetStats()
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s.FlushAll()
defer s.Close()
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
r := redis.NewRedis(s.Addr(), redis.NodeType)
c := NewNodeConn(dummySqlConn{}, r, cache.WithExpiry(time.Second*10),
@@ -320,10 +312,9 @@ func TestStatCacheFails(t *testing.T) {
func TestStatDbFails(t *testing.T) {
resetStats()
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
r := redis.NewRedis(s.Addr(), redis.NodeType)
c := NewNodeConn(dummySqlConn{}, r, cache.WithExpiry(time.Second*10))
@@ -343,10 +334,9 @@ func TestStatDbFails(t *testing.T) {
func TestStatFromMemory(t *testing.T) {
resetStats()
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
r := redis.NewRedis(s.Addr(), redis.NodeType)
c := NewNodeConn(dummySqlConn{}, r, cache.WithExpiry(time.Second*10))
@@ -403,10 +393,9 @@ func TestStatFromMemory(t *testing.T) {
}
func TestCachedConnQueryRow(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
const (
key = "user"
@@ -433,10 +422,9 @@ func TestCachedConnQueryRow(t *testing.T) {
}
func TestCachedConnQueryRowFromCache(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
const (
key = "user"
@@ -464,10 +452,9 @@ func TestCachedConnQueryRowFromCache(t *testing.T) {
}
func TestQueryRowNotFound(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
const key = "user"
var conn trackedConn
@@ -486,10 +473,9 @@ func TestQueryRowNotFound(t *testing.T) {
}
func TestCachedConnExec(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
var conn trackedConn
r := redis.NewRedis(s.Addr(), redis.NodeType)
@@ -500,10 +486,9 @@ func TestCachedConnExec(t *testing.T) {
}
func TestCachedConnExecDropCache(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
const (
key = "user"
@@ -539,10 +524,9 @@ func TestCachedConnExecDropCacheFailed(t *testing.T) {
}
func TestCachedConnQueryRows(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
var conn trackedConn
r := redis.NewRedis(s.Addr(), redis.NodeType)
@@ -554,10 +538,9 @@ func TestCachedConnQueryRows(t *testing.T) {
}
func TestCachedConnTransact(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
var conn trackedConn
r := redis.NewRedis(s.Addr(), redis.NodeType)
@@ -570,10 +553,9 @@ func TestCachedConnTransact(t *testing.T) {
}
func TestQueryRowNoCache(t *testing.T) {
s, err := miniredis.Run()
if err != nil {
t.Error(err)
}
s, clean, err := createMiniRedis()
assert.Nil(t, err)
defer clean()
const (
key = "user"
@@ -657,3 +639,22 @@ func (c *trackedConn) Transact(fn func(session sqlx.Session) error) error {
c.transactValue = true
return c.dummySqlConn.Transact(fn)
}
func createMiniRedis() (r *miniredis.Miniredis, clean func(), err error) {
r, err = miniredis.Run()
if err != nil {
return nil, nil, err
}
return r, func() {
ch := make(chan lang.PlaceholderType)
go func() {
r.Close()
close(ch)
}()
select {
case <-ch:
case <-time.After(time.Second):
}
}, nil
}

View File

@@ -1,6 +1,7 @@
package syncx
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
@@ -10,11 +11,15 @@ func TestBarrier_Guard(t *testing.T) {
const total = 10000
var barrier Barrier
var count int
var wg sync.WaitGroup
wg.Add(total)
for i := 0; i < total; i++ {
barrier.Guard(func() {
go barrier.Guard(func() {
count++
wg.Done()
})
}
wg.Wait()
assert.Equal(t, total, count)
}
@@ -22,10 +27,14 @@ func TestBarrierPtr_Guard(t *testing.T) {
const total = 10000
barrier := new(Barrier)
var count int
wg := new(sync.WaitGroup)
wg.Add(total)
for i := 0; i < total; i++ {
barrier.Guard(func() {
go barrier.Guard(func() {
count++
wg.Done()
})
}
wg.Wait()
assert.Equal(t, total, count)
}

View File

@@ -67,7 +67,3 @@ func TestSignalNoWait(t *testing.T) {
func sleep(millisecond int) {
time.Sleep(time.Duration(millisecond) * time.Millisecond)
}
func currentTimeMillis() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}

View File

@@ -95,7 +95,8 @@ func TestExclusiveCallDoDiffDupSuppress(t *testing.T) {
close(broadcast)
wg.Wait()
if got := atomic.LoadInt32(&calls); got != 5 { // five letters
if got := atomic.LoadInt32(&calls); got != 5 {
// five letters
t.Errorf("number of calls = %d; want 5", got)
}
}

View File

@@ -16,7 +16,7 @@ func TestRelativeTime(t *testing.T) {
}
func TestRelativeTime_Time(t *testing.T) {
diff := Time().Sub(time.Now())
diff := time.Until(Time())
if diff > 0 {
assert.True(t, diff < time.Second)
} else {

View File

@@ -27,12 +27,9 @@ func TestFakeTicker(t *testing.T) {
var count int32
go func() {
for {
select {
case <-ticker.Chan():
if atomic.AddInt32(&count, 1) == total {
ticker.Done()
}
for range ticker.Chan() {
if atomic.AddInt32(&count, 1) == total {
ticker.Done()
}
}
}()

View File

@@ -18,7 +18,7 @@ func TestHttpPropagator_Extract(t *testing.T) {
assert.Equal(t, "trace", carrier.Get(traceIdKey))
assert.Equal(t, "span", carrier.Get(spanIdKey))
carrier, err = Extract(HttpFormat, req)
_, err = Extract(HttpFormat, req)
assert.Equal(t, ErrInvalidCarrier, err)
}
@@ -31,7 +31,7 @@ func TestHttpPropagator_Inject(t *testing.T) {
assert.Equal(t, "trace", carrier.Get(traceIdKey))
assert.Equal(t, "span", carrier.Get(spanIdKey))
carrier, err = Inject(HttpFormat, req)
_, err = Inject(HttpFormat, req)
assert.Equal(t, ErrInvalidCarrier, err)
}
@@ -45,9 +45,9 @@ func TestGrpcPropagator_Extract(t *testing.T) {
assert.Equal(t, "trace", carrier.Get(traceIdKey))
assert.Equal(t, "span", carrier.Get(spanIdKey))
carrier, err = Extract(GrpcFormat, 1)
_, err = Extract(GrpcFormat, 1)
assert.Equal(t, ErrInvalidCarrier, err)
carrier, err = Extract(nil, 1)
_, err = Extract(nil, 1)
assert.Equal(t, ErrInvalidCarrier, err)
}
@@ -61,8 +61,8 @@ func TestGrpcPropagator_Inject(t *testing.T) {
assert.Equal(t, "trace", carrier.Get(traceIdKey))
assert.Equal(t, "span", carrier.Get(spanIdKey))
carrier, err = Inject(GrpcFormat, 1)
_, err = Inject(GrpcFormat, 1)
assert.Equal(t, ErrInvalidCarrier, err)
carrier, err = Inject(nil, 1)
_, err = Inject(nil, 1)
assert.Equal(t, ErrInvalidCarrier, err)
}

View File

@@ -17,7 +17,6 @@ const (
clientFlag = "client"
serverFlag = "server"
spanSepRune = '.'
timeFormat = "2006-01-02 15:04:05.000"
)
var spanSep = string([]byte{spanSepRune})
@@ -37,9 +36,7 @@ func newServerSpan(carrier Carrier, serviceName, operationName string) tracespec
return carrier.Get(traceIdKey)
}
return ""
}, func() string {
return stringx.RandId()
})
}, stringx.RandId)
spanId := stringx.TakeWithPriority(func() string {
if carrier != nil {
return carrier.Get(spanIdKey)

View File

@@ -30,6 +30,7 @@ func TestCompareVersions(t *testing.T) {
}
for _, each := range cases {
each := each
t.Run(each.ver1, func(t *testing.T) {
actual := CompareVersions(each.ver1, each.operator, each.ver2)
assert.Equal(t, each.out, actual, fmt.Sprintf("%s vs %s", each.ver1, each.ver2))

View File

@@ -1,624 +0,0 @@
# Rapid development of microservices - multiple RPCs
English | [Simplified Chinese](bookstore.md)
## 0. Why building microservices is so difficult
To build a well-working microservice, we need a lot of knowledge across different aspects.
* basic functionalities
1. concurrency control and rate limiting, to avoid being brought down by unexpected inbound traffic
2. service discovery, to make sure new or terminated nodes are detected asap
3. load balancing, to distribute the traffic based on the throughput of the nodes
4. timeout control, to avoid nodes continuing to process requests that have already timed out
5. circuit breaking, load shedding, and fail-fast behavior, to protect failing nodes and let them recover asap
* advanced functionalities
1. authorization, to make sure users can only access their own data
2. tracing, to understand the whole system and locate specific problems quickly
3. logging, to collect data and help trace back problems
4. observability, no metrics, no optimization
Each point listed above would take a long article to describe in theory and implementation, so for us developers it's very difficult to understand all the concepts and make them happen in our systems. However, we can rely on frameworks that have already served busy sites well. [go-zero](https://github.com/tal-tech/go-zero) was born for this purpose, especially for cloud-native microservice systems.
We also always adhere to the idea of **prefer tools over conventions and documents**. We want to reduce boilerplate code as much as possible and let developers focus on the business-related code. For this purpose, we developed the `goctl` tool.
Let's take the bookstore microservice as a quick example to demonstrate how to quickly create microservices with [go-zero](https://github.com/tal-tech/go-zero). After finishing this tutorial, you'll find that it's easy to write microservices!
## 1. What is a bookstore service
For simplicity, the bookstore service only contains two functionalities: adding books and querying prices.
This bookstore service is written to demonstrate the complete flow of creating a microservice with go-zero. The algorithms and implementation details are deliberately simplified, so it is not suitable for production use.
## 2. Architecture of the bookstore microservice
<img src="images/bookstore-arch.png" alt="architecture" width="800" />
## 3. goctl generated code overview
All modules with a green background are generated and enabled when necessary. The modules with a red background contain handwritten code, which is typically business logic.
* API Gateway
<img src="images/api-gen.png" alt="api" width="800" />
* RPC
<img src="images/rpc-gen.png" alt="rpc" width="800" />
* model
<img src="images/model-gen.png" alt="model" width="800" />
Now, let's walk through the complete flow of quickly creating a microservice with go-zero.
## 4. Get started
* install etcd, mysql, redis
* install protoc-gen-go
```shell
go get -u github.com/golang/protobuf/protoc-gen-go
```
* install goctl
```shell
GO111MODULE=on go get -u github.com/tal-tech/go-zero/tools/goctl
```
* create the working directories `bookstore` and `bookstore/api`
* in the `bookstore` directory, execute `go mod init bookstore` to initialize `go.mod`
## 5. Write code for API Gateway
* use goctl to generate `api/bookstore.api`
```Plain Text
goctl api -o bookstore.api
```
for simplicity, the leading `info` block is removed, and the code looks like:
```go
type (
addReq struct {
book string `form:"book"`
price int64 `form:"price"`
}
addResp struct {
ok bool `json:"ok"`
}
)
type (
checkReq struct {
book string `form:"book"`
}
checkResp struct {
found bool `json:"found"`
price int64 `json:"price"`
}
)
service bookstore-api {
@server(
handler: AddHandler
)
get /add(addReq) returns(addResp)
@server(
handler: CheckHandler
)
get /check(checkReq) returns(checkResp)
}
```
the usage of the `type` keyword is the same as in Go; `service` is used to define get/post/head/delete api requests, described below:
* `service bookstore-api {` defines the service name
* `@server` defines the properties used on the server side (the middleware support added in this change set extends the same block; see the sketch after this list)
* `handler` defines the handler name
* `get /add(addReq) returns(addResp)` defines a GET route, its request parameters, and its response parameters
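As an aside, the changes being compared here (#140, #143) add middleware support to this same `@server` block. The snippet below is only a sketch based on the usage quoted in those commit messages; `M1` and `M2` are placeholder middleware names, and whether the option sits alongside `handler` in a per-route block or in a service-level block may differ by goctl version:
```go
@server(
    middleware: M1, M2
    handler: AddHandler
)
get /add(addReq) returns(addResp)
```
The declaration stays in the .api file, so cross-cutting concerns like auth or logging remain declarative instead of being wired by hand in every handler.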
* generate the code for API Gateway by using goctl
```shell
goctl api go -api bookstore.api -dir .
```
the generated file structure looks like:
```Plain Text
api
├── bookstore.api // api definition
├── bookstore.go // main entrance
├── etc
│ └── bookstore-api.yaml // configuration file
└── internal
├── config
│ └── config.go // configuration definition
├── handler
│ ├── addhandler.go // implements addHandler
│ ├── checkhandler.go // implements checkHandler
│ └── routes.go // routes definition
├── logic
│ ├── addlogic.go // implements AddLogic
│ └── checklogic.go // implements CheckLogic
├── svc
│ └── servicecontext.go // defines ServiceContext
└── types
└── types.go // defines request/response
```
* start the API Gateway service; it listens on port 8888 by default
```shell
go run bookstore.go -f etc/bookstore-api.yaml
```
* test API Gateway service
```shell
curl -i "http://localhost:8888/check?book=go-zero"
```
the response looks like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 03 Sep 2020 06:46:18 GMT
Content-Length: 25
{"found":false,"price":0}
```
You can see that the API Gateway service did nothing except return a zero value. Next, let's implement the business logic in the rpc services.
* you can modify `internal/svc/servicecontext.go` to pass dependencies if needed
* implement the logic in the package `internal/logic` (a sketch of a freshly generated logic stub follows this list)
* you can use goctl to generate client code based on the .api file
* from this point on, the client engineers can work against the api without waiting for the server-side implementation
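For orientation, a freshly generated logic file contains only a placeholder. A sketch of `internal/logic/checklogic.go` right after generation looks roughly like the following (the exact todo comment and surrounding boilerplate depend on the goctl version):
```go
func (l *CheckLogic) Check(req types.CheckReq) (*types.CheckResp, error) {
	// todo: add your logic here and delete this line

	return &types.CheckResp{}, nil
}
```
Section 8 replaces this placeholder with a call to the check rpc service.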
## 6. Write code for add rpc service
* under the directory `bookstore`, create the directory `rpc`
* under the directory `rpc/add`, create the `add.proto` file
```shell
goctl rpc template -o add.proto
```
edit the file so that the code looks like:
```protobuf
syntax = "proto3";
package add;
message addReq {
string book = 1;
int64 price = 2;
}
message addResp {
bool ok = 1;
}
service adder {
rpc add(addReq) returns(addResp);
}
```
* use goctl to generate the rpc code by executing the following command in `rpc/add`
```shell
goctl rpc proto -src add.proto
```
the generated file structure looks like:
```Plain Text
rpc/add
├── add.go // rpc main entrance
├── add.proto // rpc definition
├── adder
│ ├── adder.go // defines how rpc clients call this service
│ ├── adder_mock.go // mock file, for test purpose
│ └── types.go // request/response definition
├── etc
│ └── add.yaml // configuration file
├── internal
│ ├── config
│ │ └── config.go // configuration definition
│ ├── logic
│ │ └── addlogic.go // add logic here
│ ├── server
│ │ └── adderserver.go // rpc handler
│ └── svc
│ └── servicecontext.go // defines service context, like dependencies
└── pb
└── add.pb.go
```
just run it, and it looks like:
```shell
$ go run add.go -f etc/add.yaml
Starting rpc server at 127.0.0.1:8080...
```
you can change the listening port in file `etc/add.yaml`.
## 7. Write code for check rpc service
* under the directory `rpc/check`, create the `check.proto` file
```shell
goctl rpc template -o check.proto
```
edit the file so that the code looks like:
```protobuf
syntax = "proto3";
package check;
message checkReq {
string book = 1;
}
message checkResp {
bool found = 1;
int64 price = 2;
}
service checker {
rpc check(checkReq) returns(checkResp);
}
```
* use goctl to generate the rpc code by executing the following command in `rpc/check`
```shell
goctl rpc proto -src check.proto
```
the generated file structure looks like:
```Plain Text
rpc/check
├── check.go // rpc main entrance
├── check.proto // rpc definition
├── checker
│ ├── checker.go // defines how rpc clients call this service
│ ├── checker_mock.go // mock file, for test purpose
│ └── types.go // request/response definition
├── etc
│ └── check.yaml // configuration file
├── internal
│ ├── config
│ │ └── config.go // configuration definition
│ ├── logic
│ │ └── checklogic.go // check logic here
│ ├── server
│ │ └── checkerserver.go // rpc handler
│ └── svc
│ └── servicecontext.go // defines service context, like dependencies
└── pb
└── check.pb.go
```
you can change the listening port in `etc/check.yaml`.
We need to change the port in `etc/check.yaml` to `8081`, because `8080` is already used by the `add` service.
just run it, and it looks like:
```shell
$ go run check.go -f etc/check.yaml
Starting rpc server at 127.0.0.1:8081...
```
## 8. Modify API Gateway to call add/check rpc service
* modify the configuration file `bookstore-api.yaml`, add the following:
```yaml
Add:
Etcd:
Hosts:
- localhost:2379
Key: add.rpc
Check:
Etcd:
Hosts:
- localhost:2379
Key: check.rpc
```
The add/check services are discovered automatically through etcd.
* modify the file `internal/config/config.go` to add dependencies on the add/check services:
```go
type Config struct {
rest.RestConf
Add zrpc.RpcClientConf // manual code
Check zrpc.RpcClientConf // manual code
}
```
* modify the file `internal/svc/servicecontext.go`, like below:
```go
type ServiceContext struct {
Config config.Config
Adder adder.Adder // manual code
Checker checker.Checker // manual code
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Config: c,
Adder: adder.NewAdder(zrpc.MustNewClient(c.Add)), // manual code
Checker: checker.NewChecker(zrpc.MustNewClient(c.Check)), // manual code
}
}
```
The dependencies among services are passed within ServiceContext.
* modify the method `Add` in the file `internal/logic/addlogic.go` so that it looks like:
```go
func (l *AddLogic) Add(req types.AddReq) (*types.AddResp, error) {
// manual code start
resp, err := l.svcCtx.Adder.Add(l.ctx, &adder.AddReq{
Book: req.Book,
Price: req.Price,
})
if err != nil {
return nil, err
}
return &types.AddResp{
Ok: resp.Ok,
}, nil
// manual code stop
}
```
Calling the method `Add` of `adder` adds books into the bookstore.
* modify the file `internal/logic/checklogic.go` so that it looks like:
```go
func (l *CheckLogic) Check(req types.CheckReq) (*types.CheckResp, error) {
// manual code start
resp, err := l.svcCtx.Checker.Check(l.ctx, &checker.CheckReq{
Book: req.Book,
})
if err != nil {
return nil, err
}
return &types.CheckResp{
Found: resp.Found,
Price: resp.Price,
}, nil
// manual code stop
}
```
Calling the method `Check` of `checker` queries the price of a book from the bookstore.
Up to now, we've finished modifying the API Gateway. All the manually added code is marked.
## 9. Define the database schema, generate the code for CRUD+cache
* under bookstore, create the directory `rpc/model`: `mkdir -p rpc/model`
* under the directory `rpc/model`, create a file called `book.sql` with the contents below:
```sql
CREATE TABLE `book`
(
`book` varchar(255) NOT NULL COMMENT 'book name',
`price` int NOT NULL COMMENT 'book price',
PRIMARY KEY(`book`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
```
* create DB and table
```sql
create database gozero;
```
```sql
source book.sql;
```
* under the directory `rpc/model`, execute the following command to generate the CRUD+cache code; `-c` means using the redis cache
```shell
goctl model mysql ddl -c -src book.sql -dir .
```
you can also generate the code from a database url by using the `datasource` subcommand instead of `ddl`
the generated file structure looks like this (a sketch of what the generated model exposes follows the listing):
```Plain Text
rpc/model
├── bookstore.sql
├── bookstoremodel.go // CRUD+cache code
└── vars.go // const and var definition
```
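Before wiring the model into the rpc services, it helps to know roughly what `bookstoremodel.go` exposes. The sketch below is inferred from how the model is used in section 10 (`Insert`, `FindOne`) and from the "model: return the execute result for insert and update operation" commit in this comparison; it is illustrative only, not the literal generated code, and signatures can vary with the goctl version (`sql` is the standard `database/sql` package):
```go
// Book mirrors the table defined in book.sql.
type Book struct {
	Book  string `db:"book"`  // book name, primary key
	Price int64  `db:"price"` // book price
}

// A conceptual view of what the generated *model.BookModel offers. The real
// type is a struct wrapping a cached connection (redis read-through over mysql),
// not an interface.
type bookModelAPI interface {
	// Insert writes a row and returns the raw sql.Result of the statement.
	Insert(data Book) (sql.Result, error)
	// FindOne reads through the redis cache first and falls back to mysql.
	FindOne(book string) (*Book, error)
}
```
Only `Insert` and `FindOne` are used in this tutorial; the generated file also contains update/delete helpers and the cache key handling.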
## 10. Modify add/check rpc to call crud+cache
* modify `rpc/add/etc/add.yaml`, add the following:
```yaml
DataSource: root:@tcp(localhost:3306)/gozero
Table: book
Cache:
- Host: localhost:6379
```
You can use multiple redis instances as the cache; both redis node and cluster modes are supported.
* modify `rpc/add/internal/config.go` like below:
```go
type Config struct {
zrpc.RpcServerConf
DataSource string // manual code
Table string // manual code
Cache cache.CacheConf // manual code
}
```
This adds the configuration for mysql and the redis cache.
* modify `rpc/add/internal/svc/servicecontext.go` and `rpc/check/internal/svc/servicecontext.go`, like below:
```go
type ServiceContext struct {
c config.Config
Model *model.BookModel // manual code
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
c: c,
Model: model.NewBookModel(sqlx.NewMysql(c.DataSource), c.Cache, c.Table), // manual code
}
}
```
* modify `rpc/add/internal/logic/addlogic.go`, like below:
```go
func (l *AddLogic) Add(in *add.AddReq) (*add.AddResp, error) {
// manual code start
_, err := l.svcCtx.Model.Insert(model.Book{
Book: in.Book,
Price: in.Price,
})
if err != nil {
return nil, err
}
return &add.AddResp{
Ok: true,
}, nil
// manual code stop
}
```
* modify `rpc/check/internal/logic/checklogic.go`, like below:
```go
func (l *CheckLogic) Check(in *check.CheckReq) (*check.CheckResp, error) {
// manual code start
resp, err := l.svcCtx.Model.FindOne(in.Book)
if err != nil {
return nil, err
}
return &check.CheckResp{
Found: true,
Price: resp.Price,
}, nil
// manual code stop
}
```
Up to now, we've finished modifying the code; all the modified code is marked.
## 11. Call the add and check services
* call the add api
```shell
curl -i "http://localhost:8888/add?book=go-zero&price=10"
```
the response looks like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 03 Sep 2020 09:42:13 GMT
Content-Length: 11
{"ok":true}
```
* call check api
```shell
curl -i "http://localhost:8888/check?book=go-zero"
```
the response looks like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 03 Sep 2020 09:47:34 GMT
Content-Length: 25
{"found":true,"price":10}
```
## 12. Benchmark
Because benchmarking the write requests depends on the write throughput of mysql, we only benchmarked the check api. We read the data from mysql and cache it in redis. For simplicity, I only check one book; because of the cache, the effect is the same for multiple books.
Before benchmarking, we need to raise the max open files limit:
```shell
ulimit -n 20000
```
And change the log level to error, to avoid excessive logging affecting the benchmark. Add the following to every yaml file:
```yaml
Log:
Level: error
```
![Benchmark](images/bookstore-benchmark.png)
as shown above, on my MacBook Pro the QPS is around 30K+.
## 13. Full code
[https://github.com/tal-tech/go-zero/tree/master/example/bookstore](https://github.com/tal-tech/go-zero/tree/master/example/bookstore)
## 14. Conclusion
We always adhere to **prefer tools over conventions and documents**.
go-zero is not only a framework, but also a tool to simplify and standardize building microservice systems.
We keep the framework simple while encapsulating its complexity inside the framework, so developers are freed from writing difficult, boilerplate code. That gives us rapid development and fewer failures.
The code generated by goctl includes lots of microservice components, like concurrency control, adaptive circuit breaking, adaptive load shedding, auto cache control, etc., so it's easy to handle busy sites.
If you have any ideas that can help us improve productivity, let me know any time! 👏

View File

@@ -1,624 +0,0 @@
# 快速构建微服务-多RPC版
[English](bookstore-en.md) | 简体中文
## 0. 为什么说做好微服务很难
要想做好微服务,我们需要理解和掌握的知识点非常多,从几个维度上来说:
* 基本功能层面
1. 并发控制&限流,避免服务被突发流量击垮
2. 服务注册与服务发现,确保能够动态侦测增减的节点
3. 负载均衡,需要根据节点承受能力分发流量
4. 超时控制,避免对已超时请求做无用功
5. 熔断设计,快速失败,保障故障节点的恢复能力
* 高阶功能层面
1. 请求认证,确保每个用户只能访问自己的数据
2. 链路追踪,用于理解整个系统和快速定位特定请求的问题
3. 日志,用于数据收集和问题定位
4. 可观测性,没有度量就没有优化
对于其中每一点,我们都需要用很长的篇幅来讲述其原理和实现,那么对我们后端开发者来说,要想把这些知识点都掌握并落实到业务系统里,难度是非常大的,不过我们可以依赖已经被大流量验证过的框架体系。[go-zero微服务框架](https://github.com/tal-tech/go-zero)就是为此而生。
另外,我们始终秉承**工具大于约定和文档**的理念。我们希望尽可能减少开发人员的心智负担,把精力都投入到产生业务价值的代码上,减少重复代码的编写,所以我们开发了`goctl`工具。
下面我通过书店服务来演示通过[go-zero](https://github.com/tal-tech/go-zero)快速的创建微服务的流程,走完一遍,你就会发现:原来编写微服务如此简单!
## 1. 书店服务示例简介
为了教程简单,我们用书店服务做示例,并且只实现其中的增加书目和检查价格功能。
写此书店服务是为了从整体上演示go-zero构建完整微服务的过程实现细节尽可能简化了。
## 2. 书店微服务架构图
<img src="images/bookstore-arch.png" alt="架构图" width="800" />
## 3. goctl各层代码生成一览
所有绿色背景的功能模块是自动生成的,按需激活,红色模块是需要自己写的,也就是增加下依赖,编写业务特有逻辑,各层示意图分别如下:
* API Gateway
<img src="images/bookstore-api.png" alt="api" width="800" />
* RPC
<img src="images/bookstore-rpc.png" alt="架构图" width="800" />
* model
<img src="images/bookstore-model.png" alt="model" width="800" />
下面我们来一起完整走一遍快速构建微服务的流程Lets `Go`!🏃‍♂️
## 4. 准备工作
* 安装etcd, mysql, redis
* 安装`protoc-gen-go`
```shell
go get -u github.com/golang/protobuf/protoc-gen-go
```
* 安装goctl工具
```shell
GO111MODULE=on GOPROXY=https://goproxy.cn/,direct go get -u github.com/tal-tech/go-zero/tools/goctl
```
* 创建工作目录 `bookstore` 和 `bookstore/api`
* 在`bookstore`目录下执行`go mod init bookstore`初始化`go.mod`
## 5. 编写API Gateway代码
* 在`bookstore/api`目录下通过goctl生成`api/bookstore.api`
```bash
goctl api -o bookstore.api
```
编辑`bookstore.api`,为了简洁,去除了文件开头的`info`,代码如下:
```go
type (
addReq struct {
book string `form:"book"`
price int64 `form:"price"`
}
addResp struct {
ok bool `json:"ok"`
}
)
type (
checkReq struct {
book string `form:"book"`
}
checkResp struct {
found bool `json:"found"`
price int64 `json:"price"`
}
)
service bookstore-api {
@server(
handler: AddHandler
)
get /add(addReq) returns(addResp)
@server(
handler: CheckHandler
)
get /check(checkReq) returns(checkResp)
}
```
type用法和go一致service用来定义get/post/head/delete等api请求解释如下
* `service bookstore-api {`这一行定义了service名字
* `@server`部分用来定义server端用到的属性
* `handler`定义了服务端handler名字
* `get /add(addReq) returns(addResp)`定义了get方法的路由、请求参数、返回参数等
* 使用goctl生成API Gateway代码
```shell
goctl api go -api bookstore.api -dir .
```
生成的文件结构如下:
```Plain Text
api
├── bookstore.api // api定义
├── bookstore.go // main入口定义
├── etc
│ └── bookstore-api.yaml // 配置文件
└── internal
├── config
│ └── config.go // 定义配置
├── handler
│ ├── addhandler.go // 实现addHandler
│ ├── checkhandler.go // 实现checkHandler
│ └── routes.go // 定义路由处理
├── logic
│ ├── addlogic.go // 实现AddLogic
│ └── checklogic.go // 实现CheckLogic
├── svc
│ └── servicecontext.go // 定义ServiceContext
└── types
└── types.go // 定义请求、返回结构体
```
* 启动API Gateway服务默认侦听在8888端口
```shell
go run bookstore.go -f etc/bookstore-api.yaml
```
* 测试API Gateway服务
```shell
curl -i "http://localhost:8888/check?book=go-zero"
```
返回如下:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 03 Sep 2020 06:46:18 GMT
Content-Length: 25
{"found":false,"price":0}
```
可以看到我们API Gateway其实啥也没干就返回了个空值接下来我们会在rpc服务里实现业务逻辑
* 可以修改`internal/svc/servicecontext.go`来传递服务依赖(如果需要)
* 实现逻辑可以修改`internal/logic`下的对应文件
* 可以通过`goctl`生成各种客户端语言的api调用代码
* 到这里你已经可以通过goctl生成客户端代码给客户端同学并行开发了支持多种语言详见文档
## 6. 编写add rpc服务
- 在 `bookstore` 下创建 `rpc` 目录
* 在`rpc/add`目录下编写`add.proto`文件
可以通过命令生成proto文件模板
```shell
goctl rpc template -o add.proto
```
修改后文件内容如下:
```protobuf
syntax = "proto3";
package add;
message addReq {
string book = 1;
int64 price = 2;
}
message addResp {
bool ok = 1;
}
service adder {
rpc add(addReq) returns(addResp);
}
```
* 用`goctl`生成rpc代码在`rpc/add`目录下执行命令
```shell
goctl rpc proto -src add.proto
```
文件结构如下:
```Plain Text
rpc/add
├── add.go // rpc服务main函数
├── add.proto // rpc接口定义
├── adder
│ ├── adder.go // 提供了外部调用方法,无需修改
│ ├── adder_mock.go // mock方法测试用
│ └── types.go // request/response结构体定义
├── etc
│ └── add.yaml // 配置文件
├── internal
│ ├── config
│ │ └── config.go // 配置定义
│ ├── logic
│ │ └── addlogic.go // add业务逻辑在这里实现
│ ├── server
│ │ └── adderserver.go // 调用入口, 不需要修改
│ └── svc
│ └── servicecontext.go // 定义ServiceContext传递依赖
└── pb
└── add.pb.go
```
直接可以运行,如下:
```shell
$ go run add.go -f etc/add.yaml
Starting rpc server at 127.0.0.1:8080...
```
`etc/add.yaml`文件里可以修改侦听端口等配置
## 7. 编写check rpc服务
* 在`rpc/check`目录下编写`check.proto`文件
可以通过命令生成proto文件模板
```shell
goctl rpc template -o check.proto
```
修改后文件内容如下:
```protobuf
syntax = "proto3";
package check;
message checkReq {
string book = 1;
}
message checkResp {
bool found = 1;
int64 price = 2;
}
service checker {
rpc check(checkReq) returns(checkResp);
}
```
* 用`goctl`生成rpc代码在`rpc/check`目录下执行命令
```shell
goctl rpc proto -src check.proto
```
文件结构如下:
```Plain Text
rpc/check
├── check.go // rpc服务main函数
├── check.proto // rpc接口定义
├── checker
│ ├── checker.go // 提供了外部调用方法,无需修改
│ ├── checker_mock.go // mock方法测试用
│ └── types.go // request/response结构体定义
├── etc
│ └── check.yaml // 配置文件
├── internal
│ ├── config
│ │ └── config.go // 配置定义
│ ├── logic
│ │ └── checklogic.go // check业务逻辑在这里实现
│ ├── server
│ │ └── checkerserver.go // 调用入口, 不需要修改
│ └── svc
│ └── servicecontext.go // 定义ServiceContext传递依赖
└── pb
└── check.pb.go
```
`etc/check.yaml`文件里可以修改侦听端口等配置
需要修改`etc/check.yaml`的端口为`8081`,因为`8080`已经被`add`服务使用了,直接可以运行,如下:
```shell
$ go run check.go -f etc/check.yaml
Starting rpc server at 127.0.0.1:8081...
```
## 8. 修改API Gateway代码调用add/check rpc服务
* 修改配置文件`bookstore-api.yaml`,增加如下内容
```yaml
Add:
Etcd:
Hosts:
- localhost:2379
Key: add.rpc
Check:
Etcd:
Hosts:
- localhost:2379
Key: check.rpc
```
通过etcd自动去发现可用的add/check服务
* 修改`internal/config/config.go`如下增加add/check服务依赖
```go
type Config struct {
rest.RestConf
Add zrpc.RpcClientConf // 手动代码
Check zrpc.RpcClientConf // 手动代码
}
```
* 修改`internal/svc/servicecontext.go`,如下:
```go
type ServiceContext struct {
Config config.Config
Adder adder.Adder // 手动代码
Checker checker.Checker // 手动代码
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Config: c,
Adder: adder.NewAdder(zrpc.MustNewClient(c.Add)), // 手动代码
Checker: checker.NewChecker(zrpc.MustNewClient(c.Check)), // 手动代码
}
}
```
通过ServiceContext在不同业务逻辑之间传递依赖
* 修改`internal/logic/addlogic.go`里的`Add`方法,如下:
```go
func (l *AddLogic) Add(req types.AddReq) (*types.AddResp, error) {
// 手动代码开始
resp, err := l.svcCtx.Adder.Add(l.ctx, &adder.AddReq{
Book: req.Book,
Price: req.Price,
})
if err != nil {
return nil, err
}
return &types.AddResp{
Ok: resp.Ok,
}, nil
// 手动代码结束
}
```
通过调用`adder`的`Add`方法实现添加图书到bookstore系统
* 修改`internal/logic/checklogic.go`里的`Check`方法,如下:
```go
func (l *CheckLogic) Check(req types.CheckReq) (*types.CheckResp, error) {
// 手动代码开始
resp, err := l.svcCtx.Checker.Check(l.ctx, &checker.CheckReq{
Book: req.Book,
})
if err != nil {
return nil, err
}
return &types.CheckResp{
Found: resp.Found,
Price: resp.Price,
}, nil
// 手动代码结束
}
```
通过调用`checker`的`Check`方法实现从bookstore系统中查询图书的价格
## 9. 定义数据库表结构并生成CRUD+cache代码
* bookstore下创建`rpc/model`目录:`mkdir -p rpc/model`
* 在rpc/model目录下编写创建book表的sql文件`book.sql`,如下:
```sql
CREATE TABLE `book`
(
`book` varchar(255) NOT NULL COMMENT 'book name',
`price` int NOT NULL COMMENT 'book price',
PRIMARY KEY(`book`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
```
* 创建DB和table
```sql
create database gozero;
```
```sql
source book.sql;
```
* 在`rpc/model`目录下执行如下命令生成CRUD+cache代码`-c`表示使用`redis cache`
```shell
goctl model mysql ddl -c -src book.sql -dir .
```
也可以用`datasource`命令代替`ddl`来指定数据库链接直接从schema生成
生成后的文件结构如下:
```Plain Text
rpc/model
├── bookstore.sql
├── bookstoremodel.go // CRUD+cache代码
└── vars.go // 定义常量和变量
```
## 10. 修改add/check rpc代码调用crud+cache代码
* 修改`rpc/add/etc/add.yaml`和`rpc/check/etc/check.yaml`,增加如下内容:
```yaml
DataSource: root:@tcp(localhost:3306)/gozero
Table: book
Cache:
- Host: localhost:6379
```
可以使用多个redis作为cache支持redis单点或者redis集群
* 修改`rpc/add/internal/config.go`和`rpc/check/internal/config.go`,如下:
```go
type Config struct {
zrpc.RpcServerConf
DataSource string // 手动代码
Table string // 手动代码
Cache cache.CacheConf // 手动代码
}
```
增加了mysql和redis cache配置
* 修改`rpc/add/internal/svc/servicecontext.go`和`rpc/check/internal/svc/servicecontext.go`,如下:
```go
type ServiceContext struct {
c config.Config
Model *model.BookModel // 手动代码
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
c: c,
Model: model.NewBookModel(sqlx.NewMysql(c.DataSource), c.Cache, c.Table), // 手动代码
}
}
```
* 修改`rpc/add/internal/logic/addlogic.go`,如下:
```go
func (l *AddLogic) Add(in *add.AddReq) (*add.AddResp, error) {
// 手动代码开始
_, err := l.svcCtx.Model.Insert(model.Book{
Book: in.Book,
Price: in.Price,
})
if err != nil {
return nil, err
}
return &add.AddResp{
Ok: true,
}, nil
// 手动代码结束
}
```
* 修改`rpc/check/internal/logic/checklogic.go`,如下:
```go
func (l *CheckLogic) Check(in *check.CheckReq) (*check.CheckResp, error) {
// 手动代码开始
resp, err := l.svcCtx.Model.FindOne(in.Book)
if err != nil {
return nil, err
}
return &check.CheckResp{
Found: true,
Price: resp.Price,
}, nil
// 手动代码结束
}
```
至此代码修改完成,凡是手动修改的代码我加了标注
## 11. 完整调用演示
* add api调用
```shell
curl -i "http://localhost:8888/add?book=go-zero&price=10"
```
返回如下:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 03 Sep 2020 09:42:13 GMT
Content-Length: 11
{"ok":true}
```
* check api调用
```shell
curl -i "http://localhost:8888/check?book=go-zero"
```
返回如下:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 03 Sep 2020 09:47:34 GMT
Content-Length: 25
{"found":true,"price":10}
```
## 12. Benchmark
因为写入依赖于mysql的写入速度就相当于压mysql了所以压测只测试了check接口相当于从mysql里读取并利用缓存为了方便直接压这一本书因为有缓存多本书也是一样的对压测结果没有影响。
压测之前,让我们先把打开文件句柄数调大:
```shel
ulimit -n 20000
```
并日志的等级改为`error`防止过多的info影响压测结果在每个yaml配置文件里加上如下
```yaml
Log:
Level: error
```
![Benchmark](images/bookstore-benchmark.png)
可以看出在我的MacBook Pro上能达到3万+的qps。
## 13. 完整代码
[https://github.com/tal-tech/go-zero/tree/master/example/bookstore](https://github.com/tal-tech/go-zero/tree/master/example/bookstore)
## 14. 总结
我们一直强调**工具大于约定和文档**。
go-zero不只是一个框架更是一个建立在框架+工具基础上的,简化和规范了整个微服务构建的技术体系。
我们在保持简单的同时也尽可能把微服务治理的复杂度封装到了框架内部,极大的降低了开发人员的心智负担,使得业务开发得以快速推进。
通过go-zero+goctl生成的代码包含了微服务治理的各种组件包括并发控制、自适应熔断、自适应降载、自动缓存控制等可以轻松部署以承载巨大访问量。
有任何好的提升工程效率的想法,随时欢迎交流!👏

View File

@@ -1,5 +0,0 @@
# 熔断机制设计
## 设计目的
* 依赖的服务出现大规模故障时,调用方应该尽可能少调用,降低故障服务的压力,使之尽快恢复服务

View File

@@ -1,111 +0,0 @@
# 通过 collection.Cache 进行缓存
go-zero微服务框架中提供了许多开箱即用的工具好的工具不仅能提升服务的性能而且还能提升代码的鲁棒性避免出错实现代码风格的统一方便他人阅读等等本系列文章将分别介绍go-zero框架中工具的使用及其实现原理
## 进程内缓存工具[collection.Cache](https://github.com/tal-tech/go-zero/tree/master/core/collection/cache.go)
在做服务器开发的时候相信都会遇到使用缓存的情况go-zero 提供的简单的缓存封装 **collection.Cache**,简单使用方式如下
```go
// 初始化 cache其中 WithLimit 可以指定最大缓存的数量
c, err := collection.NewCache(time.Minute, collection.WithLimit(10000))
if err != nil {
panic(err)
}
// 设置缓存
c.Set("key", user)
// 获取缓存ok是否存在
v, ok := c.Get("key")
// 删除缓存
c.Del("key")
// 获取缓存,如果 key 不存在的,则会调用 func 去生成缓存
v, err := c.Take("key", func() (interface{}, error) {
return user, nil
})
```
cache 实现的建的功能包括
* 缓存自动失效,可以指定过期时间
* 缓存大小限制,可以指定缓存个数
* 缓存增删改
* 缓存命中率统计
* 并发安全
* 缓存击穿
实现原理:
Cache 自动失效,是采用 TimingWheel(https://github.com/tal-tech/go-zero/blob/master/core/collection/timingwheel.go) 进行管理的
``` go
timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v interface{}) {
key, ok := k.(string)
if !ok {
return
}
cache.Del(key)
})
```
Cache 大小限制,是采用 LRU 淘汰策略,在新增缓存的时候会去检查是否已经超出过限制,具体代码在 keyLru 中实现
``` go
func (klru *keyLru) add(key string) {
if elem, ok := klru.elements[key]; ok {
klru.evicts.MoveToFront(elem)
return
}
// Add new item
elem := klru.evicts.PushFront(key)
klru.elements[key] = elem
// Verify size not exceeded
if klru.evicts.Len() > klru.limit {
klru.removeOldest()
}
}
```
Cache 的命中率统计,是在代码中实现 cacheStat,在缓存命中丢失的时候自动统计,并且会定时打印使用的命中率, qps 等状态.
打印的具体效果如下
```go
cache(proc) - qpm: 2, hit_ratio: 50.0%, elements: 0, hit: 1, miss: 1
```
缓存击穿包含是使用 syncx.SharedCalls(https://github.com/tal-tech/go-zero/blob/master/core/syncx/sharedcalls.go) 进行实现的,就是将同时请求同一个 key 的请求, 关于 sharedcalls 后续会继续补充。 相关具体实现是在:
```go
func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
val, fresh, err := c.barrier.DoEx(key, func() (interface{}, error) {
v, e := fetch()
if e != nil {
return nil, e
}
c.Set(key, v)
return v, nil
})
if err != nil {
return nil, err
}
if fresh {
c.stats.IncrementMiss()
return val, nil
} else {
// got the result from previous ongoing query
c.stats.IncrementHit()
}
return val, nil
}
```
本文主要介绍了go-zero框架中的 Cache 工具,在实际的项目中非常实用。用好工具对于提升服务性能和开发效率都有很大的帮助,希望本篇文章能给大家带来一些收获。

View File

@@ -1,272 +0,0 @@
# Goctl Model
goctl model 为go-zero下的工具模块中的组件之一目前支持识别mysql ddl进行model层代码生成通过命令行或者idea插件即将支持可以有选择地生成带redis cache或者不带redis cache的代码逻辑。
## 快速开始
* 通过ddl生成
```shell script
goctl model mysql ddl -src="./sql/user.sql" -dir="./sql/model" -c=true
```
执行上述命令后即可快速生成CURD代码。
```Plain Text
model
│   ├── error.go
│   └── usermodel.go
```
* 通过datasource生成
```shell script
goctl model mysql datasource -url="user:password@tcp(127.0.0.1:3306)/database" -table="table1,table2" -dir="./model"
```
* 生成代码示例
``` go
package model
import (
"database/sql"
"fmt"
"strings"
"time"
"github.com/tal-tech/go-zero/core/stores/cache"
"github.com/tal-tech/go-zero/core/stores/sqlc"
"github.com/tal-tech/go-zero/core/stores/sqlx"
"github.com/tal-tech/go-zero/core/stringx"
"github.com/tal-tech/go-zero/tools/goctl/model/sql/builderx"
)
var (
userFieldNames = builderx.FieldNames(&User{})
userRows = strings.Join(userFieldNames, ",")
userRowsExpectAutoSet = strings.Join(stringx.Remove(userFieldNames, "id", "create_time", "update_time"), ",")
userRowsWithPlaceHolder = strings.Join(stringx.Remove(userFieldNames, "id", "create_time", "update_time"), "=?,") + "=?"
cacheUserMobilePrefix = "cache#User#mobile#"
cacheUserIdPrefix = "cache#User#id#"
cacheUserNamePrefix = "cache#User#name#"
)
type (
UserModel struct {
sqlc.CachedConn
table string
}
User struct {
Id int64 `db:"id"`
Name string `db:"name"` // 用户名称
Password string `db:"password"` // 用户密码
Mobile string `db:"mobile"` // 手机号
Gender string `db:"gender"` // 男|女|未公开
Nickname string `db:"nickname"` // 用户昵称
CreateTime time.Time `db:"create_time"`
UpdateTime time.Time `db:"update_time"`
}
)
func NewUserModel(conn sqlx.SqlConn, c cache.CacheConf, table string) *UserModel {
return &UserModel{
CachedConn: sqlc.NewConn(conn, c),
table: table,
}
}
func (m *UserModel) Insert(data User) (sql.Result, error) {
query := `insert into ` + m.table + `(` + userRowsExpectAutoSet + `) value (?, ?, ?, ?, ?)`
return m.ExecNoCache(query, data.Name, data.Password, data.Mobile, data.Gender, data.Nickname)
}
func (m *UserModel) FindOne(id int64) (*User, error) {
userIdKey := fmt.Sprintf("%s%v", cacheUserIdPrefix, id)
var resp User
err := m.QueryRow(&resp, userIdKey, func(conn sqlx.SqlConn, v interface{}) error {
query := `select ` + userRows + ` from ` + m.table + ` where id = ? limit 1`
return conn.QueryRow(v, query, id)
})
switch err {
case nil:
return &resp, nil
case sqlc.ErrNotFound:
return nil, ErrNotFound
default:
return nil, err
}
}
func (m *UserModel) FindOneByName(name string) (*User, error) {
userNameKey := fmt.Sprintf("%s%v", cacheUserNamePrefix, name)
var resp User
err := m.QueryRowIndex(&resp, userNameKey, func(primary interface{}) string {
return fmt.Sprintf("%s%v", cacheUserIdPrefix, primary)
}, func(conn sqlx.SqlConn, v interface{}) (i interface{}, e error) {
query := `select ` + userRows + ` from ` + m.table + ` where name = ? limit 1`
if err := conn.QueryRow(&resp, query, name); err != nil {
return nil, err
}
return resp.Id, nil
}, func(conn sqlx.SqlConn, v, primary interface{}) error {
query := `select ` + userRows + ` from ` + m.table + ` where id = ? limit 1`
return conn.QueryRow(v, query, primary)
})
switch err {
case nil:
return &resp, nil
case sqlc.ErrNotFound:
return nil, ErrNotFound
default:
return nil, err
}
}
func (m *UserModel) FindOneByMobile(mobile string) (*User, error) {
userMobileKey := fmt.Sprintf("%s%v", cacheUserMobilePrefix, mobile)
var resp User
err := m.QueryRowIndex(&resp, userMobileKey, func(primary interface{}) string {
return fmt.Sprintf("%s%v", cacheUserIdPrefix, primary)
}, func(conn sqlx.SqlConn, v interface{}) (i interface{}, e error) {
query := `select ` + userRows + ` from ` + m.table + ` where mobile = ? limit 1`
if err := conn.QueryRow(&resp, query, mobile); err != nil {
return nil, err
}
return resp.Id, nil
}, func(conn sqlx.SqlConn, v, primary interface{}) error {
query := `select ` + userRows + ` from ` + m.table + ` where id = ? limit 1`
return conn.QueryRow(v, query, primary)
})
switch err {
case nil:
return &resp, nil
case sqlc.ErrNotFound:
return nil, ErrNotFound
default:
return nil, err
}
}
func (m *UserModel) Update(data User) error {
userIdKey := fmt.Sprintf("%s%v", cacheUserIdPrefix, data.Id)
_, err := m.Exec(func(conn sqlx.SqlConn) (result sql.Result, err error) {
query := `update ` + m.table + ` set ` + userRowsWithPlaceHolder + ` where id = ?`
return conn.Exec(query, data.Name, data.Password, data.Mobile, data.Gender, data.Nickname, data.Id)
}, userIdKey)
return err
}
func (m *UserModel) Delete(id int64) error {
data, err := m.FindOne(id)
if err != nil {
return err
}
userIdKey := fmt.Sprintf("%s%v", cacheUserIdPrefix, id)
userNameKey := fmt.Sprintf("%s%v", cacheUserNamePrefix, data.Name)
userMobileKey := fmt.Sprintf("%s%v", cacheUserMobilePrefix, data.Mobile)
_, err = m.Exec(func(conn sqlx.SqlConn) (result sql.Result, err error) {
query := `delete from ` + m.table + ` where id = ?`
return conn.Exec(query, id)
}, userIdKey, userNameKey, userMobileKey)
return err
}
```
### 用法
```Plain Text
goctl model mysql -h
```
```Plain Text
NAME:
goctl model mysql - generate mysql model"
USAGE:
goctl model mysql command [command options] [arguments...]
COMMANDS:
ddl generate mysql model from ddl"
datasource generate model from datasource"
OPTIONS:
--help, -h show help
```
## 生成规则
* 默认规则
我们默认用户在建表时会创建createTime、updateTime字段(忽略大小写、下划线命名风格)且默认值均为`CURRENT_TIMESTAMP`而updateTime支持`ON UPDATE CURRENT_TIMESTAMP`,对于这两个字段生成`insert`、`update`时会被移除,不在赋值范畴内,当然,如果你不需要这两个字段那也无大碍。
* 带缓存模式
* ddl
```shell script
goctl model mysql -src={filename} -dir={dir} -cache=true
```
* datasource
```shell script
goctl model mysql datasource -url={datasource} -table={tables} -dir={dir} -cache=true
```
目前仅支持redis缓存如果选择带缓存模式即生成的`FindOne(ByXxx)`&`Delete`代码会生成带缓存逻辑的代码目前仅支持单索引字段除全文索引外对于联合索引我们默认认为不需要带缓存且不属于通用型代码因此没有放在代码生成行列如example中user表中的`id`、`name`、`mobile`字段均属于单字段索引。
* 不带缓存模式
* ddl
```shell script
goctl model -src={filename} -dir={dir}
```
* datasource
```shell script
goctl model mysql datasource -url={datasource} -table={tables} -dir={dir}
```
or
* ddl
```shell script
goctl model -src={filename} -dir={dir} -cache=false
```
* datasource
```shell script
goctl model mysql datasource -url={datasource} -table={tables} -dir={dir} -cache=false
```
生成代码仅基本的CURD结构。
## 缓存
对于缓存这一块我选择用一问一答的形式进行罗列。我想这样能够更清晰的描述model中缓存的功能。
* 缓存会缓存哪些信息?
对于主键字段缓存,会缓存整个结构体信息,而对于单索引字段(除全文索引)则缓存主键字段值。
* 数据有更新(`update`)操作会清空缓存吗?
但仅清空主键缓存的信息why这里就不做详细赘述了。
* 为什么不按照单索引字段生成`updateByXxx`和`deleteByXxx`的代码?
理论上是没任何问题但是我们认为对于model层的数据操作均是以整个结构体为单位包括查询我不建议只查询某部分字段不反对否则我们的缓存就没有意义了。
* 为什么不支持`findPageLimit`、`findAll`这么模式代码生层?
目前我认为除了基本的CURD外其他的代码均属于<i>业务型</i>代码,这个我觉得开发人员根据业务需要进行编写更好。
## QA
* goctl model除了命令行模式支持插件模式吗
很快支持idea插件。

View File

@@ -1,238 +0,0 @@
# Rpc Generation
Goctl Rpc是`goctl`脚手架下的一个rpc服务代码生成模块支持proto模板生成和rpc服务代码生成通过此工具生成代码你只需要关注业务逻辑编写而不用去编写一些重复性的代码。这使得我们把精力重心放在业务上从而加快了开发效率且降低了代码出错率。
## 特性
* 简单易用
* 快速提升开发效率
* 出错率低
## 快速开始
### 方式一快速生成greet服务
通过命令 `goctl rpc new ${servieName}`生成
如生成greet rpc服务
```shell script
goctl rpc new greet
```
执行后代码结构如下:
```golang
└── greet
├── etc
│   └── greet.yaml
├── go.mod
├── go.sum
├── greet
│   ├── greet.go
│   ├── greet_mock.go
│   └── types.go
├── greet.go
├── greet.proto
├── internal
│   ├── config
│   │   └── config.go
│   ├── logic
│   │   └── pinglogic.go
│   ├── server
│   │   └── greetserver.go
│   └── svc
│   └── servicecontext.go
└── pb
└── greet.pb.go
```
rpc一键生成常见问题解决见 <a href="#常见问题解决">常见问题解决</a>
### 方式二通过指定proto生成rpc服务
* 生成proto模板
```shell script
goctl rpc template -o=user.proto
```
```golang
syntax = "proto3";
package remote;
message Request {
// 用户名
string username = 1;
// 用户密码
string password = 2;
}
message Response {
// 用户名称
string name = 1;
// 用户性别
string gender = 2;
}
service User {
// 登录
rpc Login(Request)returns(Response);
}
```
* 生成rpc服务代码
```shell script
goctl rpc proto -src=user.proto
```
代码tree
```Plain Text
user
├── etc
│   └── user.json
├── internal
│   ├── config
│   │   └── config.go
│   ├── handler
│   │   ├── loginhandler.go
│   ├── logic
│   │   └── loginlogic.go
│   └── svc
│   └── servicecontext.go
├── pb
│   └── user.pb.go
├── shared
│   ├── mockusermodel.go
│   ├── types.go
│   └── usermodel.go
├── user.go
└── user.proto
```
## 准备工作
* 安装了go环境
* 安装了protoc&protoc-gen-go并且已经设置环境变量
* mockgen(可选,将移除)
* 更多问题请见 <a href="#注意事项">注意事项</a>
## 用法
### rpc服务生成用法
```shell script
goctl rpc proto -h
```
```shell script
NAME:
goctl rpc proto - generate rpc from proto
USAGE:
goctl rpc proto [command options] [arguments...]
OPTIONS:
--src value, -s value the file path of the proto source file
--dir value, -d value the target path of the code,default path is "${pwd}". [option]
--service value, --srv value the name of rpc service. [option]
--shared[已废弃] value the dir of the shared file,default path is "${pwd}/shared. [option]"
--idea whether the command execution environment is from idea plugin. [option]
```
### 参数说明
* --src 必填proto数据源目前暂时支持单个proto文件生成这里不支持不建议外部依赖
* --dir 非必填默认为proto文件所在目录生成代码的目标目录
* --service 服务名称非必填默认为proto文件所在目录名称但是如果proto所在目录为一下结构
```shell script
user
├── cmd
│   └── rpc
│   └── user.proto
```
则服务名称亦为user而非proto所在文件夹名称了这里推荐使用这种结构可以方便在同一个服务名下建立不同类型的服务(api、rpc、mq等),便于代码管理与维护。
* --shared[⚠️已废弃] 非必填,默认为$dir(xxx.proto)/sharedrpc client逻辑代码存放目录。
> 注意这里的shared文件夹名称将会是代码中的package名称。
* --idea 非必填是否为idea插件中执行保留字段终端执行可以忽略
## 开发人员需要做什么
关注业务代码编写将重复性、与业务无关的工作交给goctl生成好rpc服务代码后开饭人员仅需要修改
* 服务中的配置文件编写(etc/xx.json、internal/config/config.go)
* 服务中业务逻辑编写(internal/logic/xxlogic.go)
* 服务中资源上下文的编写(internal/svc/servicecontext.go)
## 扩展
对于需要进行rpc mock的开发人员在安装了`mockgen`工具的前提下可以在rpc的shared文件中生成好对应的mock文件。
## 注意事项
* `google.golang.org/grpc`需要降级到v1.26.0,且protoc-gen-go版本不能高于v1.3.2see [https://github.com/grpc/grpc-go/issues/3347](https://github.com/grpc/grpc-go/issues/3347))即
```shell script
replace google.golang.org/grpc => google.golang.org/grpc v1.26.0
```
* proto不支持暂多文件同时生成
* proto不支持外部依赖包引入message不支持inline
* 目前main文件、shared文件、handler文件会被强制覆盖而和开发人员手动需要编写的则不会覆盖生成这一类在代码头部均有
```shell script
// Code generated by goctl. DO NOT EDIT!
// Source: xxx.proto
```
的标识,请注意不要将也写业务性代码写在里面。
## 常见问题解决(go mod工程)
* 错误一:
```golang
pb/xx.pb.go:220:7: undefined: grpc.ClientConnInterface
pb/xx.pb.go:224:11: undefined: grpc.SupportPackageIsVersion6
pb/xx.pb.go:234:5: undefined: grpc.ClientConnInterface
pb/xx.pb.go:237:24: undefined: grpc.ClientConnInterface
```
解决方法:请将`protoc-gen-go`版本降至v1.3.2及一下
* 错误二:
```golang
# go.etcd.io/etcd/clientv3/balancer/picker
../../../go/pkg/mod/go.etcd.io/etcd@v0.0.0-20200402134248-51bdeb39e698/clientv3/balancer/picker/err.go:25:9: cannot use &errPicker literal (type *errPicker) as type Picker in return argument:*errPicker does not implement Picker (wrong type for Pick method)
have Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error)
want Pick(balancer.PickInfo) (balancer.PickResult, error)
../../../go/pkg/mod/go.etcd.io/etcd@v0.0.0-20200402134248-51bdeb39e698/clientv3/balancer/picker/roundrobin_balanced.go:33:9: cannot use &rrBalanced literal (type *rrBalanced) as type Picker in return argument:
*rrBalanced does not implement Picker (wrong type for Pick method)
have Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error)
want Pick(balancer.PickInfo) (balancer.PickResult, error)
#github.com/tal-tech/go-zero/zrpc/internal/balancer/p2c
../../../go/pkg/mod/github.com/tal-tech/go-zero@v1.0.12/zrpc/internal/balancer/p2c/p2c.go:41:32: not enough arguments in call to base.NewBalancerBuilder
have (string, *p2cPickerBuilder)
want (string, base.PickerBuilder, base.Config)
../../../go/pkg/mod/github.com/tal-tech/go-zero@v1.0.12/zrpc/internal/balancer/p2c/p2c.go:58:9: cannot use &p2cPicker literal (type *p2cPicker) as type balancer.Picker in return argument:
*p2cPicker does not implement balancer.Picker (wrong type for Pick method)
have Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error)
want Pick(balancer.PickInfo) (balancer.PickResult, error)
```
解决方法:
```golang
replace google.golang.org/grpc => google.golang.org/grpc v1.26.0
```

View File

@@ -1,299 +0,0 @@
# goctl使用
## goctl用途
* 定义api请求
* 根据定义的api自动生成golang(后端), java(iOS & Android), typescript(web & 晓程序)dart(flutter)
* 生成MySQL CURD+Cache
* 生成MongoDB CURD+Cache
## goctl使用说明
### 快速生成服务
* api: goctl api new xxxx
* rpc: goctl rpc new xxxx
#### goctl参数说明
`goctl api [go/java/ts] [-api user/user.api] [-dir ./src]`
> api 后面接生成的语言现支持go/java/typescript
>
> -api 自定义api所在路径
>
> -dir 自定义生成目录
#### 保持goctl总是最新版
第一次运行会在~/.goctl里增加下面两行
```Plain Text
url = http://47.97.184.41:7777/
```
#### API 语法说明
``` golang
info(
title: doc title
desc: >
doc description first part,
doc description second part<
version: 1.0
)
type int userType
type user struct {
name string `json:"user"` // 用户姓名
}
type student struct {
name string `json:"name"` // 学生姓名
}
type teacher struct {
}
type (
address struct {
city string `json:"city"`
}
innerType struct {
image string `json:"image"`
}
createRequest struct {
innerType
name string `form:"name"`
age int `form:"age,optional"`
address []address `json:"address,optional"`
}
getRequest struct {
name string `path:"name"`
age int `form:"age,optional"`
}
getResponse struct {
code int `json:"code"`
desc string `json:"desc,omitempty"`
address address `json:"address"`
service int `json:"service"`
}
)
service user-api {
@doc(
summary: user title
desc: >
user description first part,
user description second part,
user description second line
)
@server(
handler: GetUserHandler
group: user
)
get /api/user/:name(getRequest) returns(getResponse)
@server(
handler: CreateUserHandler
group: user
)
post /api/users/create(createRequest)
}
@server(
jwt: Auth
group: profile
)
service user-api {
@doc(summary: user title)
@server(
handler: GetProfileHandler
)
get /api/profile/:name(getRequest) returns(getResponse)
@server(
handler: CreateProfileHandler
)
post /api/profile/create(createRequest)
}
service user-api {
@doc(summary: desc in one line)
@server(
handler: PingHandler
)
head /api/ping()
}
```
1. info部分描述了api基本信息比如Authapi是哪个用途。
2. type部分type类型声明和golang语法兼容。
3. service部分service代表一组服务一个服务可以由多组名称相同的service组成可以针对每一组service配置jwt和auth认证另外通过group属性可以指定service生成所在子目录。
service里面包含api路由比如上面第一组service的第一个路由doc用来描述此路由的用途GetProfileHandler表示处理这个路由的handler
`get /api/profile/:name(getRequest) returns(getResponse)` 中get代表api的请求方式get/post/put/delete, `/api/profile/:name` 描述了路由path`:name`通过
请求getRequest里面的属性赋值getResponse为返回的结构体这两个类型都定义在2描述的类型中。
#### api vscode插件
开发者可以在vscode中搜索goctl的api插件它提供了api语法高亮语法检测和格式化相关功能。
1. 支持语法高亮和类型导航。
2. 语法检测格式化api会自动检测api编写错误地方用vscode默认的格式化快捷键(option+command+F)或者自定义的也可以。
3. 格式化(option+command+F),类似代码格式化,统一样式支持。
#### 根据定义好的api文件生成golang代码
命令如下:
`goctl api go -api user/user.api -dir user`
```Plain Text
.
├── internal
│   ├── config
│   │   └── config.go
│   ├── handler
│   │   ├── pinghandler.go
│   │   ├── profile
│   │   │   ├── createprofilehandler.go
│   │   │   └── getprofilehandler.go
│   │   ├── routes.go
│   │   └── user
│   │   ├── createuserhandler.go
│   │   └── getuserhandler.go
│   ├── logic
│   │   ├── pinglogic.go
│   │   ├── profile
│   │   │   ├── createprofilelogic.go
│   │   │   └── getprofilelogic.go
│   │   └── user
│   │   ├── createuserlogic.go
│   │   └── getuserlogic.go
│   ├── svc
│   │   └── servicecontext.go
│   └── types
│   └── types.go
└── user.go
```
生成的代码可以直接跑,有几个地方需要改:
* 在`servicecontext.go`里面增加需要传递给logic的一些资源比如mysql, redisrpc等
* 在定义的get/post/put/delete等请求的handler和logic里增加处理业务逻辑的代码
#### 根据定义好的api文件生成java代码
```shell
goctl api java -api user/user.api -dir ./src
```
#### 根据定义好的api文件生成typescript代码
```shell
goctl api ts -api user/user.api -dir ./src -webapi ***
ts需要指定webapi所在目录
```
#### 根据定义好的api文件生成Dart代码
```shell
goctl api dart -api user/user.api -dir ./src
```
## 根据mysql ddl或者datasource生成model文件
```shell script
goctl model mysql -src={filename} -dir={dir} -cache={true|false}
```
详情参考[model文档](https://github.com/tal-tech/go-zero/blob/master/tools/goctl/model/sql/README.MD)
## 根据定义好的简单go文件生成mongo代码文件(仅限golang使用)
```shell
goctl model mongo -src {{yourDir}}/xiao/service/xhb/user/model/usermodel.go -cache yes
```
* src需要提供简单的usermodel.go文件里面只需要提供一个结构体即可
* cache 控制是否需要缓存 yes=需要 no=不需要
src 示例代码如下
```go
package model
type User struct {
Name string `o:"find,get,set" c:"姓名"`
Age int `o:"find,get,set" c:"年纪"`
School string `c:"学校"`
}
```
结构体中不需要提供Id,CreateTime,UpdateTime三个字段会自动生成
结构体中每个tag有两个可选标签 c 和 o
c 是该字段的注释
o 是该字段需要生产的操作函数 可以取得get,find,set 分别表示生成返回单个对象的查询方法,返回多个对象的查询方法,设置该字段方法
生成的目标文件会覆盖该简单go文件
## goctl rpc生成业务剥离中暂未开放
命令 `goctl rpc proto -proto ${proto} -service ${serviceName} -project ${projectName} -dir ${directory} -shared ${shared}`
如: `goctl rpc proto -proto test.proto -service test -project xjy -dir .`
参数说明:
* ${proto}: proto文件
* ${serviceName}: rpc服务名称
* ${projectName}: 所属项目如xjy,xhb,crm,hera具体查看help主要为了根据不同项目服务往redis注册key可选
* ${directory}: 输出目录
* ${shared}: shared文件生成目录可选默认为${pwd}/shared
生成目录结构示例:
```Plain Text
.
├── shared [示例目录,可自己指定,强制覆盖更新]
│   └── contentservicemodel.go
├── test
│   ├── etc
│   │   └── test.json
│   ├── internal
│   │   ├── config
│   │   │   └── config.go
│   │   ├── handler [强制覆盖更新]
│   │   │   ├── changeavatarhandler.go
│   │   │   ├── changebirthdayhandler.go
│   │   │   ├── changenamehandler.go
│   │   │   ├── changepasswordhandler.go
│   │   │   ├── changeuserinfohandler.go
│   │   │   ├── getuserinfohandler.go
│   │   │   ├── loginhandler.go
│   │   │   ├── logouthandler.go
│   │   │   └── testhandler.go
│   │   ├── logic
│   │   │   ├── changeavatarlogic.go
│   │   │   ├── changebirthdaylogic.go
│   │   │   ├── changenamelogic.go
│   │   │   ├── changepasswordlogic.go
│   │   │   ├── changeuserinfologic.go
│   │   │   ├── getuserinfologic.go
│   │   │   ├── loginlogic.go
│   │   │   └── logoutlogic.go
│   │   └── svc
│   │   └── servicecontext.go
│   ├── pb
│   │   └── test.pb.go
│   └── test.go [强制覆盖更新]
└── test.proto
```
注意 目前rpc目录生成的proto文件暂不支持import外部proto文件

Binary image files not shown.

View File

@@ -1,140 +0,0 @@
# 基于go-zero实现JWT认证
关于JWT是什么大家可以看看[官网](https://jwt.io/),一句话介绍下:是可以实现服务器无状态的鉴权认证方案,也是目前最流行的跨域认证解决方案。
要实现JWT认证我们需要分成如下两个步骤
* 客户端获取JWT token。
* 服务器对客户端带来的JWT token认证。
## 1. 客户端获取JWT Token
我们定义一个协议供客户端调用获取JWT token我们新建一个目录jwt然后在目录中执行 `goctl api -o jwt.api`将生成的jwt.api改成如下
````go
type JwtTokenRequest struct {
}
type JwtTokenResponse struct {
AccessToken string `json:"access_token"`
AccessExpire int64 `json:"access_expire"`
RefreshAfter int64 `json:"refresh_after"` // 建议客户端刷新token的绝对时间
}
type GetUserRequest struct {
UserId string `json:"userId"`
}
type GetUserResponse struct {
Name string `json:"name"`
}
service jwt-api {
@server(
handler: JwtHandler
)
post /user/token(JwtTokenRequest) returns (JwtTokenResponse)
}
@server(
jwt: JwtAuth
)
service jwt-api {
@server(
handler: GetUserHandler
)
post /user/info(GetUserRequest) returns (GetUserResponse)
}
````
在服务jwt目录中执行`goctl api go -api jwt.api -dir .`
打开jwtlogic.go文件修改 `func (l *JwtLogic) Jwt(req types.JwtTokenRequest) (*types.JwtTokenResponse, error) {` 方法如下:
```go
func (l *JwtLogic) Jwt(req types.JwtTokenRequest) (*types.JwtTokenResponse, error) {
var accessExpire = l.svcCtx.Config.JwtAuth.AccessExpire
now := time.Now().Unix()
accessToken, err := l.GenToken(now, l.svcCtx.Config.JwtAuth.AccessSecret, nil, accessExpire)
if err != nil {
return nil, err
}
return &types.JwtTokenResponse{
AccessToken: accessToken,
AccessExpire: now + accessExpire,
RefreshAfter: now + accessExpire/2,
}, nil
}
func (l *JwtLogic) GenToken(iat int64, secretKey string, payloads map[string]interface{}, seconds int64) (string, error) {
claims := make(jwt.MapClaims)
claims["exp"] = iat + seconds
claims["iat"] = iat
for k, v := range payloads {
claims[k] = v
}
token := jwt.New(jwt.SigningMethodHS256)
token.Claims = claims
return token.SignedString([]byte(secretKey))
}
```
在启动服务之前我们需要修改etc/jwt-api.yaml文件如下
```yaml
Name: jwt-api
Host: 0.0.0.0
Port: 8888
JwtAuth:
AccessSecret: xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
AccessExpire: 604800
```
启动服务器然后测试下获取到的token。
```sh
➜ curl --location --request POST '127.0.0.1:8888/user/token'
{"access_token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MDEyNjE0MjksImlhdCI6MTYwMDY1NjYyOX0.6u_hpE_4m5gcI90taJLZtvfekwUmjrbNJ-5saaDGeQc","access_expire":1601261429,"refresh_after":1600959029}
```
## 2. 服务器验证JWT token
1. 在api文件中通过`jwt: JwtAuth`标记的service表示激活了jwt认证。
2. 可以阅读rest/handler/authhandler.go文件了解服务器jwt实现。
3. 修改getuserlogic.go如下
```go
func (l *GetUserLogic) GetUser(req types.GetUserRequest) (*types.GetUserResponse, error) {
return &types.GetUserResponse{Name: "kim"}, nil
}
```
* 我们先不带JWT Authorization header请求头测试下返回http status code是401符合预期。
```sh
➜ curl -w "\nhttp: %{http_code} \n" --location --request POST '127.0.0.1:8888/user/info' \
--header 'Content-Type: application/json' \
--data-raw '{
"userId": "a"
}'
http: 401
```
* 加上Authorization header请求头测试。
```sh
➜ curl -w "\nhttp: %{http_code} \n" --location --request POST '127.0.0.1:8888/user/info' \
--header 'Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MDEyNjE0MjksImlhdCI6MTYwMDY1NjYyOX0.6u_hpE_4m5gcI90taJLZtvfekwUmjrbNJ-5saaDGeQc' \
--header 'Content-Type: application/json' \
--data-raw '{
"userId": "a"
}'
{"name":"kim"}
http: 200
```
综上所述基于go-zero的JWT认证完成在真实生产环境部署时候AccessSecret, AccessExpire, RefreshAfter根据业务场景通过配置文件配置RefreshAfter 是告诉客户端什么时候该刷新JWT token了一般都需要设置过期时间前几天。

View File

@@ -1,86 +0,0 @@
# 高效的关键词替换和敏感词过滤工具
## 1. 算法介绍
利用高效的Trie树建立关键词树如下图所示然后依次查找字符串中的相连字符是否形成树的一条路径
<img src="images/trie.png" alt="trie" width="350" />
发现掘金上[这篇文章](https://juejin.im/post/6844903750490914829)写的比较详细,可以一读,具体原理在此不详述。
## 2. 关键词替换
支持关键词重叠,自动选用最长的关键词,代码示例如下:
```go
replacer := stringx.NewReplacer(map[string]string{
"日本": "法国",
"日本的首都": "东京",
"东京": "日本的首都",
})
fmt.Println(replacer.Replace("日本的首都是东京"))
```
可以得到:
```Plain Text
东京是日本的首都
```
示例代码见`example/stringx/replace/replace.go`
## 3. 查找敏感词
代码示例如下:
```go
filter := stringx.NewTrie([]string{
"AV演员",
"苍井空",
"AV",
"日本AV女优",
"AV演员色情",
})
keywords := filter.FindKeywords("日本AV演员兼电视、电影演员。苍井空AV女优是xx出道, 日本AV女优们最精彩的表演是AV演员色情表演")
fmt.Println(keywords)
```
可以得到:
```Plain Text
[苍井空 日本AV女优 AV演员色情 AV AV演员]
```
## 4. 敏感词过滤
代码示例如下:
```go
filter := stringx.NewTrie([]string{
"AV演员",
"苍井空",
"AV",
"日本AV女优",
"AV演员色情",
}, stringx.WithMask('?')) // 默认替换为*
safe, keywords, found := filter.Filter("日本AV演员兼电视、电影演员。苍井空AV女优是xx出道, 日本AV女优们最精彩的表演是AV演员色情表演")
fmt.Println(safe)
fmt.Println(keywords)
fmt.Println(found)
```
可以得到:
```Plain Text
日本????兼电视、电影演员。?????女优是xx出道, ??????们最精彩的表演是??????表演
[苍井空 日本AV女优 AV演员色情 AV AV演员]
true
```
示例代码见`example/stringx/filter/filter.go`
## 5. Benchmark
| Sentences | Keywords | Regex | go-zero |
| --------- | -------- | -------- | ------- |
| 10000 | 10000 | 16min10s | 27.2ms |

View File

@@ -1,51 +0,0 @@
# 服务自适应降载保护设计
## 设计目的
* 保证系统不被过量请求拖垮
* 在保证系统稳定的前提下,尽可能提供更高的吞吐量
## 设计考虑因素
* 如何衡量系统负载
* 是否处于虚机或容器内需要读取cgroup相关负载
* 用1000m表示100%CPU推荐使用800m表示系统高负载
* 尽可能小的Overhead不显著增加RT
* 不考虑服务本身所依赖的DB或者缓存系统问题这类问题通过熔断机制来解决
## 机制设计
* 计算CPU负载时使用滑动平均来降低CPU负载抖动带来的不稳定关于滑动平均见参考资料
* 滑动平均就是取之前连续N次值的近似平均N取值可以通过超参beta来决定
* 当CPU负载大于指定值时触发降载保护机制
* 时间窗口机制用滑动窗口机制来记录之前时间窗口内的QPS和RT(response time)
* 滑动窗口使用5秒钟50个桶的方式每个桶保存100ms时间内的请求循环利用最新的覆盖最老的
* 计算maxQPS和minRT时需要过滤掉最新的时间没有用完的桶防止此桶内只有极少数请求并且RT处于低概率的极小值所以计算maxQPS和minRT时按照上面的50个桶的参数只会算49个
* 满足以下所有条件则拒绝该请求
1. 当前CPU负载超过预设阈值或者上次拒绝时间到现在不超过1秒(冷却期)。冷却期是为了不能让负载刚下来就马上增加压力导致立马又上去的来回抖动
2. `averageFlying > max(1, QPS*minRT/1e3)`
* averageFlying = MovingAverage(flying)
* 在算MovingAverage(flying)的时候超参beta默认取值为0.9表示计算前十次的平均flying值
* 取flying值的时候有三种做法
1. 请求增加后更新一次averageFlying见图中橙色曲线
2. 请求结束后更新一次averageFlying见图中绿色曲线
3. 请求增加后更新一次averageFlying请求结束后更新一次averageFlying
我们使用的是第二种,这样可以更好的防止抖动,如图:
![flying策略对比](images/shedding_flying.jpg)
* QPS = maxPass * bucketsPerSecond
* maxPass表示每个有效桶里的成功的requests
* bucketsPerSecond表示每秒有多少个桶
* 1e3表示1000毫秒minRT单位也是毫秒QPS*minRT/1e3得到的就是平均每个时间点有多少并发请求
## 降载的使用
* 已经在ngin和rpcx框架里增加了可选激活配置
* CpuThreshold如果把值设置为大于0的值则激活该服务的自动降载机制
* 如果请求被drop那么错误日志里会有`dropreq`关键字
## 参考资料
* [滑动平均](https://www.cnblogs.com/wuliytTaotao/p/9479958.html)
* [Sentinel自适应限流](https://github.com/alibaba/Sentinel/wiki/%E7%B3%BB%E7%BB%9F%E8%87%AA%E9%80%82%E5%BA%94%E9%99%90%E6%B5%81)
* [Kratos自适应限流保护](https://github.com/bilibili/kratos/blob/master/doc/wiki-cn/ratelimit.md)

View File

@@ -1,131 +0,0 @@
# 文本序列化和反序列化
go-zero针对文本的序列化和反序列化主要在三个地方使用
* http api请求体的反序列化
* http api返回体的序列化
* 配置文件的反序列化
本文假定读者已经定义过api文件以及修改过配置文件如不熟悉可参照
* [快速构建高并发微服务](shorturl.md)
* [快速构建高并发微服务](bookstore.md)
## 1. http api请求体的反序列化
在反序列化的过程中的针对请求数据的`数据格式`以及`数据校验`需求go-zero实现了自己的一套反序列化机制
### 1.1 `数据格式`以订单order.api文件为例
```go
type (
createOrderReq struct {
token string `path:"token"` // 用户token
productId string `json:"productId"` // 商品ID
num int `json:"num"` // 商品数量
}
createOrderRes struct {
success bool `json:"success"` // 是否成功
}
findOrderReq struct {
token string `path:"token"` // 用户token
page int `form:"page"` // 页数
pageSize int8 `form:"pageSize"` // 页大小
}
findOrderRes struct {
orderInfo []orderInfo `json:"orderInfo"` // 商品ID
}
orderInfo struct {
productId string `json:"productId"` // 商品ID
productName string `json:"productName"` // 商品名称
num int `json:"num"` // 商品数量
}
deleteOrderReq struct {
id string `path:"id"`
}
deleteOrderRes struct {
success bool `json:"success"` // 是否成功
}
)
service order {
@doc(
summary: 创建订单
)
@server(
handler: CreateOrderHandler
)
post /order/add/:token(createOrderReq) returns(createOrderRes)
@doc(
summary: 获取订单
)
@server(
handler: FindOrderHandler
)
get /order/find/:token(findOrderReq) returns(findOrderRes)
@doc(
summary: 删除订单
)
@server(
handler: DeleteOrderHandler
)
delete /order/:id(deleteOrderReq) returns(deleteOrderRes)
}
```
http api请求体的反序列化的tag有三种
* `path`http url 路径中参数反序列化
* `/order/add/1234567`会解析出来token为1234567
* `form`http form表单反序列化需要 header头添加 Content-Type: multipart/form-data
* `/order/find/1234567?page=1&pageSize=20`会解析出来token为1234567page为1pageSize为20
* `json`http request json body反序列化需要 header头添加 Content-Type: application/json
* `{"productId":"321","num":1}`会解析出来productId为321num为1
### 1.2 `数据校验`以用户user.api文件为例
```go
type (
createUserReq struct {
age int8 `json:"age,default=20,range=(12:100]"` // 年龄
name string `json:"name"` // 名字
alias string `json:"alias,optional"` // 别名
sex string `json:"sex,options=male|female"` // 性别
avatar string `json:"avatar,default=default.png"` // 头像
}
createUserRes struct {
success bool `json:"success"` // 是否成功
}
)
service user {
@doc(
summary: 创建订单
)
@server(
handler: CreateUserHandler
)
post /user/add(createUserReq) returns(createUserRes)
}
```
数据校验有很多种方式,包括以下但不限:
* `age`默认不输入为20输入则取值范围为(12:100],前开后闭
* `name`:必填,不可为空
* `alias`:选填,可为空
* `sex`:必填,取值为`male``female`
* `avatar`:选填,默认为`default.png`
更多详情参见[unmarshaler_test.go](../core/mapping/unmarshaler_test.go)
## 2. http api返回体的序列化
* 使用官方默认的`encoding/json`包序列化,在此不再累赘
## 3. 配置文件的反序列化
* `配置文件的反序列化``http api请求体的反序列化`使用同一套解析规则,可参照`http api请求体的反序列化`

View File

@@ -1,190 +0,0 @@
# 通过MapReduce降低服务响应时间
在微服务中开发中api网关扮演对外提供restful api的角色而api的数据往往会依赖其他服务复杂的api更是会依赖多个甚至数十个服务。虽然单个被依赖服务的耗时一般都比较低但如果多个服务串行依赖的话那么整个api的耗时将会大大增加。
那么通过什么手段来优化呢我们首先想到的是通过并发来的方式来处理依赖这样就能降低整个依赖的耗时Go基础库中为我们提供了 [WaitGroup](https://golang.org/pkg/sync/#WaitGroup) 工具用来进行并发控制但实际业务场景中多个依赖如果有一个出错我们期望能立即返回而不是等所有依赖都执行完再返回结果而且WaitGroup中对变量的赋值往往需要加锁每个依赖函数都需要添加Add和Done对于新手来说比较容易出错
基于以上的背景go-zero框架中为我们提供了并发处理工具[MapReduce](https://github.com/tal-tech/go-zero/blob/master/core/mr/mapreduce.go)该工具开箱即用不需要做什么初始化我们通过下图看下使用MapReduce和没使用的耗时对比:
![依赖耗时对比](./images/mr_time.png)
相同的依赖串行处理的话需要200ms使用MapReduce后的耗时等于所有依赖中最大的耗时为100ms可见MapReduce可以大大降低服务耗时而且随着依赖的增加效果就会越明显减少处理耗时的同时并不会增加服务器压力
## 并发处理工具[MapReduce](https://github.com/tal-tech/go-zero/tree/master/core/mr)
[MapReduce](https://zh.wikipedia.org/wiki/MapReduce)是Google提出的一个软件架构用于大规模数据集的并行运算go-zero中的MapReduce工具正是借鉴了这种架构思想
go-zero框架中的MapReduce工具主要用来对批量数据进行并发的处理以此来提升服务的性能
![mapreduce原理图](./images/mr.png)
我们通过几个示例来演示MapReduce的用法
MapReduce主要有三个参数第一个参数为generate用以生产数据第二个参数为mapper用以对数据进行处理第三个参数为reducer用以对mapper后的数据做聚合返回还可以通过opts选项设置并发处理的线程数量
场景一: 某些功能的结果往往需要依赖多个服务比如商品详情的结果往往会依赖用户服务、库存服务、订单服务等等一般被依赖的服务都是以rpc的形式对外提供为了降低依赖的耗时我们往往需要对依赖做并行处理
```go
func productDetail(uid, pid int64) (*ProductDetail, error) {
var pd ProductDetail
err := mr.Finish(func() (err error) {
pd.User, err = userRpc.User(uid)
return
}, func() (err error) {
pd.Store, err = storeRpc.Store(pid)
return
}, func() (err error) {
pd.Order, err = orderRpc.Order(pid)
return
})
if err != nil {
log.Printf("product detail error: %v", err)
return nil, err
}
return &pd, nil
}
```
该示例中返回商品详情依赖了多个服务获取数据,因此做并发的依赖处理,对接口的性能有很大的提升
场景二: 很多时候我们需要对一批数据进行处理比如对一批用户id效验每个用户的合法性并且效验过程中有一个出错就认为效验失败返回的结果为效验合法的用户id
```go
func checkLegal(uids []int64) ([]int64, error) {
r, err := mr.MapReduce(func(source chan<- interface{}) {
for _, uid := range uids {
source <- uid
}
}, func(item interface{}, writer mr.Writer, cancel func(error)) {
uid := item.(int64)
ok, err := check(uid)
if err != nil {
cancel(err)
}
if ok {
writer.Write(uid)
}
}, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) {
var uids []int64
for p := range pipe {
uids = append(uids, p.(int64))
}
writer.Write(uids)
})
if err != nil {
log.Printf("check error: %v", err)
return nil, err
}
return r.([]int64), nil
}
func check(uid int64) (bool, error) {
// do something check user legal
return true, nil
}
```
该示例中如果check过程出现错误则通过cancel方法结束效验过程并返回error整个效验过程结束如果某个uid效验结果为false则最终结果不返回该uid
***MapReduce使用注意事项***
* mapper和reducer中都可以调用cancel参数为error调用后立即返回返回结果为nil, error
* mapper中如果不调用writer.Write则item最终不会被reducer聚合
* reducer中如果不调用writer.Wirte则返回结果为nil, ErrReduceNoOutput
* reducer为单线程所有mapper出来的结果在这里串行聚合
***实现原理分析:***
MapReduce中首先通过buildSource方法通过执行generate(参数为无缓冲channel)产生数据并返回无缓冲的channelmapper会从该channel中读取数据
```go
func buildSource(generate GenerateFunc) chan interface{} {
source := make(chan interface{})
go func() {
defer close(source)
generate(source)
}()
return source
}
```
在MapReduceWithSource方法中定义了cancel方法mapper和reducer中都可以调用该方法调用后主线程收到close信号会立马返回
```go
cancel := once(func(err error) {
if err != nil {
retErr.Set(err)
} else {
// 默认的error
retErr.Set(ErrCancelWithNil)
}
drain(source)
// 调用close(ouput)主线程收到Done信号立马返回
finish()
})
```
在mapperDispatcher方法中调用了executeMappersexecuteMappers消费buildSource产生的数据每一个item都会起一个goroutine单独处理默认最大并发数为16可以通过WithWorkers进行设置
```go
var wg sync.WaitGroup
defer func() {
wg.Wait() // 保证所有的item都处理完成
close(collector)
}()
pool := make(chan lang.PlaceholderType, workers)
writer := newGuardedWriter(collector, done) // 将mapper处理完的数据写入collector
for {
select {
case <-done: // 当调用了cancel会触发立即返回
return
case pool <- lang.Placeholder: // 控制最大并发数
item, ok := <-input
if !ok {
<-pool
return
}
wg.Add(1)
go func() {
defer func() {
wg.Done()
<-pool
}()
mapper(item, writer) // 对item进行处理处理完调用writer.Write把结果写入collector对应的channel中
}()
}
}
```
reducer单goroutine对数mapper写入collector的数据进行处理如果reducer中没有手动调用writer.Write则最终会执行finish方法对output进行close避免死锁
```go
go func() {
defer func() {
if r := recover(); r != nil {
cancel(fmt.Errorf("%v", r))
} else {
finish()
}
}()
reducer(collector, writer, cancel)
}()
```
在该工具包中还提供了许多针对不同业务场景的方法实现原理与MapReduce大同小异感兴趣的同学可以查看源码学习
* MapReduceVoid 功能和MapReduce类似但没有结果返回只返回error
* Finish 处理固定数量的依赖返回error有一个error立即返回
* FinishVoid 和Finish方法功能类似没有返回值
* Map 只做generate和mapper处理返回channel
* MapVoid 和Map功能类似无返回
本文主要介绍了go-zero框架中的MapReduce工具在实际的项目中非常实用。用好工具对于提升服务性能和开发效率都有很大的帮助希望本篇文章能给大家带来一些收获。

View File

@@ -1,112 +0,0 @@
# 基于prometheus的微服务指标监控
服务上线后我们往往需要对服务进行监控,以便能及早发现问题并做针对性的优化,监控又可分为多种形式,比如日志监控,调用链监控,指标监控等等。而通过指标监控能清晰的观察出服务指标的变化趋势,了解服务的运行状态,对于保证服务稳定起着非常重要的作用
[prometheus](https://prometheus.io/)是一个开源的系统监控和告警工具支持强大的查询语言PromQL允许用户实时选择和汇聚时间序列数据时间序列数据是服务端通过HTTP协议主动拉取获得也可以通过中间网关来推送时间序列数据可以通过静态配置文件或服务发现来获取监控目标
## Prometheus 的架构
Prometheus 的整体架构以及生态系统组件如下图所示:
![prometheus](./images/prometheus.png)
Prometheus Server直接从监控目标中或者间接通过推送网关来拉取监控指标它在本地存储所有抓取到样本数据并对此数据执行一系列规则以汇总和记录现有数据的新时间序列或生成告警。可以通过 [Grafana](https://grafana.com/) 或者其他工具来实现监控数据的可视化
## go-zero基于prometheus的服务指标监控
[go-zero](https://github.com/tal-tech/go-zero) 框架中集成了基于prometheus的服务指标监控下面我们通过go-zero官方的示例[shorturl](https://github.com/tal-tech/go-zero/blob/master/doc/shorturl.md)来演示是如何对服务指标进行收集监控的:
- 第一步需要先安装Prometheus安装步骤请参考[官方文档](https://prometheus.io/)
- go-zero默认不开启prometheus监控开启方式很简单只需要在shorturl-api.yaml文件中增加配置如下其中Host为Prometheus Server地址为必填配置Port端口不填默认9091Path为用来拉取指标的路径默认为/metrics
```go
Prometheus:
Host: 127.0.0.1
Port: 9091
Path: /metrics
```
- 编辑prometheus的配置文件prometheus.yml添加如下配置并创建targets.json
```go
- job_name: 'file_ds'
file_sd_configs:
- files:
- targets.json
```
- 编辑targets.json文件其中targets为shorturl配置的目标地址并添加了几个默认的标签
```go
[
{
"targets": ["127.0.0.1:9091"],
"labels": {
"job": "shorturl-api",
"app": "shorturl-api",
"env": "test",
"instance": "127.0.0.1:8888"
}
}
]
```
- 启动prometheus服务默认侦听在9090端口
```go
prometheus --config.file=prometheus.yml
```
- 在浏览器输入http://127.0.0.1:9090/然后点击Status -> Targets即可看到状态为Up的Job并且Lables栏可以看到我们配置的默认的标签
![job状态为up](./images/prom_up.png)
通过以上几个步骤我们完成了prometheus对shorturl服务的指标监控收集的配置工作为了演示简单我们进行了手动的配置在实际的生产环境中一般采用定时更新配置文件或者服务发现的方式来配置监控目标篇幅有限这里不展开讲解感兴趣的同学请自行查看相关文档
## go-zero监控的指标类型
go-zero中目前在http的中间件和rpc的拦截器中添加了对请求指标的监控。
主要从请求耗时和请求错误两个维度请求耗时采用了Histogram指标类型定义了多个Buckets方便进行分位统计请求错误采用了Counter类型并在http metric中添加了path标签rpc metric中添加了method标签以便进行细分监控。
接下来演示如何查看监控指标:
首先在命令行多次执行如下命令
```go
curl -i "http://localhost:8888/shorten?url=http://www.xiaoheiban.cn"
```
打开Prometheus切换到Graph界面在输入框中输入{path="/shorten"}指令,即可查看监控指标,如下图
![查询面板](./images/panel.png)
我们通过PromQL语法查询过滤path为/shorten的指标结果中显示了指标名以及指标数值其中http_server_requests_code_total指标中code值为http的状态码200表明请求成功http_server_requests_duration_ms_bucket中对不同bucket结果分别进行了统计还可以看到所有的指标中都添加了我们配置的默认指标
Console界面主要展示了查询的指标结果Graph界面为我们提供了简单的图形化的展示界面在实际的生产环境中我们一般使用Grafana做图形化的展示
## grafana可视化界面
[grafana](https://grafana.com/)是一款可视化工具功能强大支持多种数据来源Prometheus、Elasticsearch、Graphite等安装比较简单请参考[官方文档](https://grafana.com/docs/grafana/latest/)grafana默认端口3000安装好后再浏览器输入http://localhost:3000/默认账号和密码都为admin
下面演示如何基于以上指标进行可视化界面的绘制:
- 点击左侧边栏Configuration->Data Source->Add data source进行数据源添加其中HTTP的URL为数据源的地址
![datasource](./images/datasource.png)
- 点击左侧边栏添加dashboard然后添加Variables方便针对不同的标签进行过滤筛选比如添加app变量用来过滤不同的服务
![variables](./images/variables.png)
- 进入dashboard点击右上角Add panel添加面板以path维度统计接口的qps
![qps](./images/qps.png)
- 最终的效果如下所示可以通过服务名称过滤不同的服务面板展示了path为/shorten的qps变化趋势
![qps panel](./images/qps_panel.png)
## 总结
以上演示了go-zero中基于prometheus+grafana服务指标监控的简单流程生产环境中可以根据实际的场景做不同维度的监控分析。现在go-zero的监控指标主要还是针对http和rpc这对于服务的整体监控显然还是不足的比如容器资源的监控依赖的mysql、redis等资源的监控以及自定义的指标监控等等go-zero在这方面后续还会持续优化。希望这篇文章能够给您带来帮助

View File

@@ -1,15 +0,0 @@
# PeriodicalExecutor设计
## 添加任务
* 当前没有未执行的任务
* 添加并启动定时器
* 已有未执行的任务
* 添加并检查是否到达最大缓存数
* 如到,执行所有缓存任务
* 未到,只添加
## 定时器到期
* 清除并执行所有缓存任务
* 再等待N个定时周期如果等待过程中一直没有新任务则退出

View File

@@ -1,167 +0,0 @@
# 防止缓存击穿之进程内共享调用
go-zero微服务框架中提供了许多开箱即用的工具好的工具不仅能提升服务的性能而且还能提升代码的鲁棒性避免出错实现代码风格的统一方便他人阅读等等。
本文主要讲述进程内共享调用神器[SharedCalls](https://github.com/tal-tech/go-zero/blob/master/core/syncx/sharedcalls.go)。
## 使用场景
并发场景下可能会有多个线程协程同时请求同一份资源如果每个请求都要走一遍资源的请求过程除了比较低效之外还会对资源服务造成并发的压力。举一个具体例子比如缓存失效多个请求同时到达某服务请求某资源该资源在缓存中已经失效此时这些请求会继续访问DB做查询会引起数据库压力瞬间增大。而使用SharedCalls可以使得同时多个请求只需要发起一次拿结果的调用其他请求"坐享其成",这种设计有效减少了资源服务的并发压力,可以有效防止缓存击穿。
高并发场景下当某个热点key缓存失效后多个请求会同时从数据库加载该资源并保存到缓存如果不做防范可能会导致数据库被直接打死。针对这种场景go-zero框架中已经提供了实现具体可参看[sqlc](https://github.com/tal-tech/go-zero/blob/master/core/stores/sqlc/cachedsql.go)和[mongoc](https://github.com/tal-tech/go-zero/blob/master/core/stores/mongoc/cachedcollection.go)等实现代码。
为了简化演示代码我们通过多个线程同时去获取一个id来模拟缓存的场景。如下
```go
func main() {
const round = 5
var wg sync.WaitGroup
barrier := syncx.NewSharedCalls()
wg.Add(round)
for i := 0; i < round; i++ {
// 多个线程同时执行
go func() {
defer wg.Done()
// 可以看到多个线程在同一个key上去请求资源获取资源的实际函数只会被调用一次
val, err := barrier.Do("once", func() (interface{}, error) {
// sleep 1秒为了让多个线程同时取once这个key上的数据
time.Sleep(time.Second)
// 生成了一个随机的id
return stringx.RandId(), nil
})
if err != nil {
fmt.Println(err)
} else {
fmt.Println(val)
}
}()
}
wg.Wait()
}
```
运行,打印结果为:
```
837c577b1008a0db
837c577b1008a0db
837c577b1008a0db
837c577b1008a0db
837c577b1008a0db
```
可以看出只要是同一个key上的同时发起的请求都会共享同一个结果对获取DB数据进缓存等场景特别有用可以有效防止缓存击穿。
## 关键源码分析
- SharedCalls interface提供了Do和DoEx两种方法的抽象
```go
// SharedCalls接口提供了Do和DoEx两种方法
type SharedCalls interface {
Do(key string, fn func() (interface{}, error)) (interface{}, error)
DoEx(key string, fn func() (interface{}, error)) (interface{}, bool, error)
}
```
- SharedCalls interface的具体实现sharedGroup
```go
// call代表对指定资源的一次请求
type call struct {
wg sync.WaitGroup // 用于协调各个请求goroutine之间的资源共享
val interface{} // 用于保存请求的返回值
err error // 用于保存请求过程中发生的错误
}
type sharedGroup struct {
calls map[string]*call
lock sync.Mutex
}
```
- sharedGroup的Do方法
- key参数可以理解为资源的唯一标识。
- fn参数真正获取资源的方法。
- 处理过程分析:
```go
// 当多个请求同时使用Do方法请求资源时
func (g *sharedGroup) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
// 先申请加锁
g.lock.Lock()
// 根据key获取对应的call结果,并用变量c保存
if c, ok := g.calls[key]; ok {
// 拿到call以后释放锁此处call可能还没有实际数据只是一个空的内存占位
g.lock.Unlock()
// 调用wg.Wait判断是否有其他goroutine正在申请资源如果阻塞说明有其他goroutine正在获取资源
c.wg.Wait()
// 当wg.Wait不再阻塞表示资源获取已经结束可以直接返回结果
return c.val, c.err
}
// 没有拿到结果则调用makeCall方法去获取资源注意此处仍然是锁住的可以保证只有一个goroutine可以调用makecall
c := g.makeCall(key, fn)
// 返回调用结果
return c.val, c.err
}
```
- sharedGroup的DoEx方法
- 和Do方法类似只是返回值中增加了布尔值表示值是调用makeCall方法直接获取的还是取的共享成果
```go
func (g *sharedGroup) DoEx(key string, fn func() (interface{}, error)) (val interface{}, fresh bool, err error) {
g.lock.Lock()
if c, ok := g.calls[key]; ok {
g.lock.Unlock()
c.wg.Wait()
return c.val, false, c.err
}
c := g.makeCall(key, fn)
return c.val, true, c.err
}
```
- sharedGroup的makeCall方法
- 该方法由Do和DoEx方法调用是真正发起资源请求的方法。
```go
// 进入makeCall的一定只有一个goroutine因为要拿锁锁住的
func (g *sharedGroup) makeCall(key string, fn func() (interface{}, error)) *call {
// 创建call结构用于保存本次请求的结果
c := new(call)
// wg加1用于通知其他请求资源的goroutine等待本次资源获取的结束
c.wg.Add(1)
// 将用于保存结果的call放入map中以供其他goroutine获取
g.calls[key] = c
// 释放锁这样其他请求的goroutine才能获取call的内存占位
g.lock.Unlock()
defer func() {
// delete key first, done later. can't reverse the order, because if reverse,
// another Do call might wg.Wait() without get notified with wg.Done()
g.lock.Lock()
delete(g.calls, key)
g.lock.Unlock()
// 调用wg.Done通知其他goroutine可以返回结果这样本批次所有请求完成结果的共享
c.wg.Done()
}()
// 调用fn方法将结果填入变量c中
c.val, c.err = fn()
return c
}
```
## 最后
本文主要介绍了go-zero框架中的 SharedCalls工具对其应用场景和关键代码做了简单的梳理希望本篇文章能给大家带来一些收获。

View File

@@ -1,543 +0,0 @@
# Rapid development of microservices
English | [简体中文](shorturl.md)
## 0. Why building microservices is so difficult
To build a well working microservice, we need a lot of knowledge from different aspects.
* basic functionalities
1. concurrency control and rate limiting, to avoid being brought down by unexpected inbound traffic
2. service discovery, make sure new or terminated nodes are detected asap
3. load balancing, to distribute traffic based on the throughput of nodes
4. timeout control, to avoid nodes continuing to process requests that have already timed out
5. circuit breaking and load shedding, to fail fast and protect failing nodes so they can recover asap
* advanced functionalities
1. authorization, make sure users can only access their own data
2. tracing, to understand the whole system and locate the specific problem quickly
3. logging, collects data and helps to backtrace problems
4. observability, no metrics, no optimization
For each point listed above, we would need a long article to describe the theory and the implementation. For us developers, it's very difficult to understand all these concepts and make them happen in our systems. Fortunately, we can rely on frameworks that have already served busy sites well. [go-zero](https://github.com/tal-tech/go-zero) is born for this purpose, especially for cloud-native microservice systems.
As well, we always adhere to the idea of **prefer tools over conventions and documents**. We hope to reduce the boilerplate code as much as possible, and let developers focus on the business-related code. For this purpose, we developed the tool `goctl`.
Let's take the shorturl microservice as a quick example to demonstrate how to quickly create microservices with [go-zero](https://github.com/tal-tech/go-zero). After finishing this tutorial, you'll find that it's so easy to write microservices!
## 1. What is a shorturl service
A shorturl service converts a long url into a short one by using a well designed algorithm.
We write this shorturl service to demonstrate the complete flow of creating a microservice with go-zero. The algorithms and implementation details are greatly simplified, so this shorturl service is not suitable for production use.
## 2. Architecture of shorturl microservice
<img src="images/shorturl-arch.png" alt="Architecture" width="800" />
* In this tutorial, I only use one rpc service, transform, for the demonstration. This doesn't mean an API Gateway can only call one RPC service; it's kept to one here only for simplicity.
* In production, we should try our best to isolate the data that belongs to each service, which means each service should only use its own database.
## 3. goctl generated code overview
All modules with green background are generated, and will be enabled when necessary. The modules with red background are handwritten code, which is typically business logic code.
* API Gateway
<img src="images/api-gen.png" alt="api" width="800" />
* RPC
<img src="images/rpc-gen.png" alt="rpc" width="800" />
* model
<img src="images/model-gen.png" alt="model" width="800" />
And now, let's walk through the complete flow of quickly creating a microservice with go-zero.
## 4. Get started
* install etcd, mysql, redis
* install protoc-gen-go
```shell
go get -u github.com/golang/protobuf/protoc-gen-go
```
* install goctl
```shell
GO111MODULE=on go get -u github.com/tal-tech/go-zero/tools/goctl
```
* create the working dir `shorturl` and `shorturl/api`
* in `shorturl` dir, execute `go mod init shorturl` to initialize `go.mod`
## 5. Write code for API Gateway
* use goctl to generate `api/shorturl.api`
```shell
goctl api -o shorturl.api
```
for simplicity, the leading `info` block is removed, and the code looks like:
```go
type (
expandReq struct {
shorten string `form:"shorten"`
}
expandResp struct {
url string `json:"url"`
}
)
type (
shortenReq struct {
url string `form:"url"`
}
shortenResp struct {
shorten string `json:"shorten"`
}
)
service shorturl-api {
@server(
handler: ShortenHandler
)
get /shorten(shortenReq) returns(shortenResp)
@server(
handler: ExpandHandler
)
get /expand(expandReq) returns(expandResp)
}
```
the usage of the `type` keyword is the same as in go; `service` is used to define get/post/head/delete api requests, described below:
* `service shorturl-api {` defines the service name
* `@server` defines the properties used on the server side
* `handler` defines the handler name
* `get /shorten(shortenReq) returns(shortenResp)` defines this is a GET request, the request parameters, and the response parameters
* generate the code for API Gateway by using goctl
```shell
goctl api go -api shorturl.api -dir .
```
the generated file structure looks like:
```Plain Text
.
├── api
│   ├── etc
│   │   └── shorturl-api.yaml // configuration file
│   ├── internal
│   │   ├── config
│   │   │   └── config.go // configuration definition
│   │   ├── handler
│   │   │   ├── expandhandler.go // implements expandHandler
│   │   │   ├── routes.go // routes definition
│   │   │   └── shortenhandler.go // implements shortenHandler
│   │   ├── logic
│   │   │   ├── expandlogic.go // implements ExpandLogic
│   │   │   └── shortenlogic.go // implements ShortenLogic
│   │   ├── svc
│   │   │   └── servicecontext.go // defines ServiceContext
│   │   └── types
│   │   └── types.go // defines request/response
│   ├── shorturl.api
│   └── shorturl.go // main entrance
├── go.mod
└── go.sum
```
* start the API Gateway service; it listens on port 8888 by default (see the config sketch below)
```shell
go run shorturl.go -f etc/shorturl-api.yaml
```
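The default port comes from `etc/shorturl-api.yaml`. The generated file typically looks something like the following; the values shown are the generator defaults and may differ across goctl versions:
```yaml
Name: shorturl-api
Host: 0.0.0.0
Port: 8888
```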
* test API Gateway service
```shell
curl -i "http://localhost:8888/shorten?url=http://www.xiaoheiban.cn"
```
the response looks like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 27 Aug 2020 14:31:39 GMT
Content-Length: 15
{"shortUrl":""}
```
You can see that the API Gateway service did nothing except return a zero value. Now let's implement the business logic in the rpc service.
* you can modify `internal/svc/servicecontext.go` to pass dependencies if needed
* implement logic in package `internal/logic`
* you can use goctl to generate client code based on the .api file; an example command is sketched below
* by now, client engineers can already work against the api without waiting for the server-side implementation
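As a hedged example of the client-side generation mentioned above, assuming a goctl build that ships the `ts` generator (check `goctl api --help` for the generators available in your version):
```shell
goctl api ts -api shorturl.api -dir ./client/ts
```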
## 6. Write code for transform rpc service
* under the directory `shorturl`, create the directory `rpc`
* under the directory `rpc/transform`, create the `transform.proto` file
```shell
goctl rpc template -o transform.proto
```
edit the file and make the code look like:
```protobuf
syntax = "proto3";
package transform;
message expandReq {
string shorten = 1;
}
message expandResp {
string url = 1;
}
message shortenReq {
string url = 1;
}
message shortenResp {
string shorten = 1;
}
service transformer {
rpc expand(expandReq) returns(expandResp);
rpc shorten(shortenReq) returns(shortenResp);
}
```
* use goctl to generate the rpc code, execute the following command in `rpc/transform`
```shell
goctl rpc proto -src transform.proto
```
the generated file structure looks like:
```Plain Text
rpc/transform
├── etc
│   └── transform.yaml // configuration file
├── internal
│   ├── config
│   │   └── config.go // configuration definition
│   ├── logic
│   │   ├── expandlogic.go // implements expand logic
│   │   └── shortenlogic.go // implements shorten logic
│   ├── server
│   │   └── transformerserver.go // rpc handler
│   └── svc
│   └── servicecontext.go // defines service context, like dependencies
├── pb
│   └── transform.pb.go
├── transform.go // rpc main entrance
├── transform.proto
└── transformer
├── transformer.go // defines how rpc clients call this service
├── transformer_mock.go // mock file, for test purpose
└── types.go // request/response definition
```
just run it; the output looks like:
```shell
$ go run transform.go -f etc/transform.yaml
Starting rpc server at 127.0.0.1:8080...
```
you can change the listening port in the file `etc/transform.yaml`.
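For reference, the generated `etc/transform.yaml` typically looks something like this; the values are the generator defaults and may differ across versions:
```yaml
Name: transform.rpc
ListenOn: 127.0.0.1:8080
Etcd:
  Hosts:
  - 127.0.0.1:2379
  Key: transform.rpc
```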
## 7. Modify API Gateway to call transform rpc service
* modify the configuration file `shorturl-api.yaml`, add the following:
```yaml
Transform:
Etcd:
Hosts:
- localhost:2379
Key: transform.rpc
```
the transform service is discovered automatically through etcd.
* modify the file `internal/config/config.go`, add dependency on transform service:
```go
type Config struct {
rest.RestConf
Transform zrpc.RpcClientConf // manual code
}
```
* modify the file `internal/svc/servicecontext.go`, like below:
```go
type ServiceContext struct {
Config config.Config
Transformer transformer.Transformer // manual code
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Config: c,
Transformer: transformer.NewTransformer(zrpc.MustNewClient(c.Transform)), // manual code
}
}
```
dependencies are passed between different business logic components through ServiceContext.
* modify the method `Expand` in the file `internal/logic/expandlogic.go`, looks like:
```go
func (l *ExpandLogic) Expand(req types.ExpandReq) (*types.ExpandResp, error) {
// manual code start
resp, err := l.svcCtx.Transformer.Expand(l.ctx, &transformer.ExpandReq{
Shorten: req.Shorten,
})
if err != nil {
return nil, err
}
return &types.ExpandResp{
Url: resp.Url,
}, nil
// manual code stop
}
```
the shortened url is restored by calling the `Expand` method of `transformer`.
* modify the file `internal/logic/shortenlogic.go`, looks like:
```go
func (l *ShortenLogic) Shorten(req types.ShortenReq) (*types.ShortenResp, error) {
// manual code start
resp, err := l.svcCtx.Transformer.Shorten(l.ctx, &transformer.ShortenReq{
Url: req.Url,
})
if err != nil {
return nil, err
}
return &types.ShortenResp{
Shorten: resp.Shorten,
}, nil
// manual code stop
}
```
the url is shortened by calling the `Shorten` method of `transformer`.
By now, we've finished modifying the API Gateway. All the manually added code is marked.
## 8. Define the database schema, generate the code for CRUD+cache
* under shorturl, create the directory `rpc/transform/model`: `mkdir -p rpc/transform/model`
* under the directory rpc/transform/model, create the file `shorturl.sql` with contents as below:
```sql
CREATE TABLE `shorturl`
(
`shorten` varchar(255) NOT NULL COMMENT 'shorten key',
`url` varchar(255) NOT NULL COMMENT 'original url',
PRIMARY KEY(`shorten`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
```
* create DB and table
```sql
create database gozero;
```
```sql
source shorturl.sql;
```
* under the directory `rpc/transform/model` execute the following command to generate the CRUD+cache code, `-c` means using `redis cache`
```shell
goctl model mysql ddl -c -src shorturl.sql -dir .
```
you can also generate the code from the database url by using the `datasource` subcommand instead of `ddl`
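For example, a sketch of the `datasource` variant; the `-url` and `-table` flags are assumptions about this goctl version, so check `goctl model mysql datasource --help` before relying on them:
```shell
goctl model mysql datasource -url="root:@tcp(localhost:3306)/gozero" -table="shorturl" -c -dir .
```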
the generated file structure looks like:
```Plain Text
rpc/transform/model
├── shorturl.sql
├── shorturlmodel.go // CRUD+cache code
└── vars.go // const and var definition
```
## 9. Modify shorten/expand rpc to call crud+cache
* modify `rpc/transform/etc/transform.yaml`, add the following:
```yaml
DataSource: root:@tcp(localhost:3306)/gozero
Table: shorturl
Cache:
- Host: localhost:6379
```
you can use multiple redis instances as cache; both single redis nodes and redis clusters are supported.
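For instance, a multi-node cache configuration might look like the following; the extra host and the `Type` field are illustrative assumptions rather than required settings:
```yaml
Cache:
  - Host: localhost:6379
    Type: node
  - Host: localhost:6380
    Type: node
```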
* modify `rpc/transform/internal/config/config.go`, like below:
```go
type Config struct {
zrpc.RpcServerConf
DataSource string // manual code
Table string // manual code
Cache cache.CacheConf // manual code
}
```
added the configuration for mysql and redis cache.
* modify `rpc/transform/internal/svc/servicecontext.go`, like below:
```go
type ServiceContext struct {
c config.Config
Model *model.ShorturlModel // manual code
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
c: c,
Model: model.NewShorturlModel(sqlx.NewMysql(c.DataSource), c.Cache, c.Table), // manual code
}
}
```
* modify `rpc/transform/internal/logic/expandlogic.go`, like below:
```go
func (l *ExpandLogic) Expand(in *transform.ExpandReq) (*transform.ExpandResp, error) {
// manual code start
res, err := l.svcCtx.Model.FindOne(in.Shorten)
if err != nil {
return nil, err
}
return &transform.ExpandResp{
Url: res.Url,
}, nil
// manual code stop
}
```
* modify `rpc/transform/internal/logic/shortenlogic.go`, looks like:
```go
func (l *ShortenLogic) Shorten(in *transform.ShortenReq) (*transform.ShortenResp, error) {
// manual code start, generates shorturl
key := hash.Md5Hex([]byte(in.Url))[:6]
_, err := l.svcCtx.Model.Insert(model.Shorturl{
Shorten: key,
Url: in.Url,
})
if err != nil {
return nil, err
}
return &transform.ShortenResp{
Shorten: key,
}, nil
// manual code stop
}
```
by now, we have finished modifying the code; all the modified code is marked.
## 10. Call shorten and expand services
* call shorten api
```shell
curl -i "http://localhost:8888/shorten?url=http://www.xiaoheiban.cn"
```
the response looks like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Sat, 29 Aug 2020 10:49:49 GMT
Content-Length: 21
{"shorten":"f35b2a"}
```
* call expand api
```shell
curl -i "http://localhost:8888/expand?shorten=f35b2a"
```
the response looks like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Sat, 29 Aug 2020 10:51:53 GMT
Content-Length: 34
{"url":"http://www.xiaoheiban.cn"}
```
## 11. Benchmark
Because benchmarking the write requests depends on the write throughput of mysql, we only benchmarked the expand api, which reads the data from mysql and caches it in redis. I chose 100 hot keys hardcoded in shorten.lua to generate the benchmark requests.
![Benchmark](images/shorturl-benchmark.png)
As shown above, on my MacBook Pro the QPS reaches around 30K+.
## 12. Full code
[https://github.com/tal-tech/go-zero/tree/master/example/shorturl](https://github.com/tal-tech/go-zero/tree/master/example/shorturl)
## 13. Conclusion
We always adhere to **prefer tools over conventions and documents**.
go-zero is not only a framework, but also a tool to simplify and standardize the building of microservice systems.
We not only keep the framework simple, but also encapsulate the complexity into the framework, so developers are freed from writing the difficult boilerplate code. Then we get rapid development and fewer failures.
The code generated by goctl includes lots of microservice components, like concurrency control, adaptive circuit breaking, adaptive load shedding, automatic cache control, etc. And it's easy to handle busy sites with it.
If you have any ideas that can help us improve productivity, let me know any time! 👏

View File

@@ -1,545 +0,0 @@
# Rapid development of high-concurrency microservices
[English](shorturl-en.md) | 简体中文
## 0. Why building microservices well is so hard
To build microservices well, there are a lot of concepts we need to understand and master, across several dimensions:
* basic functionality
1. concurrency control & rate limiting, to keep the service from being brought down by traffic bursts
2. service registration and service discovery, to make sure added or removed nodes are detected dynamically
3. load balancing, to distribute traffic according to the capacity of each node
4. timeout control, to avoid wasting work on requests that have already timed out
5. circuit breaking, fail fast, to protect failing nodes so they can recover
* advanced functionality
1. request authentication, to make sure each user can only access their own data
2. distributed tracing, for understanding the whole system and quickly locating the problem of a specific request
3. logging, for data collection and troubleshooting
4. observability: no metrics, no optimization
Each of these points would need a long write-up to explain its theory and implementation, so for us backend developers it is very hard to master them all and land them in our business systems. However, we can rely on a framework stack that has already been proven under heavy traffic. The [go-zero microservice framework](https://github.com/tal-tech/go-zero) was born for exactly this purpose.
We also always adhere to the idea that **tools are better than conventions and documents**. We hope to reduce developers' mental burden as much as possible, let them focus on code that creates business value, and cut down on repetitive code, so we developed the `goctl` tool.
Below I use the shorturl microservice to demonstrate the flow of quickly building microservices with [go-zero](https://github.com/tal-tech/go-zero). Once you walk through it, you will find that writing microservices is really this simple!
## 1. What is a shorturl service
A shorturl service converts a long URL into a short string through program computation.
Writing this shorturl service is to demonstrate the overall flow of building a complete microservice with go-zero; the algorithm and implementation details are simplified as much as possible, so it is not a production-grade shorturl service.
## 2. Architecture of the shorturl microservice
<img src="images/shorturl-arch.png" alt="Architecture" width="800" />
* Only one microservice, `Transform RPC`, is used here. This doesn't mean an API Gateway can only call one microservice; it is just the minimal demo of how the API Gateway calls RPC microservices.
* In a real project, each microservice should use its own database as much as possible, with clear data boundaries.
## 3. Overview of goctl-generated code per layer
All modules with a green background are generated automatically and enabled on demand; the red modules need to be written by yourself, i.e. adding dependencies and writing business-specific logic. The diagrams of each layer are as follows:
* API Gateway
<img src="images/shorturl-api.png" alt="api" width="800" />
* RPC
<img src="images/shorturl-rpc.png" alt="架构图" width="800" />
* model
<img src="images/shorturl-model.png" alt="model" width="800" />
Now let's walk through the complete flow of quickly building a microservice together. Let's `Go`! 🏃‍♂️
## 4. Prerequisites
* install etcd, mysql, redis
* install `protoc-gen-go`
```shell
go get -u github.com/golang/protobuf/protoc-gen-go
```
* install the goctl tool
```shell
GO111MODULE=on GOPROXY=https://goproxy.cn/,direct go get -u github.com/tal-tech/go-zero/tools/goctl
```
* create the working directories `shorturl` and `shorturl/api`
* in the `shorturl` directory, run `go mod init shorturl` to initialize `go.mod`
## 5. Write the API Gateway code
* in the `shorturl/api` directory, generate `api/shorturl.api` with goctl
```shell
goctl api -o shorturl.api
```
* edit `api/shorturl.api`; for brevity, the leading `info` block is removed, and the code looks like:
```go
type (
expandReq struct {
shorten string `form:"shorten"`
}
expandResp struct {
url string `json:"url"`
}
)
type (
shortenReq struct {
url string `form:"url"`
}
shortenResp struct {
shorten string `json:"shorten"`
}
)
service shorturl-api {
@server(
handler: ShortenHandler
)
get /shorten(shortenReq) returns(shortenResp)
@server(
handler: ExpandHandler
)
get /expand(expandReq) returns(expandResp)
}
```
the usage of `type` is the same as in go; `service` is used to define get/post/head/delete api requests, explained below:
* the line `service shorturl-api {` defines the service name
* the `@server` block defines the properties used on the server side
* `handler` defines the server-side handler name
* `get /shorten(shortenReq) returns(shortenResp)` defines the route, request parameters, and response parameters of the get method
* generate the API Gateway code with goctl
```shell
goctl api go -api shorturl.api -dir .
```
the generated file structure is as follows:
```Plain Text
.
├── api
│   ├── etc
│   │   └── shorturl-api.yaml // configuration file
│   ├── internal
│   │   ├── config
│   │   │   └── config.go // configuration definition
│   │   ├── handler
│   │   │   ├── expandhandler.go // implements expandHandler
│   │   │   ├── routes.go // route definitions
│   │   │   └── shortenhandler.go // implements shortenHandler
│   │   ├── logic
│   │   │   ├── expandlogic.go // implements ExpandLogic
│   │   │   └── shortenlogic.go // implements ShortenLogic
│   │   ├── svc
│   │   │   └── servicecontext.go // defines ServiceContext
│   │   └── types
│   │   └── types.go // request/response struct definitions
│   ├── shorturl.api
│   └── shorturl.go // main entrance
├── go.mod
└── go.sum
```
* start the API Gateway service; it listens on port 8888 by default
```shell
go run shorturl.go -f etc/shorturl-api.yaml
```
* test the API Gateway service
```shell
curl -i "http://localhost:8888/shorten?url=http://www.xiaoheiban.cn"
```
the response is like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Thu, 27 Aug 2020 14:31:39 GMT
Content-Length: 15
{"shortUrl":""}
```
You can see that the API Gateway didn't actually do anything yet; it just returned a zero value. Next we will implement the business logic in the rpc service.
* you can modify `internal/svc/servicecontext.go` to pass service dependencies (if needed)
* implement the logic by modifying the corresponding files under `internal/logic`
* you can use `goctl` to generate api calling code for various client languages
* at this point, you can already generate client code with goctl and hand it to client-side colleagues to develop in parallel; multiple languages are supported, see the documentation for details
## 6. Write the transform rpc service
* create the `rpc` directory under the `shorturl` directory
* write the `transform.proto` file in the `rpc/transform` directory
you can generate a proto file template with the command:
```shell
goctl rpc template -o transform.proto
```
after editing, the file content is as follows:
```protobuf
syntax = "proto3";
package transform;
message expandReq {
string shorten = 1;
}
message expandResp {
string url = 1;
}
message shortenReq {
string url = 1;
}
message shortenResp {
string shorten = 1;
}
service transformer {
rpc expand(expandReq) returns(expandResp);
rpc shorten(shortenReq) returns(shortenResp);
}
```
* generate the rpc code with `goctl`; run the following command in the `rpc/transform` directory
```shell
goctl rpc proto -src transform.proto
```
the file structure is as follows:
```Plain Text
rpc/transform
├── etc
│   └── transform.yaml // configuration file
├── internal
│   ├── config
│   │   └── config.go // configuration definition
│   ├── logic
│   │   ├── expandlogic.go // expand business logic is implemented here
│   │   └── shortenlogic.go // shorten business logic is implemented here
│   ├── server
│   │   └── transformerserver.go // rpc entry point, no need to modify
│   └── svc
│   └── servicecontext.go // defines ServiceContext, passes dependencies
├── pb
│   └── transform.pb.go
├── transform.go // rpc service main function
├── transform.proto
└── transformer
├── transformer.go // provides methods for external callers, no need to modify
├── transformer_mock.go // mock methods, for testing
└── types.go // request/response struct definitions
```
it can be run directly, like this:
```shell
$ go run transform.go -f etc/transform.yaml
Starting rpc server at 127.0.0.1:8080...
```
you can change the listening port and other settings in `etc/transform.yaml`
## 7. Modify the API Gateway code to call the transform rpc service
* modify the configuration file `shorturl-api.yaml` and add the following:
```yaml
Transform:
Etcd:
Hosts:
- localhost:2379
Key: transform.rpc
```
available transform services are discovered automatically through etcd
* modify `internal/config/config.go` as below to add the dependency on the transform service:
```go
type Config struct {
rest.RestConf
Transform zrpc.RpcClientConf // manual code
}
```
* modify `internal/svc/servicecontext.go`, as below:
```go
type ServiceContext struct {
Config config.Config
Transformer transformer.Transformer // manual code
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
Config: c,
Transformer: transformer.NewTransformer(zrpc.MustNewClient(c.Transform)), // manual code
}
}
```
dependencies are passed between different business logic components through ServiceContext
* modify the `Expand` method in `internal/logic/expandlogic.go`, as below:
```go
func (l *ExpandLogic) Expand(req types.ExpandReq) (*types.ExpandResp, error) {
// manual code start
resp, err := l.svcCtx.Transformer.Expand(l.ctx, &transformer.ExpandReq{
Shorten: req.Shorten,
})
if err != nil {
return nil, err
}
return &types.ExpandResp{
Url: resp.Url,
}, nil
// manual code end
}
```
the shortened link is restored to the original url by calling the `Expand` method of `transformer`
* modify `internal/logic/shortenlogic.go`, as below:
```go
func (l *ShortenLogic) Shorten(req types.ShortenReq) (*types.ShortenResp, error) {
// manual code start
resp, err := l.svcCtx.Transformer.Shorten(l.ctx, &transformer.ShortenReq{
Url: req.Url,
})
if err != nil {
return nil, err
}
return &types.ShortenResp{
Shorten: resp.Shorten,
}, nil
// manual code end
}
```
the url is converted to a short link by calling the `Shorten` method of `transformer`
At this point the API Gateway changes are done. Although a lot of code is pasted, only a small part of it was modified; the full code is pasted to make the context easier to follow. Next, let's handle CRUD+cache.
## 8. Define the database schema and generate the CRUD+cache code
* under shorturl, create the `rpc/transform/model` directory: `mkdir -p rpc/transform/model`
* in the rpc/transform/model directory, write the sql file `shorturl.sql` that creates the shorturl table, as below:
```sql
CREATE TABLE `shorturl`
(
`shorten` varchar(255) NOT NULL COMMENT 'shorten key',
`url` varchar(255) NOT NULL COMMENT 'original url',
PRIMARY KEY(`shorten`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
```
* create the DB and table
```sql
create database gozero;
```
```sql
source shorturl.sql;
```
* in the `rpc/transform/model` directory, execute the following command to generate the CRUD+cache code; `-c` means using `redis cache`
```shell
goctl model mysql ddl -c -src shorturl.sql -dir .
```
you can also use the `datasource` subcommand instead of `ddl` to generate directly from the database schema by specifying the database connection
the generated file structure is as follows:
```Plain Text
rpc/transform/model
├── shorturl.sql
├── shorturlmodel.go // CRUD+cache code
└── vars.go // constant and variable definitions
```
## 9. Modify the shorten/expand rpc code to call the crud+cache code
* modify `rpc/transform/etc/transform.yaml` and add the following:
```yaml
DataSource: root:@tcp(localhost:3306)/gozero
Table: shorturl
Cache:
- Host: localhost:6379
```
you can use multiple redis instances as cache; both single redis nodes and redis clusters are supported
* modify `rpc/transform/internal/config/config.go`, as below:
```go
type Config struct {
zrpc.RpcServerConf
DataSource string // manual code
Table string // manual code
Cache cache.CacheConf // manual code
}
```
this adds the mysql and redis cache configuration
* modify `rpc/transform/internal/svc/servicecontext.go`, as below:
```go
type ServiceContext struct {
c config.Config
Model *model.ShorturlModel // manual code
}
func NewServiceContext(c config.Config) *ServiceContext {
return &ServiceContext{
c: c,
Model: model.NewShorturlModel(sqlx.NewMysql(c.DataSource), c.Cache, c.Table), // manual code
}
}
```
* modify `rpc/transform/internal/logic/expandlogic.go`, as below:
```go
func (l *ExpandLogic) Expand(in *transform.ExpandReq) (*transform.ExpandResp, error) {
// manual code start
res, err := l.svcCtx.Model.FindOne(in.Shorten)
if err != nil {
return nil, err
}
return &transform.ExpandResp{
Url: res.Url,
}, nil
// manual code end
}
```
* modify `rpc/transform/internal/logic/shortenlogic.go`, as below:
```go
func (l *ShortenLogic) Shorten(in *transform.ShortenReq) (*transform.ShortenResp, error) {
// manual code start, generate the short link
key := hash.Md5Hex([]byte(in.Url))[:6]
_, err := l.svcCtx.Model.Insert(model.Shorturl{
Shorten: key,
Url: in.Url,
})
if err != nil {
return nil, err
}
return &transform.ShortenResp{
Shorten: key,
}, nil
// manual code end
}
```
At this point the code changes are complete; all manually modified code is annotated.
## 10. Full call demo
* call the shorten api
```shell
curl -i "http://localhost:8888/shorten?url=http://www.xiaoheiban.cn"
```
the response is like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Sat, 29 Aug 2020 10:49:49 GMT
Content-Length: 21
{"shorten":"f35b2a"}
```
* call the expand api
```shell
curl -i "http://localhost:8888/expand?shorten=f35b2a"
```
the response is like:
```http
HTTP/1.1 200 OK
Content-Type: application/json
Date: Sat, 29 Aug 2020 10:51:53 GMT
Content-Length: 34
{"url":"http://www.xiaoheiban.cn"}
```
## 11. Benchmark
Because writes depend on mysql's write speed, benchmarking them would just be benchmarking mysql, so the load test only covers the expand api, which reads from mysql and uses the cache. shorten.lua randomly picks 100 hot keys from the db to generate the benchmark requests.
![Benchmark](images/shorturl-benchmark.png)
As you can see, it reaches 30K+ qps on my MacBook Pro.
## 12. Full code
[https://github.com/tal-tech/go-zero/tree/master/example/shorturl](https://github.com/tal-tech/go-zero/tree/master/example/shorturl)
## 13. Summary
We keep emphasizing that **tools are better than conventions and documents**.
go-zero is not just a framework; it is a complete technical system built on framework + tooling that simplifies and standardizes building microservices.
While keeping things simple, we also encapsulate the complexity of microservice governance inside the framework as much as possible, which greatly reduces developers' mental burden and lets business development move forward quickly.
The code generated by go-zero + goctl includes all kinds of microservice governance components, such as concurrency control, adaptive circuit breaking, adaptive load shedding, automatic cache control, etc., and can easily be deployed to handle huge traffic.
If you have any good ideas for improving engineering efficiency, feel free to reach out any time! 👏

View File

@@ -1,23 +0,0 @@
# DB cache mechanism
## QueryRowIndex
* when there is no cache of the query-condition-to-primary mapping
  * query the row from the DB by the query condition, then
    * **write the primary-to-row cache into redis**
    * **save the query-condition-to-primary mapping into redis** (*the framework's Take method does this automatically*)
  * possible expiration orders
    * the query-condition-to-primary mapping cache has not expired
      * the primary-to-row cache has not expired
        * return the cached row directly
      * the primary-to-row cache has expired
        * fetch the row from the DB by primary and write it into the cache
        * the problem here is that the query-condition-to-primary cache may be about to expire as well, so a query shortly afterwards would trigger another database query
        * to avoid this, the first expiration time in the **bold part above** can be set slightly longer than the second, for example by 5 seconds
    * the query-condition-to-primary mapping cache has expired, regardless of whether the primary-to-row cache has expired
      * the query-condition-to-primary mapping will be fetched again, and during that fetch a fresh primary-to-row cache is written automatically, so both caches end up with freshly set expiration times
* when there is a cache of the query-condition-to-primary mapping
  * when there is no primary-to-row cache
    * query the row from the DB by primary and write it into the cache
  * when there is a primary-to-row cache
    * return the cached result directly
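To make the flow above concrete, here is a minimal sketch of the two-level lookup. It is not go-zero's actual implementation; the `Store` interface, key layout, and TTL values are made up purely for illustration.
```go
// Package cachedemo sketches the two-level cache lookup described above.
package cachedemo

// Store is a minimal cache abstraction used only for this illustration.
type Store interface {
	Get(key string) (string, bool)              // cache lookup
	SetWithExpire(key, val string, seconds int) // cache write with TTL
}

// TakeWithIndex resolves a row by an index key (query condition -> primary),
// then by the primary key (primary -> row), falling back to the DB on misses.
func TakeWithIndex(
	c Store,
	indexKey string,
	queryPrimaryFromDB func() (string, error),
	queryRowFromDB func(primary string) (string, error),
) (string, error) {
	primary, ok := c.Get(indexKey)
	if !ok {
		// index cache miss: query the DB by the condition, then cache both mappings;
		// give the index mapping a slightly longer TTL than the row cache, as
		// suggested above, to avoid back-to-back DB queries around expiration.
		p, err := queryPrimaryFromDB()
		if err != nil {
			return "", err
		}
		row, err := queryRowFromDB(p)
		if err != nil {
			return "", err
		}
		c.SetWithExpire("row:"+p, row, 60) // primary -> row
		c.SetWithExpire(indexKey, p, 65)   // condition -> primary, slightly longer TTL
		return row, nil
	}

	if row, ok := c.Get("row:" + primary); ok {
		// both caches hit: return the cached row directly
		return row, nil
	}

	// row cache expired: reload the row by primary and refresh only that cache
	row, err := queryRowFromDB(primary)
	if err != nil {
		return "", err
	}
	c.SetWithExpire("row:"+primary, row, 60)
	return row, nil
}
```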

View File

@@ -128,41 +128,38 @@ func main() {
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for {
select {
case <-ticker.C:
expect, err := loadAll(registry.Client().(*clientv3.Client))
if err != nil {
fmt.Println("[ETCD-test] can't load current keys")
continue
}
check := func() bool {
var match bool
barrier.Guard(func() {
match = compare(expect, vals)
})
if match {
logx.Info("match")
}
return match
}
if check() {
continue
}
time.AfterFunc(time.Second*5, func() {
if check() {
return
}
var builder strings.Builder
builder.WriteString(fmt.Sprintf("expect:\n%s\n", serializeMap(expect, "\t")))
barrier.Guard(func() {
builder.WriteString(fmt.Sprintf("actual:\n%s\n", serializeMap(vals, "\t")))
})
fmt.Println(builder.String())
})
for range ticker.C {
expect, err := loadAll(registry.Client().(*clientv3.Client))
if err != nil {
fmt.Println("[ETCD-test] can't load current keys")
continue
}
check := func() bool {
var match bool
barrier.Guard(func() {
match = compare(expect, vals)
})
if match {
logx.Info("match")
}
return match
}
if check() {
continue
}
time.AfterFunc(time.Second*5, func() {
if check() {
return
}
var builder strings.Builder
builder.WriteString(fmt.Sprintf("expect:\n%s\n", serializeMap(expect, "\t")))
barrier.Guard(func() {
builder.WriteString(fmt.Sprintf("actual:\n%s\n", serializeMap(vals, "\t")))
})
fmt.Println(builder.String())
})
}
}

View File

@@ -1,11 +1,19 @@
package main
import (
"fmt"
"testing"
"github.com/tal-tech/go-zero/core/fx"
)
func TestFxSplit(t *testing.T) {
fx.Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).Split(4).ForEach(func(item interface{}) {
vals := item.([]interface{})
fmt.Println(len(vals))
})
}
func BenchmarkFx(b *testing.B) {
type Mixed struct {
Name string

View File

@@ -10,7 +10,6 @@ func main() {
result, err := fx.From(func(source chan<- interface{}) {
for i := 0; i < 10; i++ {
source <- i
source <- i
}
}).Map(func(item interface{}) interface{} {
i := item.(int)

View File

@@ -56,7 +56,7 @@ func main() {
Port: *port,
Timeout: *timeout,
MaxConns: 500,
})
}, rest.WithNotAllowedHandler(rest.CorsHandler()))
defer engine.Stop()
engine.Use(first)

View File

@@ -26,10 +26,6 @@ func main() {
}
writer.Write(user)
}, func(pipe <-chan interface{}, writer mr.Writer, cancel func(error)) {
var users []*User
for p := range pipe {
users = append(users, p.(*User))
}
// missing writer.Write(...), should not panic
})
if err != nil {

View File

@@ -12,7 +12,11 @@ func main() {
fmt.Println(len(items))
}, executors.WithBulkTasks(10))
for {
executor.Add(1)
if err := executor.Add(1); err != nil {
fmt.Println(err)
return
}
time.Sleep(time.Millisecond * 90)
}
}

View File

@@ -24,11 +24,8 @@ func main() {
ticker := time.NewTicker(time.Second * 5)
defer ticker.Stop()
for {
select {
case <-ticker.C:
percent := stat.CpuUsage()
fmt.Println("cpu:", percent)
}
for range ticker.C {
percent := stat.CpuUsage()
fmt.Println("cpu:", percent)
}
}

Some files were not shown because too many files have changed in this diff.