initial import

kevin
2020-07-26 17:09:05 +08:00
commit 7e3a369a8f
647 changed files with 54754 additions and 0 deletions

core/fx/fn.go (Normal file, 357 lines)

@@ -0,0 +1,357 @@
package fx
import (
"sort"
"sync"
"zero/core/collection"
"zero/core/lang"
"zero/core/threading"
)
const (
defaultWorkers = 16
minWorkers = 1
)
type (
rxOptions struct {
unlimitedWorkers bool
workers int
}
FilterFunc func(item interface{}) bool
ForAllFunc func(pipe <-chan interface{})
ForEachFunc func(item interface{})
GenerateFunc func(source chan<- interface{})
KeyFunc func(item interface{}) interface{}
LessFunc func(a, b interface{}) bool
MapFunc func(item interface{}) interface{}
Option func(opts *rxOptions)
ParallelFunc func(item interface{})
ReduceFunc func(pipe <-chan interface{}) (interface{}, error)
WalkFunc func(item interface{}, pipe chan<- interface{})
Stream struct {
source <-chan interface{}
}
)
// From constructs a Stream from the given GenerateFunc.
func From(generate GenerateFunc) Stream {
source := make(chan interface{})
threading.GoSafe(func() {
defer close(source)
generate(source)
})
return Range(source)
}
// Just converts the given arbitrary items to a Stream.
func Just(items ...interface{}) Stream {
source := make(chan interface{}, len(items))
for _, item := range items {
source <- item
}
close(source)
return Range(source)
}
// Range converts the given channel to a Stream.
func Range(source <-chan interface{}) Stream {
return Stream{
source: source,
}
}
// Buffer buffers the items into a queue of size n.
func (p Stream) Buffer(n int) Stream {
if n < 0 {
n = 0
}
source := make(chan interface{}, n)
go func() {
for item := range p.source {
source <- item
}
close(source)
}()
return Range(source)
}
// Distinct removes duplicated items based on the given KeyFunc.
func (p Stream) Distinct(fn KeyFunc) Stream {
source := make(chan interface{})
threading.GoSafe(func() {
defer close(source)
keys := make(map[interface{}]lang.PlaceholderType)
for item := range p.source {
key := fn(item)
if _, ok := keys[key]; !ok {
source <- item
keys[key] = lang.Placeholder
}
}
})
return Range(source)
}
// Done waits for all upstream operations to complete.
func (p Stream) Done() {
for range p.source {
}
}
// Filter filters the items by the given FilterFunc.
func (p Stream) Filter(fn FilterFunc, opts ...Option) Stream {
return p.Walk(func(item interface{}, pipe chan<- interface{}) {
if fn(item) {
pipe <- item
}
}, opts...)
}
// ForAll passes the whole source channel to fn and ends the chain; no successive streams follow.
func (p Stream) ForAll(fn ForAllFunc) {
fn(p.source)
}
// ForEach applies fn to each item and ends the chain; no successive operations follow.
func (p Stream) ForEach(fn ForEachFunc) {
for item := range p.source {
fn(item)
}
}
// Group groups the elements into different groups based on their keys.
func (p Stream) Group(fn KeyFunc) Stream {
groups := make(map[interface{}][]interface{})
for item := range p.source {
key := fn(item)
groups[key] = append(groups[key], item)
}
source := make(chan interface{})
go func() {
for _, group := range groups {
source <- group
}
close(source)
}()
return Range(source)
}
// Head returns a Stream with the first n items, n should be greater than 0.
func (p Stream) Head(n int64) Stream {
source := make(chan interface{})
go func() {
for item := range p.source {
n--
if n >= 0 {
source <- item
}
if n == 0 {
// close the output as soon as we have n items so downstream can proceed,
// but keep draining p.source instead of breaking out of the loop:
// breaking here would leave the upstream goroutine blocked on its send,
// causing a goroutine leak.
close(source)
}
}
if n > 0 {
close(source)
}
}()
return Range(source)
}
// Map converts each item to another corresponding item, which means it's a 1:1 model.
func (p Stream) Map(fn MapFunc, opts ...Option) Stream {
return p.Walk(func(item interface{}, pipe chan<- interface{}) {
pipe <- fn(item)
}, opts...)
}
// Merge merges all the items into a slice and generates a new stream.
func (p Stream) Merge() Stream {
var items []interface{}
for item := range p.source {
items = append(items, item)
}
source := make(chan interface{}, 1)
source <- items
close(source)
return Range(source)
}
// Parallel applies the given ParallelFunc to each item concurrently with the given number of workers.
func (p Stream) Parallel(fn ParallelFunc, opts ...Option) {
p.Walk(func(item interface{}, pipe chan<- interface{}) {
fn(item)
}, opts...).Done()
}
// Reduce is a utility method to let the caller deal with the underlying channel.
func (p Stream) Reduce(fn ReduceFunc) (interface{}, error) {
return fn(p.source)
}
// Reverse reverses the elements in the stream.
func (p Stream) Reverse() Stream {
var items []interface{}
for item := range p.source {
items = append(items, item)
}
// reverse the slice in place using the standard swap idiom
for i := len(items)/2 - 1; i >= 0; i-- {
opp := len(items) - 1 - i
items[i], items[opp] = items[opp], items[i]
}
return Just(items...)
}
// Sort sorts the items from the underlying source.
func (p Stream) Sort(less LessFunc) Stream {
var items []interface{}
for item := range p.source {
items = append(items, item)
}
sort.Slice(items, func(i, j int) bool {
return less(items[i], items[j])
})
return Just(items...)
}
// Tail returns a Stream with the last n items from the underlying source.
func (p Stream) Tail(n int64) Stream {
source := make(chan interface{})
go func() {
ring := collection.NewRing(int(n))
for item := range p.source {
ring.Add(item)
}
for _, item := range ring.Take() {
source <- item
}
close(source)
}()
return Range(source)
}
// Walk lets the caller handle each item; for each input item, the caller may write zero, one or more items into pipe.
func (p Stream) Walk(fn WalkFunc, opts ...Option) Stream {
option := buildOptions(opts...)
if option.unlimitedWorkers {
return p.walkUnlimited(fn, option)
} else {
return p.walkLimited(fn, option)
}
}
func (p Stream) walkLimited(fn WalkFunc, option *rxOptions) Stream {
pipe := make(chan interface{}, option.workers)
go func() {
var wg sync.WaitGroup
pool := make(chan lang.PlaceholderType, option.workers)
for {
pool <- lang.Placeholder
item, ok := <-p.source
if !ok {
<-pool
break
}
wg.Add(1)
// run the caller-defined fn via GoSafe so a panic in fn doesn't crash the whole pipeline
threading.GoSafe(func() {
defer func() {
wg.Done()
<-pool
}()
fn(item, pipe)
})
}
wg.Wait()
close(pipe)
}()
return Range(pipe)
}
func (p Stream) walkUnlimited(fn WalkFunc, option *rxOptions) Stream {
pipe := make(chan interface{}, defaultWorkers)
go func() {
var wg sync.WaitGroup
for {
item, ok := <-p.source
if !ok {
break
}
wg.Add(1)
// run the caller-defined fn via GoSafe so a panic in fn doesn't crash the whole pipeline
threading.GoSafe(func() {
defer wg.Done()
fn(item, pipe)
})
}
wg.Wait()
close(pipe)
}()
return Range(pipe)
}
// UnlimitedWorkers lets the caller use as many workers as there are tasks.
func UnlimitedWorkers() Option {
return func(opts *rxOptions) {
opts.unlimitedWorkers = true
}
}
// WithWorkers lets the caller customize the number of concurrent workers.
func WithWorkers(workers int) Option {
return func(opts *rxOptions) {
if workers < minWorkers {
opts.workers = minWorkers
} else {
opts.workers = workers
}
}
}
func buildOptions(opts ...Option) *rxOptions {
options := newOptions()
for _, opt := range opts {
opt(options)
}
return options
}
func newOptions() *rxOptions {
return &rxOptions{
workers: defaultWorkers,
}
}
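
For orientation, here is a minimal usage sketch of the Stream API defined above, chaining Just, Filter, Map and Reduce. The fx identifiers come straight from fn.go; the main function, the WithWorkers(4) value and the printed result are illustrative only.

	package main

	import (
		"fmt"

		"zero/core/fx"
	)

	func main() {
		// sum the squares of the even numbers in 1..10
		result, err := fx.Just(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).
			Filter(func(item interface{}) bool {
				return item.(int)%2 == 0
			}).
			Map(func(item interface{}) interface{} {
				v := item.(int)
				return v * v
			}, fx.WithWorkers(4)).
			Reduce(func(pipe <-chan interface{}) (interface{}, error) {
				var sum int
				for item := range pipe {
					sum += item.(int)
				}
				return sum, nil
			})
		fmt.Println(result, err) // 220 <nil>
	}

Note that Just loads all items into a buffered channel up front, while From streams them lazily from a separate goroutine, so From is the better fit for large or unbounded sources.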

core/fx/fn_test.go (Normal file, 293 lines)

@@ -0,0 +1,293 @@
package fx
import (
"io/ioutil"
"log"
"runtime"
"sync"
"sync/atomic"
"testing"
"time"
"zero/core/stringx"
"github.com/stretchr/testify/assert"
)
func TestBuffer(t *testing.T) {
const N = 5
var count int32
var wait sync.WaitGroup
wait.Add(1)
From(func(source chan<- interface{}) {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
for i := 0; i < 2*N; i++ {
select {
case source <- i:
atomic.AddInt32(&count, 1)
case <-ticker.C:
wait.Done()
return
}
}
}).Buffer(N).ForAll(func(pipe <-chan interface{}) {
wait.Wait()
// why N+1: the buffer holds N items, and the Buffer goroutine has taken one more off the source and blocks waiting to send it into the full buffer
assert.Equal(t, int32(N+1), atomic.LoadInt32(&count))
})
}
func TestBufferNegative(t *testing.T) {
var result int
Just(1, 2, 3, 4).Buffer(-1).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
}
func TestDone(t *testing.T) {
var count int32
Just(1, 2, 3).Walk(func(item interface{}, pipe chan<- interface{}) {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int)))
}).Done()
assert.Equal(t, int32(6), count)
}
func TestJust(t *testing.T) {
var result int
Just(1, 2, 3, 4).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
}
func TestDistinct(t *testing.T) {
var result int
Just(4, 1, 3, 2, 3, 4).Distinct(func(item interface{}) interface{} {
return item
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
}
func TestFilter(t *testing.T) {
var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
return item.(int)%2 == 0
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 6, result)
}
func TestForAll(t *testing.T) {
var result int
Just(1, 2, 3, 4).Filter(func(item interface{}) bool {
return item.(int)%2 == 0
}).ForAll(func(pipe <-chan interface{}) {
for item := range pipe {
result += item.(int)
}
})
assert.Equal(t, 6, result)
}
func TestGroup(t *testing.T) {
var groups [][]int
Just(10, 11, 20, 21).Group(func(item interface{}) interface{} {
v := item.(int)
return v / 10
}).ForEach(func(item interface{}) {
v := item.([]interface{})
var group []int
for _, each := range v {
group = append(group, each.(int))
}
groups = append(groups, group)
})
assert.Equal(t, 2, len(groups))
for _, group := range groups {
assert.Equal(t, 2, len(group))
assert.True(t, group[0]/10 == group[1]/10)
}
}
func TestHead(t *testing.T) {
var result int
Just(1, 2, 3, 4).Head(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 3, result)
}
func TestHeadMore(t *testing.T) {
var result int
Just(1, 2, 3, 4).Head(6).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 10, result)
}
func TestMap(t *testing.T) {
log.SetOutput(ioutil.Discard)
tests := []struct {
mapper MapFunc
expect int
}{
{
mapper: func(item interface{}) interface{} {
v := item.(int)
return v * v
},
expect: 30,
},
{
mapper: func(item interface{}) interface{} {
v := item.(int)
if v%2 == 0 {
return 0
}
return v * v
},
expect: 10,
},
{
mapper: func(item interface{}) interface{} {
v := item.(int)
if v%2 == 0 {
panic(v)
}
return v * v
},
expect: 10,
},
}
// Map(...) works even with WithWorkers(0), which falls back to minWorkers
for i, test := range tests {
t.Run(stringx.Rand(), func(t *testing.T) {
var result int
var workers int
if i%2 == 0 {
workers = 0
} else {
workers = runtime.NumCPU()
}
From(func(source chan<- interface{}) {
for i := 1; i < 5; i++ {
source <- i
}
}).Map(test.mapper, WithWorkers(workers)).Reduce(
func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, test.expect, result)
})
}
}
func TestMerge(t *testing.T) {
Just(1, 2, 3, 4).Merge().ForEach(func(item interface{}) {
assert.ElementsMatch(t, []interface{}{1, 2, 3, 4}, item.([]interface{}))
})
}
func TestParallelJust(t *testing.T) {
var count int32
Just(1, 2, 3).Parallel(func(item interface{}) {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, int32(item.(int)))
}, UnlimitedWorkers())
assert.Equal(t, int32(6), count)
}
func TestReverse(t *testing.T) {
Just(1, 2, 3, 4).Reverse().Merge().ForEach(func(item interface{}) {
assert.ElementsMatch(t, []interface{}{4, 3, 2, 1}, item.([]interface{}))
})
}
func TestSort(t *testing.T) {
var prev int
Just(5, 3, 7, 1, 9, 6, 4, 8, 2).Sort(func(a, b interface{}) bool {
return a.(int) < b.(int)
}).ForEach(func(item interface{}) {
next := item.(int)
assert.True(t, prev < next)
prev = next
})
}
func TestTail(t *testing.T) {
var result int
Just(1, 2, 3, 4).Tail(2).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
for item := range pipe {
result += item.(int)
}
return result, nil
})
assert.Equal(t, 7, result)
}
func TestWalk(t *testing.T) {
var result int
Just(1, 2, 3, 4, 5).Walk(func(item interface{}, pipe chan<- interface{}) {
if item.(int)%2 != 0 {
pipe <- item
}
}, UnlimitedWorkers()).ForEach(func(item interface{}) {
result += item.(int)
})
assert.Equal(t, 9, result)
}
func BenchmarkMapReduce(b *testing.B) {
b.ReportAllocs()
mapper := func(v interface{}) interface{} {
return v.(int64) * v.(int64)
}
reducer := func(input <-chan interface{}) (interface{}, error) {
var result int64
for v := range input {
result += v.(int64)
}
return result, nil
}
for i := 0; i < b.N; i++ {
From(func(input chan<- interface{}) {
for j := 0; j < 2; j++ {
input <- int64(j)
}
}).Map(mapper).Reduce(reducer)
}
}

core/fx/parallel.go (Normal file, 11 lines)

@@ -0,0 +1,11 @@
package fx
import "zero/core/threading"
// Parallel runs fns concurrently and waits for all of them to complete.
func Parallel(fns ...func()) {
group := threading.NewRoutineGroup()
for _, fn := range fns {
group.RunSafe(fn)
}
group.Wait()
}

core/fx/parallel_test.go (Normal file, 24 lines)

@@ -0,0 +1,24 @@
package fx
import (
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestParallel(t *testing.T) {
var count int32
Parallel(func() {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 1)
}, func() {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 2)
}, func() {
time.Sleep(time.Millisecond * 100)
atomic.AddInt32(&count, 3)
})
assert.Equal(t, int32(6), count)
}

core/fx/retry.go (Normal file, 43 lines)

@@ -0,0 +1,43 @@
package fx
import "zero/core/errorx"
const defaultRetryTimes = 3
type (
RetryOption func(*retryOptions)
retryOptions struct {
times int
}
)
// DoWithRetries runs fn up to the configured number of times (3 by default),
// returning nil on the first success or the collected errors otherwise.
func DoWithRetries(fn func() error, opts ...RetryOption) error {
var options = newRetryOptions()
for _, opt := range opts {
opt(options)
}
var berr errorx.BatchError
for i := 0; i < options.times; i++ {
if err := fn(); err != nil {
berr.Add(err)
} else {
return nil
}
}
return berr.Err()
}
// WithRetries customizes how many times fn will be attempted.
func WithRetries(times int) RetryOption {
return func(options *retryOptions) {
options.times = times
}
}
func newRetryOptions() *retryOptions {
return &retryOptions{
times: defaultRetryTimes,
}
}
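
A minimal sketch of how DoWithRetries might be called, based on the definitions above; the flaky connect closure and the retry count of 5 are illustrative only.

	package main

	import (
		"errors"
		"fmt"

		"zero/core/fx"
	)

	func main() {
		attempts := 0
		// pretend the connection fails twice before succeeding (illustrative only)
		connect := func() error {
			attempts++
			if attempts < 3 {
				return errors.New("connection refused")
			}
			return nil
		}
		// allow up to 5 attempts instead of the default 3
		if err := fx.DoWithRetries(connect, fx.WithRetries(5)); err != nil {
			fmt.Println("gave up:", err)
			return
		}
		fmt.Println("connected after", attempts, "attempts") // connected after 3 attempts
	}

Since every failed attempt is added to an errorx.BatchError, the error returned after the last attempt carries all intermediate failures.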

core/fx/retry_test.go (Normal file, 42 lines)

@@ -0,0 +1,42 @@
package fx
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
func TestRetry(t *testing.T) {
assert.NotNil(t, DoWithRetries(func() error {
return errors.New("any")
}))
var times int
assert.Nil(t, DoWithRetries(func() error {
times++
if times == defaultRetryTimes {
return nil
}
return errors.New("any")
}))
times = 0
assert.NotNil(t, DoWithRetries(func() error {
times++
if times == defaultRetryTimes+1 {
return nil
}
return errors.New("any")
}))
var total = 2 * defaultRetryTimes
times = 0
assert.Nil(t, DoWithRetries(func() error {
times++
if times == total {
return nil
}
return errors.New("any")
}, WithRetries(total)))
}

core/fx/timeout.go (Normal file, 49 lines)

@@ -0,0 +1,49 @@
package fx
import (
"context"
"time"
)
var (
ErrCanceled = context.Canceled
ErrTimeout = context.DeadlineExceeded
)
type FxOption func() context.Context
// DoWithTimeout runs fn with the given timeout, returning ErrTimeout if fn doesn't finish in time.
func DoWithTimeout(fn func() error, timeout time.Duration, opts ...FxOption) error {
parentCtx := context.Background()
for _, opt := range opts {
parentCtx = opt()
}
ctx, cancel := context.WithTimeout(parentCtx, timeout)
defer cancel()
// buffered so the worker goroutine can still send and exit if the timeout fires first
done := make(chan error, 1)
panicChan := make(chan interface{}, 1)
go func() {
defer func() {
if p := recover(); p != nil {
panicChan <- p
}
}()
done <- fn()
close(done)
}()
select {
case p := <-panicChan:
panic(p)
case err := <-done:
return err
case <-ctx.Done():
return ctx.Err()
}
}
// WithContext customizes the parent context that DoWithTimeout derives its deadline from.
func WithContext(ctx context.Context) FxOption {
return func() context.Context {
return ctx
}
}
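
A sketch of DoWithTimeout at a call site; the slowQuery closure and the 100ms budget are illustrative, while DoWithTimeout, WithContext, ErrTimeout and ErrCanceled are the identifiers defined above.

	package main

	import (
		"context"
		"fmt"
		"time"

		"zero/core/fx"
	)

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		// a deliberately slow operation, for illustration only
		slowQuery := func() error {
			time.Sleep(time.Second)
			return nil
		}

		err := fx.DoWithTimeout(slowQuery, 100*time.Millisecond, fx.WithContext(ctx))
		switch err {
		case fx.ErrTimeout:
			fmt.Println("query timed out")
		case fx.ErrCanceled:
			fmt.Println("query canceled by caller")
		default:
			fmt.Println("query finished:", err)
		}
	}

Panics inside fn are re-raised in the calling goroutine, as TestWithPanic below verifies.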

core/fx/timeout_test.go (Normal file, 43 lines)

@@ -0,0 +1,43 @@
package fx
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestWithPanic(t *testing.T) {
assert.Panics(t, func() {
_ = DoWithTimeout(func() error {
panic("hello")
}, time.Millisecond*50)
})
}
func TestWithTimeout(t *testing.T) {
assert.Equal(t, ErrTimeout, DoWithTimeout(func() error {
time.Sleep(time.Millisecond * 50)
return nil
}, time.Millisecond))
}
func TestWithoutTimeout(t *testing.T) {
assert.Nil(t, DoWithTimeout(func() error {
return nil
}, time.Millisecond*50))
}
func TestWithCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
time.Sleep(time.Millisecond * 10)
cancel()
}()
err := DoWithTimeout(func() error {
time.Sleep(time.Minute)
return nil
}, time.Second, WithContext(ctx))
assert.Equal(t, ErrCanceled, err)
}