initial import
core/executors/bulkexecutor.go (new file, 93 lines added)
@@ -0,0 +1,93 @@
package executors

import (
	"time"
)

const defaultBulkTasks = 1000

type (
	BulkOption func(options *bulkOptions)

	BulkExecutor struct {
		executor  *PeriodicalExecutor
		container *bulkContainer
	}

	bulkOptions struct {
		cachedTasks   int
		flushInterval time.Duration
	}
)

func NewBulkExecutor(execute Execute, opts ...BulkOption) *BulkExecutor {
	options := newBulkOptions()
	for _, opt := range opts {
		opt(&options)
	}

	container := &bulkContainer{
		execute:  execute,
		maxTasks: options.cachedTasks,
	}
	executor := &BulkExecutor{
		executor:  NewPeriodicalExecutor(options.flushInterval, container),
		container: container,
	}

	return executor
}

func (be *BulkExecutor) Add(task interface{}) error {
	be.executor.Add(task)
	return nil
}

func (be *BulkExecutor) Flush() {
	be.executor.Flush()
}

func (be *BulkExecutor) Wait() {
	be.executor.Wait()
}

func WithBulkTasks(tasks int) BulkOption {
	return func(options *bulkOptions) {
		options.cachedTasks = tasks
	}
}

func WithBulkInterval(duration time.Duration) BulkOption {
	return func(options *bulkOptions) {
		options.flushInterval = duration
	}
}

func newBulkOptions() bulkOptions {
	return bulkOptions{
		cachedTasks:   defaultBulkTasks,
		flushInterval: defaultFlushInterval,
	}
}

type bulkContainer struct {
	tasks    []interface{}
	execute  Execute
	maxTasks int
}

func (bc *bulkContainer) AddTask(task interface{}) bool {
	bc.tasks = append(bc.tasks, task)
	return len(bc.tasks) >= bc.maxTasks
}

func (bc *bulkContainer) Execute(tasks interface{}) {
	vals := tasks.([]interface{})
	bc.execute(vals)
}

func (bc *bulkContainer) RemoveAll() interface{} {
	tasks := bc.tasks
	bc.tasks = nil
	return tasks
}
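For orientation, a minimal usage sketch of the BulkExecutor defined above: tasks accumulate and are handed to the execute callback once the cached-task limit is reached or the flush interval fires. The import path is assumed from the internal "zero/..." imports elsewhere in this commit, and the batch size, interval, and print-based handler are illustrative, not part of the commit.

package main

import (
	"fmt"
	"time"

	"zero/core/executors"
)

func main() {
	// flush every 100 tasks or every 500ms, whichever comes first
	be := executors.NewBulkExecutor(func(tasks []interface{}) {
		fmt.Println("flushing", len(tasks), "tasks")
	}, executors.WithBulkTasks(100), executors.WithBulkInterval(time.Millisecond*500))

	for i := 0; i < 1000; i++ {
		be.Add(i)
	}

	// push out any remaining tasks and wait for in-flight handlers to finish
	be.Flush()
	be.Wait()
}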
core/executors/bulkexecutor_test.go (new file, 113 lines added)
@@ -0,0 +1,113 @@
package executors

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestBulkExecutor(t *testing.T) {
	var values []int
	var lock sync.Mutex

	executor := NewBulkExecutor(func(items []interface{}) {
		lock.Lock()
		values = append(values, len(items))
		lock.Unlock()
	}, WithBulkTasks(10), WithBulkInterval(time.Minute))

	for i := 0; i < 50; i++ {
		executor.Add(1)
		time.Sleep(time.Millisecond)
	}

	lock.Lock()
	assert.True(t, len(values) > 0)
	// ignore last value
	for i := 0; i < len(values); i++ {
		assert.Equal(t, 10, values[i])
	}
	lock.Unlock()
}

func TestBulkExecutorFlushInterval(t *testing.T) {
	const (
		caches = 10
		size   = 5
	)
	var wait sync.WaitGroup

	wait.Add(1)
	executor := NewBulkExecutor(func(items []interface{}) {
		assert.Equal(t, size, len(items))
		wait.Done()
	}, WithBulkTasks(caches), WithBulkInterval(time.Millisecond*100))

	for i := 0; i < size; i++ {
		executor.Add(1)
	}

	wait.Wait()
}

func TestBulkExecutorEmpty(t *testing.T) {
	NewBulkExecutor(func(items []interface{}) {
		assert.Fail(t, "should not be called")
	}, WithBulkTasks(10), WithBulkInterval(time.Millisecond))
	time.Sleep(time.Millisecond * 100)
}

func TestBulkExecutorFlush(t *testing.T) {
	const (
		caches = 10
		tasks  = 5
	)

	var wait sync.WaitGroup
	wait.Add(1)
	be := NewBulkExecutor(func(items []interface{}) {
		assert.Equal(t, tasks, len(items))
		wait.Done()
	}, WithBulkTasks(caches), WithBulkInterval(time.Minute))
	for i := 0; i < tasks; i++ {
		be.Add(1)
	}
	be.Flush()
	wait.Wait()
}

func TestBulkExecutorFlushSlowTasks(t *testing.T) {
	const total = 1500
	lock := new(sync.Mutex)
	result := make([]interface{}, 0, 10000)
	exec := NewBulkExecutor(func(tasks []interface{}) {
		time.Sleep(time.Millisecond * 100)
		lock.Lock()
		defer lock.Unlock()
		for _, i := range tasks {
			result = append(result, i)
		}
	}, WithBulkTasks(1000))
	for i := 0; i < total; i++ {
		assert.Nil(t, exec.Add(i))
	}

	exec.Flush()
	exec.Wait()
	assert.Equal(t, total, len(result))
}

func BenchmarkBulkExecutor(b *testing.B) {
	b.ReportAllocs()

	be := NewBulkExecutor(func(tasks []interface{}) {
		time.Sleep(time.Millisecond * time.Duration(len(tasks)))
	})
	for i := 0; i < b.N; i++ {
		time.Sleep(time.Microsecond * 200)
		be.Add(1)
	}
	be.Flush()
}
core/executors/chunkexecutor.go (new file, 103 lines added)
@@ -0,0 +1,103 @@
package executors

import "time"

const defaultChunkSize = 1024 * 1024 // 1M

type (
	ChunkOption func(options *chunkOptions)

	ChunkExecutor struct {
		executor  *PeriodicalExecutor
		container *chunkContainer
	}

	chunkOptions struct {
		chunkSize     int
		flushInterval time.Duration
	}
)

func NewChunkExecutor(execute Execute, opts ...ChunkOption) *ChunkExecutor {
	options := newChunkOptions()
	for _, opt := range opts {
		opt(&options)
	}

	container := &chunkContainer{
		execute:      execute,
		maxChunkSize: options.chunkSize,
	}
	executor := &ChunkExecutor{
		executor:  NewPeriodicalExecutor(options.flushInterval, container),
		container: container,
	}

	return executor
}

func (ce *ChunkExecutor) Add(task interface{}, size int) error {
	ce.executor.Add(chunk{
		val:  task,
		size: size,
	})
	return nil
}

func (ce *ChunkExecutor) Flush() {
	ce.executor.Flush()
}

func (ce *ChunkExecutor) Wait() {
	ce.executor.Wait()
}

func WithChunkBytes(size int) ChunkOption {
	return func(options *chunkOptions) {
		options.chunkSize = size
	}
}

func WithFlushInterval(duration time.Duration) ChunkOption {
	return func(options *chunkOptions) {
		options.flushInterval = duration
	}
}

func newChunkOptions() chunkOptions {
	return chunkOptions{
		chunkSize:     defaultChunkSize,
		flushInterval: defaultFlushInterval,
	}
}

type chunkContainer struct {
	tasks        []interface{}
	execute      Execute
	size         int
	maxChunkSize int
}

func (bc *chunkContainer) AddTask(task interface{}) bool {
	ck := task.(chunk)
	bc.tasks = append(bc.tasks, ck.val)
	bc.size += ck.size
	return bc.size >= bc.maxChunkSize
}

func (bc *chunkContainer) Execute(tasks interface{}) {
	vals := tasks.([]interface{})
	bc.execute(vals)
}

func (bc *chunkContainer) RemoveAll() interface{} {
	tasks := bc.tasks
	bc.tasks = nil
	bc.size = 0
	return tasks
}

type chunk struct {
	val  interface{}
	size int
}
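A minimal sketch of driving the ChunkExecutor above: each Add call reports the payload size, and the container flushes once the accumulated bytes reach the chunk limit or the interval elapses. The message payload, the 64KB limit, and the one-second interval are illustrative assumptions, not part of the commit.

package main

import (
	"fmt"
	"time"

	"zero/core/executors"
)

func main() {
	// flush once 64KB of payload is buffered, or every second
	ce := executors.NewChunkExecutor(func(tasks []interface{}) {
		fmt.Println("writing", len(tasks), "messages")
	}, executors.WithChunkBytes(64*1024), executors.WithFlushInterval(time.Second))

	for i := 0; i < 10000; i++ {
		msg := fmt.Sprintf("message-%d", i)
		// the second argument is the size in bytes contributed by this task
		ce.Add(msg, len(msg))
	}

	ce.Flush()
	ce.Wait()
}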
core/executors/chunkexecutor_test.go (new file, 92 lines added)
@@ -0,0 +1,92 @@
package executors

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestChunkExecutor(t *testing.T) {
	var values []int
	var lock sync.Mutex

	executor := NewChunkExecutor(func(items []interface{}) {
		lock.Lock()
		values = append(values, len(items))
		lock.Unlock()
	}, WithChunkBytes(10), WithFlushInterval(time.Minute))

	for i := 0; i < 50; i++ {
		executor.Add(1, 1)
		time.Sleep(time.Millisecond)
	}

	lock.Lock()
	assert.True(t, len(values) > 0)
	// ignore last value
	for i := 0; i < len(values); i++ {
		assert.Equal(t, 10, values[i])
	}
	lock.Unlock()
}

func TestChunkExecutorFlushInterval(t *testing.T) {
	const (
		caches = 10
		size   = 5
	)
	var wait sync.WaitGroup

	wait.Add(1)
	executor := NewChunkExecutor(func(items []interface{}) {
		assert.Equal(t, size, len(items))
		wait.Done()
	}, WithChunkBytes(caches), WithFlushInterval(time.Millisecond*100))

	for i := 0; i < size; i++ {
		executor.Add(1, 1)
	}

	wait.Wait()
}

func TestChunkExecutorEmpty(t *testing.T) {
	NewChunkExecutor(func(items []interface{}) {
		assert.Fail(t, "should not be called")
	}, WithChunkBytes(10), WithFlushInterval(time.Millisecond))
	time.Sleep(time.Millisecond * 100)
}

func TestChunkExecutorFlush(t *testing.T) {
	const (
		caches = 10
		tasks  = 5
	)

	var wait sync.WaitGroup
	wait.Add(1)
	be := NewChunkExecutor(func(items []interface{}) {
		assert.Equal(t, tasks, len(items))
		wait.Done()
	}, WithChunkBytes(caches), WithFlushInterval(time.Minute))
	for i := 0; i < tasks; i++ {
		be.Add(1, 1)
	}
	be.Flush()
	wait.Wait()
}

func BenchmarkChunkExecutor(b *testing.B) {
	b.ReportAllocs()

	be := NewChunkExecutor(func(tasks []interface{}) {
		time.Sleep(time.Millisecond * time.Duration(len(tasks)))
	})
	for i := 0; i < b.N; i++ {
		time.Sleep(time.Microsecond * 200)
		be.Add(1, 1)
	}
	be.Flush()
}
core/executors/delayexecutor.go (new file, 44 lines added)
@@ -0,0 +1,44 @@
package executors

import (
	"sync"
	"time"

	"zero/core/threading"
)

type DelayExecutor struct {
	fn        func()
	delay     time.Duration
	triggered bool
	lock      sync.Mutex
}

func NewDelayExecutor(fn func(), delay time.Duration) *DelayExecutor {
	return &DelayExecutor{
		fn:    fn,
		delay: delay,
	}
}

func (de *DelayExecutor) Trigger() {
	de.lock.Lock()
	defer de.lock.Unlock()

	if de.triggered {
		return
	}

	de.triggered = true
	threading.GoSafe(func() {
		timer := time.NewTimer(de.delay)
		defer timer.Stop()
		<-timer.C

		// set triggered to false before calling fn to ensure no triggers are missed.
		de.lock.Lock()
		de.triggered = false
		de.lock.Unlock()
		de.fn()
	})
}
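A sketch of how the DelayExecutor above debounces work: repeated Trigger calls inside the delay window collapse into a single execution of fn. The delay value, the print-based fn, and the final sleep are illustrative assumptions for a standalone run.

package main

import (
	"fmt"
	"time"

	"zero/core/executors"
)

func main() {
	// collapse a burst of triggers into one execution per delay window
	de := executors.NewDelayExecutor(func() {
		fmt.Println("ran once for the whole burst")
	}, time.Millisecond*100)

	for i := 0; i < 100; i++ {
		de.Trigger()
	}

	// give the delayed goroutine time to fire before main exits
	time.Sleep(time.Millisecond * 200)
}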
core/executors/delayexecutor_test.go (new file, 21 lines added)
@@ -0,0 +1,21 @@
package executors

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestDelayExecutor(t *testing.T) {
	var count int32
	ex := NewDelayExecutor(func() {
		atomic.AddInt32(&count, 1)
	}, time.Millisecond*10)
	for i := 0; i < 100; i++ {
		ex.Trigger()
	}
	time.Sleep(time.Millisecond * 100)
	assert.Equal(t, int32(1), atomic.LoadInt32(&count))
}
core/executors/lessexecutor.go (new file, 32 lines added)
@@ -0,0 +1,32 @@
package executors

import (
	"time"

	"zero/core/syncx"
	"zero/core/timex"
)

type LessExecutor struct {
	threshold time.Duration
	lastTime  *syncx.AtomicDuration
}

func NewLessExecutor(threshold time.Duration) *LessExecutor {
	return &LessExecutor{
		threshold: threshold,
		lastTime:  syncx.NewAtomicDuration(),
	}
}

func (le *LessExecutor) DoOrDiscard(execute func()) bool {
	now := timex.Now()
	lastTime := le.lastTime.Load()
	if lastTime == 0 || lastTime+le.threshold < now {
		le.lastTime.Set(now)
		execute()
		return true
	}

	return false
}
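A sketch of the LessExecutor above used to throttle a noisy code path: the guarded function runs at most once per threshold and extra calls are discarded. The one-second threshold, the loop, and the printed output are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	"zero/core/executors"
)

func main() {
	// run the guarded function at most once per second
	le := executors.NewLessExecutor(time.Second)

	for i := 0; i < 5; i++ {
		done := le.DoOrDiscard(func() {
			fmt.Println("only the first call in this second reaches here")
		})
		fmt.Println("executed:", done)
		time.Sleep(time.Millisecond * 100)
	}
}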
core/executors/lessexecutor_test.go (new file, 27 lines added)
@@ -0,0 +1,27 @@
package executors

import (
	"testing"
	"time"

	"zero/core/timex"

	"github.com/stretchr/testify/assert"
)

func TestLessExecutor_DoOrDiscard(t *testing.T) {
	executor := NewLessExecutor(time.Minute)
	assert.True(t, executor.DoOrDiscard(func() {}))
	assert.False(t, executor.DoOrDiscard(func() {}))
	executor.lastTime.Set(timex.Now() - time.Minute - time.Second*30)
	assert.True(t, executor.DoOrDiscard(func() {}))
	assert.False(t, executor.DoOrDiscard(func() {}))
}

func BenchmarkLessExecutor(b *testing.B) {
	exec := NewLessExecutor(time.Millisecond)
	for i := 0; i < b.N; i++ {
		exec.DoOrDiscard(func() {
		})
	}
}
core/executors/periodicalexecutor.go (new file, 158 lines added)
@@ -0,0 +1,158 @@
package executors

import (
	"reflect"
	"sync"
	"time"

	"zero/core/proc"
	"zero/core/threading"
	"zero/core/timex"
)

const idleRound = 10

type (
	// A type that satisfies executors.TaskContainer can be used as the underlying
	// container that is used to do periodical executions.
	TaskContainer interface {
		// AddTask adds the task into the container.
		// Returns true if the container needs to be flushed after the addition.
		AddTask(task interface{}) bool
		// Execute handles the collected tasks by the container when flushing.
		Execute(tasks interface{})
		// RemoveAll removes the contained tasks, and returns them.
		RemoveAll() interface{}
	}

	PeriodicalExecutor struct {
		commander chan interface{}
		interval  time.Duration
		container TaskContainer
		waitGroup sync.WaitGroup
		guarded   bool
		newTicker func(duration time.Duration) timex.Ticker
		lock      sync.Mutex
	}
)

func NewPeriodicalExecutor(interval time.Duration, container TaskContainer) *PeriodicalExecutor {
	executor := &PeriodicalExecutor{
		// buffer 1 to let the caller go quickly
		commander: make(chan interface{}, 1),
		interval:  interval,
		container: container,
		newTicker: func(d time.Duration) timex.Ticker {
			return timex.NewTicker(interval)
		},
	}
	proc.AddShutdownListener(func() {
		executor.Flush()
	})

	return executor
}

func (pe *PeriodicalExecutor) Add(task interface{}) {
	if vals, ok := pe.addAndCheck(task); ok {
		pe.commander <- vals
	}
}

func (pe *PeriodicalExecutor) Flush() bool {
	return pe.executeTasks(func() interface{} {
		pe.lock.Lock()
		defer pe.lock.Unlock()
		return pe.container.RemoveAll()
	}())
}

func (pe *PeriodicalExecutor) Sync(fn func()) {
	pe.lock.Lock()
	defer pe.lock.Unlock()
	fn()
}

func (pe *PeriodicalExecutor) Wait() {
	pe.waitGroup.Wait()
}

func (pe *PeriodicalExecutor) addAndCheck(task interface{}) (interface{}, bool) {
	pe.lock.Lock()
	defer func() {
		var start bool
		if !pe.guarded {
			pe.guarded = true
			start = true
		}
		pe.lock.Unlock()
		if start {
			pe.backgroundFlush()
		}
	}()

	if pe.container.AddTask(task) {
		return pe.container.RemoveAll(), true
	}

	return nil, false
}

func (pe *PeriodicalExecutor) backgroundFlush() {
	threading.GoSafe(func() {
		ticker := pe.newTicker(pe.interval)
		defer ticker.Stop()

		var commanded bool
		last := timex.Now()
		for {
			select {
			case vals := <-pe.commander:
				commanded = true
				pe.executeTasks(vals)
				last = timex.Now()
			case <-ticker.Chan():
				if commanded {
					commanded = false
				} else if pe.Flush() {
					last = timex.Now()
				} else if timex.Since(last) > pe.interval*idleRound {
					pe.lock.Lock()
					pe.guarded = false
					pe.lock.Unlock()

					// flush again to avoid missing tasks
					pe.Flush()
					return
				}
			}
		}
	})
}

func (pe *PeriodicalExecutor) executeTasks(tasks interface{}) bool {
	pe.waitGroup.Add(1)
	defer pe.waitGroup.Done()

	ok := pe.hasTasks(tasks)
	if ok {
		pe.container.Execute(tasks)
	}

	return ok
}

func (pe *PeriodicalExecutor) hasTasks(tasks interface{}) bool {
	if tasks == nil {
		return false
	}

	val := reflect.ValueOf(tasks)
	switch val.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
		return val.Len() > 0
	default:
		// unknown type, let caller execute it
		return true
	}
}
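BulkExecutor and ChunkExecutor above are thin wrappers that plug a container into PeriodicalExecutor. As a sketch of the same pattern, any type satisfying TaskContainer can be wired in directly; the lineContainer below is a hypothetical example (not part of this commit) that batches strings by count and joins them on flush.

package executors

import "strings"

// lineContainer is a hypothetical TaskContainer that buffers strings
// and joins them into one block when the executor flushes.
type lineContainer struct {
	lines []string
	max   int
	sink  func(batch string)
}

// AddTask buffers a line and asks for a flush once max lines are cached.
func (lc *lineContainer) AddTask(task interface{}) bool {
	lc.lines = append(lc.lines, task.(string))
	return len(lc.lines) >= lc.max
}

// Execute receives whatever RemoveAll returned and hands it to the sink.
func (lc *lineContainer) Execute(tasks interface{}) {
	lines := tasks.([]string)
	lc.sink(strings.Join(lines, "\n"))
}

// RemoveAll hands back the buffered lines and resets the buffer.
func (lc *lineContainer) RemoveAll() interface{} {
	lines := lc.lines
	lc.lines = nil
	return lines
}

Wiring it up then mirrors the bulk and chunk containers above, e.g. NewPeriodicalExecutor(time.Second, &lineContainer{max: 100, sink: writeBatch}) followed by Add("a line") calls, with writeBatch being whatever the caller wants to do with a joined batch.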
core/executors/periodicalexecutor_test.go (new file, 118 lines added)
@@ -0,0 +1,118 @@
package executors

import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"zero/core/timex"

	"github.com/stretchr/testify/assert"
)

const threshold = 10

type container struct {
	interval time.Duration
	tasks    []int
	execute  func(tasks interface{})
}

func newContainer(interval time.Duration, execute func(tasks interface{})) *container {
	return &container{
		interval: interval,
		execute:  execute,
	}
}

func (c *container) AddTask(task interface{}) bool {
	c.tasks = append(c.tasks, task.(int))
	return len(c.tasks) > threshold
}

func (c *container) Execute(tasks interface{}) {
	if c.execute != nil {
		c.execute(tasks)
	} else {
		time.Sleep(c.interval)
	}
}

func (c *container) RemoveAll() interface{} {
	tasks := c.tasks
	c.tasks = nil
	return tasks
}

func TestPeriodicalExecutor_Sync(t *testing.T) {
	var done int32
	exec := NewPeriodicalExecutor(time.Second, newContainer(time.Millisecond*500, nil))
	exec.Sync(func() {
		atomic.AddInt32(&done, 1)
	})
	assert.Equal(t, int32(1), atomic.LoadInt32(&done))
}

func TestPeriodicalExecutor_QuitGoroutine(t *testing.T) {
	ticker := timex.NewFakeTicker()
	exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, nil))
	exec.newTicker = func(d time.Duration) timex.Ticker {
		return ticker
	}
	routines := runtime.NumGoroutine()
	exec.Add(1)
	ticker.Tick()
	ticker.Wait(time.Millisecond * idleRound * 2)
	ticker.Tick()
	ticker.Wait(time.Millisecond * idleRound)
	assert.Equal(t, routines, runtime.NumGoroutine())
}

func TestPeriodicalExecutor_Bulk(t *testing.T) {
	ticker := timex.NewFakeTicker()
	var vals []int
	// avoid data race
	var lock sync.Mutex
	exec := NewPeriodicalExecutor(time.Millisecond, newContainer(time.Millisecond, func(tasks interface{}) {
		t := tasks.([]int)
		for _, each := range t {
			lock.Lock()
			vals = append(vals, each)
			lock.Unlock()
		}
	}))
	exec.newTicker = func(d time.Duration) timex.Ticker {
		return ticker
	}
	for i := 0; i < threshold*10; i++ {
		if i%threshold == 5 {
			time.Sleep(time.Millisecond * idleRound * 2)
		}
		exec.Add(i)
	}
	ticker.Tick()
	ticker.Wait(time.Millisecond * idleRound * 2)
	ticker.Tick()
	ticker.Tick()
	ticker.Wait(time.Millisecond * idleRound)
	var expect []int
	for i := 0; i < threshold*10; i++ {
		expect = append(expect, i)
	}

	lock.Lock()
	assert.EqualValues(t, expect, vals)
	lock.Unlock()
}

// go test -benchtime 10s -bench .
func BenchmarkExecutor(b *testing.B) {
	b.ReportAllocs()

	executor := NewPeriodicalExecutor(time.Second, newContainer(time.Millisecond*500, nil))
	for i := 0; i < b.N; i++ {
		executor.Add(1)
	}
}
core/executors/vars.go (new file, 7 lines added)
@@ -0,0 +1,7 @@
package executors

import "time"

const defaultFlushInterval = time.Second

type Execute func(tasks []interface{})