refactor
rq/internal/conf.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package internal

import (
    "zero/core/queue"
    "zero/core/stores/redis"
)

type RedisKeyConf struct {
    redis.RedisConf
    Key string `json:",optional"`
}

func (rkc RedisKeyConf) NewProducer(opts ...ProducerOption) (queue.Producer, error) {
    return newProducer(rkc.NewRedis(), rkc.Key, opts...)
}

func (rkc RedisKeyConf) NewPusher(opts ...PusherOption) queue.QueuePusher {
    return NewPusher(rkc.NewRedis(), rkc.Key, opts...)
}
rq/internal/const.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package internal

const (
    Delimeter             = "/"
    ServerSensitivePrefix = '*'
    TimedQueueType        = "timed"
)
rq/internal/hashchange.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package internal

import (
    "math/rand"

    "zero/core/hash"
)

type HashChange struct {
    id      int64
    oldHash *hash.ConsistentHash
    newHash *hash.ConsistentHash
}

func NewHashChange(oldHash, newHash *hash.ConsistentHash) HashChange {
    return HashChange{
        id:      rand.Int63(),
        oldHash: oldHash,
        newHash: newHash,
    }
}

func (hc HashChange) GetId() int64 {
    return hc.id
}

func (hc HashChange) ShallEvict(key interface{}) bool {
    oldTarget, oldOk := hc.oldHash.Get(key)
    if !oldOk {
        return false
    }

    newTarget, newOk := hc.newHash.Get(key)
    if !newOk {
        return false
    }

    return oldTarget != newTarget
}
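
Illustrative sketch (not part of this commit): how HashChange might be used to decide evictions after a consistent-hash rebalance. The server and key names below are made up.

package internal_test

import (
    "fmt"

    "zero/core/hash"
    "zero/rq/internal"
)

func ExampleHashChange() {
    oldHash := hash.NewConsistentHash()
    oldHash.Add("server-a")
    oldHash.Add("server-b")

    newHash := hash.NewConsistentHash()
    newHash.Add("server-a") // server-b was removed from the ring

    hc := internal.NewHashChange(oldHash, newHash)
    // true only for keys that used to map to server-b and now map elsewhere
    fmt.Println(hc.ShallEvict("user:42"))
}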
rq/internal/message.go (new file, 6 lines)
@@ -0,0 +1,6 @@
package internal

type TimedMessage struct {
    Time    int64  `json:"time"`
    Payload string `json:"payload"`
}
rq/internal/redisqueue_test.go (new file, 82 lines)
@@ -0,0 +1,82 @@
package internal

import (
    "strconv"
    "sync"
    "testing"

    "zero/core/logx"
    "zero/core/queue"
    "zero/core/stores/redis"

    "github.com/alicebob/miniredis"
    "github.com/stretchr/testify/assert"
)

func init() {
    logx.Disable()
}

func TestRedisQueue(t *testing.T) {
    const (
        total = 1000
        key   = "queue"
    )
    r, err := miniredis.Run()
    assert.Nil(t, err)

    c := RedisKeyConf{
        RedisConf: redis.RedisConf{
            Host: r.Addr(),
            Type: redis.NodeType,
        },
        Key: key,
    }

    pusher := NewPusher(c.NewRedis(), key, WithTime())
    assert.True(t, len(pusher.Name()) > 0)
    for i := 0; i < total; i++ {
        err := pusher.Push(strconv.Itoa(i))
        assert.Nil(t, err)
    }

    consumer := new(mockedConsumer)
    consumer.wait.Add(total)
    q := queue.NewQueue(func() (queue.Producer, error) {
        return c.NewProducer(TimeSensitive(5))
    }, func() (queue.Consumer, error) {
        return consumer, nil
    })
    q.SetNumProducer(1)
    q.SetNumConsumer(1)
    go func() {
        q.Start()
    }()
    consumer.wait.Wait()
    q.Stop()

    var expect int
    for i := 0; i < total; i++ {
        expect ^= i
    }
    assert.Equal(t, expect, consumer.xor)
}

type mockedConsumer struct {
    wait sync.WaitGroup
    xor  int
}

func (c *mockedConsumer) Consume(s string) error {
    val, err := strconv.Atoi(s)
    if err != nil {
        return err
    }

    c.xor ^= val
    c.wait.Done()
    return nil
}

func (c *mockedConsumer) OnEvent(event interface{}) {
}
rq/internal/redisqueueproducer.go (new file, 166 lines)
@@ -0,0 +1,166 @@
package internal

import (
    "fmt"
    "sync"
    "time"

    "zero/core/jsonx"
    "zero/core/logx"
    "zero/core/queue"
    "zero/core/stores/redis"
)

const (
    logIntervalMillis  = 1000
    retryRedisInterval = time.Second
)

type (
    ProducerOption func(p queue.Producer) queue.Producer

    RedisQueueProducer struct {
        name      string
        store     *redis.Redis
        key       string
        redisNode redis.ClosableNode
        listeners []queue.ProduceListener
    }
)

func NewProducerFactory(store *redis.Redis, key string, opts ...ProducerOption) queue.ProducerFactory {
    return func() (queue.Producer, error) {
        return newProducer(store, key, opts...)
    }
}

func (p *RedisQueueProducer) AddListener(listener queue.ProduceListener) {
    p.listeners = append(p.listeners, listener)
}

func (p *RedisQueueProducer) Name() string {
    return p.name
}

func (p *RedisQueueProducer) Produce() (string, bool) {
    lessLogger := logx.NewLessLogger(logIntervalMillis)

    for {
        value, ok, err := p.store.BlpopEx(p.redisNode, p.key)
        if err == nil {
            return value, ok
        } else if err == redis.Nil {
            // timed out without elements popped
            continue
        } else {
            lessLogger.Errorf("Error on blpop: %v", err)
            p.waitForRedisAvailable()
        }
    }
}

func newProducer(store *redis.Redis, key string, opts ...ProducerOption) (queue.Producer, error) {
    redisNode, err := redis.CreateBlockingNode(store)
    if err != nil {
        return nil, err
    }

    var producer queue.Producer = &RedisQueueProducer{
        name:      fmt.Sprintf("%s/%s/%s", store.Type, store.Addr, key),
        store:     store,
        key:       key,
        redisNode: redisNode,
    }

    for _, opt := range opts {
        producer = opt(producer)
    }

    return producer, nil
}

func (p *RedisQueueProducer) resetRedisConnection() error {
    if p.redisNode != nil {
        p.redisNode.Close()
        p.redisNode = nil
    }

    redisNode, err := redis.CreateBlockingNode(p.store)
    if err != nil {
        return err
    }

    p.redisNode = redisNode
    return nil
}

func (p *RedisQueueProducer) waitForRedisAvailable() {
    var paused bool
    var pauseOnce sync.Once

    for {
        if err := p.resetRedisConnection(); err != nil {
            pauseOnce.Do(func() {
                paused = true
                for _, listener := range p.listeners {
                    listener.OnProducerPause()
                }
            })
            logx.Errorf("Error occurred while connecting to redis: %v", err)
            time.Sleep(retryRedisInterval)
        } else {
            break
        }
    }

    if paused {
        for _, listener := range p.listeners {
            listener.OnProducerResume()
        }
    }
}

func TimeSensitive(seconds int64) ProducerOption {
    return func(p queue.Producer) queue.Producer {
        if seconds > 0 {
            return autoDropQueueProducer{
                seconds:  seconds,
                producer: p,
            }
        }

        return p
    }
}

type autoDropQueueProducer struct {
    seconds  int64 // age in seconds before a message is considered expired
    producer queue.Producer
}

func (p autoDropQueueProducer) AddListener(listener queue.ProduceListener) {
    p.producer.AddListener(listener)
}

func (p autoDropQueueProducer) Produce() (string, bool) {
    lessLogger := logx.NewLessLogger(logIntervalMillis)

    for {
        content, ok := p.producer.Produce()
        if !ok {
            return "", false
        }

        var timedMsg TimedMessage
        if err := jsonx.UnmarshalFromString(content, &timedMsg); err != nil {
            lessLogger.Errorf("invalid timedMessage: %s, error: %s", content, err.Error())
            continue
        }

        if timedMsg.Time+p.seconds < time.Now().Unix() {
            lessLogger.Errorf("expired timedMessage: %s", content)
        }

        return timedMsg.Payload, true
    }
}
rq/internal/redisqueuepusher.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package internal

import (
    "fmt"
    "time"

    "zero/core/jsonx"
    "zero/core/logx"
    "zero/core/queue"
    "zero/core/stores/redis"
)

type (
    PusherOption func(p queue.QueuePusher) queue.QueuePusher

    RedisQueuePusher struct {
        name  string
        store *redis.Redis
        key   string
    }
)

func NewPusher(store *redis.Redis, key string, opts ...PusherOption) queue.QueuePusher {
    var pusher queue.QueuePusher = &RedisQueuePusher{
        name:  fmt.Sprintf("%s/%s/%s", store.Type, store.Addr, key),
        store: store,
        key:   key,
    }

    for _, opt := range opts {
        pusher = opt(pusher)
    }

    return pusher
}

func (saver *RedisQueuePusher) Name() string {
    return saver.name
}

func (saver *RedisQueuePusher) Push(message string) error {
    _, err := saver.store.Rpush(saver.key, message)
    if err != nil {
        return err
    }

    logx.Infof("<= %s", message)
    return nil
}

func WithTime() PusherOption {
    return func(p queue.QueuePusher) queue.QueuePusher {
        return timedQueuePusher{
            pusher: p,
        }
    }
}

type timedQueuePusher struct {
    pusher queue.QueuePusher
}

func (p timedQueuePusher) Name() string {
    return p.pusher.Name()
}

func (p timedQueuePusher) Push(message string) error {
    tm := TimedMessage{
        Time:    time.Now().Unix(),
        Payload: message,
    }

    content, err := jsonx.Marshal(tm)
    if err != nil {
        return err
    }

    return p.pusher.Push(string(content))
}
rq/internal/update/incrementalupdater.go (new file, 179 lines)
@@ -0,0 +1,179 @@
package update

import (
    "sync"
    "time"

    "zero/core/hash"
    "zero/core/stringx"
)

const (
    incrementalStep = 5
    stepDuration    = time.Second * 3
)

type (
    updateEvent struct {
        keys    []string
        newKey  string
        servers []string
    }

    UpdateFunc func(change ServerChange)

    IncrementalUpdater struct {
        lock          sync.Mutex
        started       bool
        taskChan      chan updateEvent
        updates       ServerChange
        updateFn      UpdateFunc
        pendingEvents []updateEvent
    }
)

func NewIncrementalUpdater(updateFn UpdateFunc) *IncrementalUpdater {
    return &IncrementalUpdater{
        taskChan: make(chan updateEvent),
        updates: ServerChange{
            Current: Snapshot{
                Keys:         make([]string, 0),
                WeightedKeys: make([]weightedKey, 0),
            },
            Servers: make([]string, 0),
        },
        updateFn: updateFn,
    }
}

func (ru *IncrementalUpdater) Update(keys []string, servers []string, newKey string) {
    ru.lock.Lock()
    defer ru.lock.Unlock()

    if !ru.started {
        go ru.run()
        ru.started = true
    }

    ru.taskChan <- updateEvent{
        keys:    keys,
        newKey:  newKey,
        servers: servers,
    }
}

// advance returns true when the incremental update is done.
func (ru *IncrementalUpdater) advance() bool {
    previous := ru.updates.Current
    keys := make([]string, 0)
    weightedKeys := make([]weightedKey, 0)
    servers := ru.updates.Servers
    for _, key := range ru.updates.Current.Keys {
        keys = append(keys, key)
    }
    for _, wkey := range ru.updates.Current.WeightedKeys {
        weight := wkey.Weight + incrementalStep
        if weight >= hash.TopWeight {
            keys = append(keys, wkey.Key)
        } else {
            weightedKeys = append(weightedKeys, weightedKey{
                Key:    wkey.Key,
                Weight: weight,
            })
        }
    }

    for _, event := range ru.pendingEvents {
        // ignore reload events
        if len(event.newKey) == 0 || len(event.servers) == 0 {
            continue
        }

        // add the servers regardless, so that no server misses the notification
        servers = stringx.Union(servers, event.servers)
        if keyExists(keys, weightedKeys, event.newKey) {
            continue
        }

        weightedKeys = append(weightedKeys, weightedKey{
            Key:    event.newKey,
            Weight: incrementalStep,
        })
    }

    // clear pending events
    ru.pendingEvents = ru.pendingEvents[:0]

    change := ServerChange{
        Previous: previous,
        Current: Snapshot{
            Keys:         keys,
            WeightedKeys: weightedKeys,
        },
        Servers: servers,
    }
    ru.updates = change
    ru.updateFn(change)

    return len(weightedKeys) == 0
}

func (ru *IncrementalUpdater) run() {
    defer func() {
        ru.lock.Lock()
        ru.started = false
        ru.lock.Unlock()
    }()

    ticker := time.NewTicker(stepDuration)
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            if ru.advance() {
                return
            }
        case event := <-ru.taskChan:
            ru.updateKeys(event)
        }
    }
}

func (ru *IncrementalUpdater) updateKeys(event updateEvent) {
    isWeightedKey := func(key string) bool {
        for _, wkey := range ru.updates.Current.WeightedKeys {
            if wkey.Key == key {
                return true
            }
        }

        return false
    }

    keys := make([]string, 0, len(event.keys))
    for _, key := range event.keys {
        if !isWeightedKey(key) {
            keys = append(keys, key)
        }
    }

    ru.updates.Current.Keys = keys
    ru.pendingEvents = append(ru.pendingEvents, event)
}

func keyExists(keys []string, weightedKeys []weightedKey, key string) bool {
    for _, each := range keys {
        if key == each {
            return true
        }
    }

    for _, wkey := range weightedKeys {
        if wkey.Key == key {
            return true
        }
    }

    return false
}
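
Illustrative sketch (not part of this commit): one hypothetical way to wire an IncrementalUpdater, with an UpdateFunc that rebuilds the previous/current hashes on each step; the callback body and its log line are made up.

package update

import (
    "zero/core/logx"

    "zero/rq/internal"
)

// exampleUsage is a hypothetical caller: each incremental step rebuilds the
// hashes, which a consumer could use to decide which keys to evict or re-route.
func exampleUsage(keys, servers []string, newKey string) {
    updater := NewIncrementalUpdater(func(change ServerChange) {
        hc := internal.NewHashChange(change.CreatePrevHash(), change.CreateCurrentHash())
        logx.Infof("applied change %s, hash change id: %d", change.GetCode(), hc.GetId())
    })

    // every call feeds the updater; the background loop raises the new key's
    // weight by incrementalStep every stepDuration until it is fully weighted
    updater.Update(keys, servers, newKey)
}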
rq/internal/update/serverchange.go (new file, 106 lines)
@@ -0,0 +1,106 @@
package update

import (
    "crypto/md5"
    "errors"
    "fmt"
    "io"
    "sort"

    "zero/core/hash"
    "zero/core/jsonx"
    "zero/rq/internal"
)

var ErrInvalidServerChange = errors.New("not a server change message")

type (
    weightedKey struct {
        Key    string
        Weight int
    }

    Snapshot struct {
        Keys         []string
        WeightedKeys []weightedKey
    }

    ServerChange struct {
        Previous Snapshot
        Current  Snapshot
        Servers  []string
    }
)

func (s Snapshot) GetCode() string {
    keys := append([]string(nil), s.Keys...)
    sort.Strings(keys)
    weightedKeys := append([]weightedKey(nil), s.WeightedKeys...)
    sort.SliceStable(weightedKeys, func(i, j int) bool {
        return weightedKeys[i].Key < weightedKeys[j].Key
    })

    digest := md5.New()
    for _, key := range keys {
        io.WriteString(digest, fmt.Sprintf("%s\n", key))
    }
    for _, wkey := range weightedKeys {
        io.WriteString(digest, fmt.Sprintf("%s:%d\n", wkey.Key, wkey.Weight))
    }

    return fmt.Sprintf("%x", digest.Sum(nil))
}

func (sc ServerChange) CreateCurrentHash() *hash.ConsistentHash {
    curHash := hash.NewConsistentHash()

    for _, key := range sc.Current.Keys {
        curHash.Add(key)
    }
    for _, wkey := range sc.Current.WeightedKeys {
        curHash.AddWithWeight(wkey.Key, wkey.Weight)
    }

    return curHash
}

func (sc ServerChange) CreatePrevHash() *hash.ConsistentHash {
    prevHash := hash.NewConsistentHash()

    for _, key := range sc.Previous.Keys {
        prevHash.Add(key)
    }
    for _, wkey := range sc.Previous.WeightedKeys {
        prevHash.AddWithWeight(wkey.Key, wkey.Weight)
    }

    return prevHash
}

func (sc ServerChange) GetCode() string {
    return sc.Current.GetCode()
}

func IsServerChange(message string) bool {
    return len(message) > 0 && message[0] == internal.ServerSensitivePrefix
}

func (sc ServerChange) Marshal() (string, error) {
    body, err := jsonx.Marshal(sc)
    if err != nil {
        return "", err
    }

    return string(append([]byte{internal.ServerSensitivePrefix}, body...)), nil
}

func UnmarshalServerChange(body string) (ServerChange, error) {
    if len(body) == 0 {
        return ServerChange{}, ErrInvalidServerChange
    }

    var change ServerChange
    err := jsonx.UnmarshalFromString(body[1:], &change)

    return change, err
}
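
Illustrative sketch (not part of this commit): a ServerChange round trip through the '*'-prefixed wire format, using the prefix detection above; the key and server names are made up.

package update

import "fmt"

// exampleRoundTrip marshals a ServerChange and decodes it again.
func exampleRoundTrip() error {
    change := ServerChange{
        Current: Snapshot{Keys: []string{"node-1", "node-2"}},
        Servers: []string{"10.0.0.1:6379"},
    }

    msg, err := change.Marshal()
    if err != nil {
        return err
    }

    if IsServerChange(msg) {
        decoded, err := UnmarshalServerChange(msg)
        if err != nil {
            return err
        }
        fmt.Println(decoded.GetCode() == change.GetCode()) // true
    }

    return nil
}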