initial import

commit 7e3a369a8f
Author: kevin
Date: 2020-07-26 17:09:05 +08:00

647 changed files with 54754 additions and 0 deletions

18
rq/config.go Normal file

@@ -0,0 +1,18 @@
package rq
import (
"zero/core/discov"
"zero/core/service"
"zero/core/stores/redis"
)
type RmqConf struct {
service.ServiceConf
Redis redis.RedisKeyConf
Etcd discov.EtcdConf `json:",optional"`
NumProducers int `json:",optional"`
NumConsumers int `json:",optional"`
Timeout int64 `json:",optional"`
DropBefore int64 `json:",optional"`
ServerSensitive bool `json:",default=false"`
}

7
rq/constant/const.go Normal file

@@ -0,0 +1,7 @@
package constant
const (
Delimeter = "/"
ServerSensitivePrefix = '*'
TimedQueueType = "timed"
)

19
rq/etc/config.json Normal file

@@ -0,0 +1,19 @@
{
"Log": {
"Access": "logs/access.log",
"Error": "logs/error.log",
"Stat": "logs/stat.log"
},
"MetricsUrl": "http://localhost:2222/add",
"Redis": {
"Host": "localhost:6379",
"Type": "node",
"Key": "reqs"
},
"Etcd": {
"Hosts": [
"localhost:2379"
],
"EtcdKey": "rq"
}
}
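
The JSON above maps onto the RmqConf struct from rq/config.go: Redis fills redis.RedisKeyConf and Etcd fills discov.EtcdConf, while Log and MetricsUrl belong to the embedded service.ServiceConf. A minimal decoding sketch using only the standard library (the framework's own loader, which honors the `json:",optional"` tags, may behave differently):

package main

import (
	"encoding/json"
	"log"
	"os"

	"zero/rq"
)

func main() {
	data, err := os.ReadFile("rq/etc/config.json")
	if err != nil {
		log.Fatal(err)
	}
	var c rq.RmqConf
	// encoding/json simply ignores the ",optional" tag options
	if err := json.Unmarshal(data, &c); err != nil {
		log.Fatal(err)
	}
	log.Printf("redis host: %s, queue key: %s", c.Redis.Host, c.Redis.Key)
}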

39
rq/hashchange.go Normal file

@@ -0,0 +1,39 @@
package rq
import (
"math/rand"
"zero/core/hash"
)
type HashChange struct {
id int64
oldHash *hash.ConsistentHash
newHash *hash.ConsistentHash
}
func NewHashChange(oldHash, newHash *hash.ConsistentHash) HashChange {
return HashChange{
id: rand.Int63(),
oldHash: oldHash,
newHash: newHash,
}
}
func (hc HashChange) GetId() int64 {
return hc.id
}
func (hc HashChange) ShallEvict(key interface{}) bool {
oldTarget, oldOk := hc.oldHash.Get(key)
if !oldOk {
return false
}
newTarget, newOk := hc.newHash.Get(key)
if !newOk {
return false
}
return oldTarget != newTarget
}
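
To make the eviction rule concrete, here is a hedged sketch (node names invented; hash API as used in rq/update/serverchange.go below). A key is evicted only when both rings resolve it, and to different nodes:

package rq

import (
	"fmt"

	"zero/core/hash"
)

func ExampleHashChange_shallEvict() {
	oldHash := hash.NewConsistentHash()
	oldHash.Add("10.0.0.1:6379")
	oldHash.Add("10.0.0.2:6379")

	newHash := hash.NewConsistentHash()
	newHash.Add("10.0.0.1:6379")
	newHash.Add("10.0.0.2:6379")
	newHash.Add("10.0.0.3:6379") // a node joins the ring

	hc := NewHashChange(oldHash, newHash)
	// true only for keys whose owning node moved between the two rings
	fmt.Println(hc.ShallEvict("user:42"))
}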

446
rq/pusher.go Normal file

@@ -0,0 +1,446 @@
package rq
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"zero/core/discov"
"zero/core/errorx"
"zero/core/jsonx"
"zero/core/lang"
"zero/core/logx"
"zero/core/queue"
"zero/core/redisqueue"
"zero/core/stores/redis"
"zero/core/threading"
"zero/rq/constant"
"zero/rq/update"
)
const (
retryTimes = 3
etcdRedisFields = 4
)
var ErrPusherTypeError = errors.New("not a QueuePusher instance")
type (
KeyFn func(string) (key, payload string, err error)
KeysFn func(string) (ctx context.Context, keys []string, err error)
AssembleFn func(context.Context, []string) (payload string, err error)
PusherOption func(*Pusher) error
// a strategy may push once or retry up to retryTimes; either is safe.
// we only return an error when at least one server is alive and the
// push to it failed. if we are woken up but the server goes down
// right away, we simply wait again, so pushing once is safe.
pushStrategy interface {
addListener(listener discov.Listener)
push(string) error
}
batchConsistentStrategy struct {
keysFn KeysFn
assembleFn AssembleFn
subClient *discov.BatchConsistentSubClient
}
consistentStrategy struct {
keyFn KeyFn
subClient *discov.ConsistentSubClient
}
roundRobinStrategy struct {
subClient *discov.RoundRobinSubClient
}
serverListener struct {
updater *update.IncrementalUpdater
}
Pusher struct {
name string
endpoints []string
key string
failovers sync.Map
strategy pushStrategy
serverSensitive bool
}
)
func NewPusher(endpoints []string, key string, opts ...PusherOption) (*Pusher, error) {
pusher := &Pusher{
name: getName(key),
endpoints: endpoints,
key: key,
}
if len(opts) == 0 {
opts = []PusherOption{WithRoundRobinStrategy()}
}
for _, opt := range opts {
if err := opt(pusher); err != nil {
return nil, err
}
}
if pusher.serverSensitive {
listener := new(serverListener)
listener.updater = update.NewIncrementalUpdater(listener.update)
pusher.strategy.addListener(listener)
}
return pusher, nil
}
func (pusher *Pusher) Name() string {
return pusher.name
}
func (pusher *Pusher) Push(message string) error {
return pusher.strategy.push(message)
}
func (pusher *Pusher) close(server string, conn interface{}) error {
logx.Errorf("dropped redis node: %s", server)
return pusher.failover(server)
}
func (pusher *Pusher) dial(server string) (interface{}, error) {
pusher.failovers.Delete(server)
p, err := newPusher(server)
if err != nil {
return nil, err
}
logx.Infof("new redis node: %s", server)
return p, nil
}
func (pusher *Pusher) failover(server string) error {
pusher.failovers.Store(server, lang.Placeholder)
rds, key, option, err := newRedisWithKey(server)
if err != nil {
return err
}
threading.GoSafe(func() {
defer pusher.failovers.Delete(server)
for {
_, ok := pusher.failovers.Load(server)
if !ok {
logx.Infof("redis queue (%s) revived", server)
return
}
message, err := rds.Lpop(key)
if err != nil {
logx.Error(err)
return
}
if len(message) == 0 {
logx.Infof("repush redis queue (%s) done", server)
return
}
if option == constant.TimedQueueType {
message, err = unwrapTimedMessage(message)
if err != nil {
logx.Errorf("invalid timedMessage: %s, error: %s", message, err.Error())
return
}
}
if err = pusher.strategy.push(message); err != nil {
logx.Error(err)
return
}
}
})
return nil
}
func UnmarshalPusher(server string) (queue.QueuePusher, error) {
store, key, option, err := newRedisWithKey(server)
if err != nil {
return nil, err
}
if option == constant.TimedQueueType {
return redisqueue.NewPusher(store, key, redisqueue.WithTime()), nil
}
return redisqueue.NewPusher(store, key), nil
}
func WithBatchConsistentStrategy(keysFn KeysFn, assembleFn AssembleFn, opts ...discov.BalanceOption) PusherOption {
return func(pusher *Pusher) error {
subClient, err := discov.NewBatchConsistentSubClient(pusher.endpoints, pusher.key, pusher.dial,
pusher.close, opts...)
if err != nil {
return err
}
pusher.strategy = batchConsistentStrategy{
keysFn: keysFn,
assembleFn: assembleFn,
subClient: subClient,
}
return nil
}
}
func WithConsistentStrategy(keyFn KeyFn, opts ...discov.BalanceOption) PusherOption {
return func(pusher *Pusher) error {
subClient, err := discov.NewConsistentSubClient(pusher.endpoints, pusher.key, pusher.dial, pusher.close, opts...)
if err != nil {
return err
}
pusher.strategy = consistentStrategy{
keyFn: keyFn,
subClient: subClient,
}
return nil
}
}
func WithRoundRobinStrategy() PusherOption {
return func(pusher *Pusher) error {
subClient, err := discov.NewRoundRobinSubClient(pusher.endpoints, pusher.key, pusher.dial, pusher.close)
if err != nil {
return err
}
pusher.strategy = roundRobinStrategy{
subClient: subClient,
}
return nil
}
}
func WithServerSensitive() PusherOption {
return func(pusher *Pusher) error {
pusher.serverSensitive = true
return nil
}
}
func (bcs batchConsistentStrategy) addListener(listener discov.Listener) {
bcs.subClient.AddListener(listener)
}
func (bcs batchConsistentStrategy) balance(keys []string) map[interface{}][]string {
// we need to make sure the servers are available, otherwise wait forever
for {
if mapping, ok := bcs.subClient.Next(keys); ok {
return mapping
} else {
bcs.subClient.WaitForServers()
// make sure we don't flood logs too much in extreme conditions
time.Sleep(time.Second)
}
}
}
func (bcs batchConsistentStrategy) push(message string) error {
ctx, keys, err := bcs.keysFn(message)
if err != nil {
return err
}
var batchError errorx.BatchError
mapping := bcs.balance(keys)
for conn, connKeys := range mapping {
payload, err := bcs.assembleFn(ctx, connKeys)
if err != nil {
batchError.Add(err)
continue
}
for i := 0; i < retryTimes; i++ {
if err = bcs.pushOnce(conn, payload); err == nil {
break
}
}
// report a failure only if every retry for this server failed,
// matching the success-wins behavior of consistentStrategy.push
if err != nil {
batchError.Add(err)
}
}
return batchError.Err()
}
func (bcs batchConsistentStrategy) pushOnce(server interface{}, payload string) error {
pusher, ok := server.(queue.QueuePusher)
if ok {
return pusher.Push(payload)
} else {
return ErrPusherTypeError
}
}
func (cs consistentStrategy) addListener(listener discov.Listener) {
cs.subClient.AddListener(listener)
}
func (cs consistentStrategy) push(message string) error {
var batchError errorx.BatchError
key, payload, err := cs.keyFn(message)
if err != nil {
return err
}
for i := 0; i < retryTimes; i++ {
if err = cs.pushOnce(key, payload); err != nil {
batchError.Add(err)
} else {
return nil
}
}
return batchError.Err()
}
func (cs consistentStrategy) pushOnce(key, payload string) error {
// we need to make sure the servers are available, otherwise wait forever
for {
if server, ok := cs.subClient.Next(key); ok {
pusher, ok := server.(queue.QueuePusher)
if ok {
return pusher.Push(payload)
} else {
return ErrPusherTypeError
}
} else {
cs.subClient.WaitForServers()
// make sure we don't flood logs too much in extreme conditions
time.Sleep(time.Second)
}
}
}
func (rrs roundRobinStrategy) addListener(listener discov.Listener) {
rrs.subClient.AddListener(listener)
}
func (rrs roundRobinStrategy) push(message string) error {
var batchError errorx.BatchError
for i := 0; i < retryTimes; i++ {
if err := rrs.pushOnce(message); err != nil {
batchError.Add(err)
} else {
return nil
}
}
return batchError.Err()
}
func (rrs roundRobinStrategy) pushOnce(message string) error {
if server, ok := rrs.subClient.Next(); ok {
pusher, ok := server.(queue.QueuePusher)
if ok {
return pusher.Push(message)
} else {
return ErrPusherTypeError
}
} else {
rrs.subClient.WaitForServers()
return rrs.pushOnce(message)
}
}
func getName(key string) string {
return fmt.Sprintf("etcd:%s", key)
}
func newPusher(server string) (queue.QueuePusher, error) {
if rds, key, option, err := newRedisWithKey(server); err != nil {
return nil, err
} else if option == constant.TimedQueueType {
return redisqueue.NewPusher(rds, key, redisqueue.WithTime()), nil
} else {
return redisqueue.NewPusher(rds, key), nil
}
}
func newRedisWithKey(server string) (rds *redis.Redis, key, option string, err error) {
fields := strings.Split(server, constant.Delimeter)
if len(fields) < etcdRedisFields {
err = fmt.Errorf("wrong redis queue: %s, should be ip:port/type/password/key/[option]", server)
return
}
addr := fields[0]
tp := fields[1]
pass := fields[2]
key = fields[3]
if len(fields) > etcdRedisFields {
option = fields[4]
}
rds = redis.NewRedis(addr, tp, pass)
return
}
func (sl *serverListener) OnUpdate(keys []string, servers []string, newKey string) {
sl.updater.Update(keys, servers, newKey)
}
func (sl *serverListener) OnReload() {
sl.updater.Update(nil, nil, "")
}
func (sl *serverListener) update(change update.ServerChange) {
content, err := change.Marshal()
if err != nil {
logx.Error(err)
// don't broadcast a bogus payload when marshalling failed
return
}
if err = broadcast(change.Servers, content); err != nil {
logx.Error(err)
}
}
func broadcast(servers []string, message string) error {
var be errorx.BatchError
for _, server := range servers {
q, err := UnmarshalPusher(server)
if err != nil {
be.Add(err)
} else if err := q.Push(message); err != nil {
// collect push failures instead of dropping them silently
be.Add(err)
}
}
return be.Err()
}
func unwrapTimedMessage(message string) (string, error) {
var tm redisqueue.TimedMessage
if err := jsonx.UnmarshalFromString(message, &tm); err != nil {
return "", err
}
return tm.Payload, nil
}
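
A usage sketch for the pusher side, with invented etcd endpoint and key values. The redis nodes registered under the key must use the ip:port/type/password/key/[option] format that newRedisWithKey parses:

package main

import (
	"log"

	"zero/rq"
)

func main() {
	// endpoints and key are illustrative only
	pusher, err := rq.NewPusher([]string{"localhost:2379"}, "rq",
		rq.WithConsistentStrategy(func(msg string) (key, payload string, err error) {
			// route on the raw message; real code would extract a stable id
			return msg, msg, nil
		}))
	if err != nil {
		log.Fatal(err)
	}
	if err := pusher.Push("hello"); err != nil {
		log.Fatal(err)
	}
}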

339
rq/queue.go Normal file

@@ -0,0 +1,339 @@
package rq
import (
"errors"
"fmt"
"log"
"strings"
"sync"
"time"
"zero/core/discov"
"zero/core/logx"
"zero/core/queue"
"zero/core/redisqueue"
"zero/core/service"
"zero/core/stores/redis"
"zero/core/stringx"
"zero/core/threading"
"zero/rq/constant"
"zero/rq/update"
)
const keyLen = 6
var (
ErrTimeout = errors.New("timeout error")
eventHandlerPlaceholder = dummyEventHandler(0)
)
type (
ConsumeHandle func(string) error
ConsumeHandler interface {
Consume(string) error
}
EventHandler interface {
OnEvent(event interface{})
}
QueueOption func(queue *MessageQueue)
queueOptions struct {
renewId int64
}
MessageQueue struct {
c RmqConf
redisQueue *queue.Queue
consumerFactory queue.ConsumerFactory
options queueOptions
eventLock sync.Mutex
lastEvent string
}
)
func MustNewMessageQueue(c RmqConf, factory queue.ConsumerFactory, opts ...QueueOption) queue.MessageQueue {
q, err := NewMessageQueue(c, factory, opts...)
if err != nil {
log.Fatal(err)
}
return q
}
func NewMessageQueue(c RmqConf, factory queue.ConsumerFactory, opts ...QueueOption) (queue.MessageQueue, error) {
if err := c.SetUp(); err != nil {
return nil, err
}
q := &MessageQueue{
c: c,
}
if len(q.c.Redis.Key) == 0 {
if len(q.c.Name) == 0 {
q.c.Redis.Key = stringx.Randn(keyLen)
} else {
q.c.Redis.Key = fmt.Sprintf("%s-%s", q.c.Name, stringx.Randn(keyLen))
}
}
if q.c.Timeout > 0 {
factory = wrapWithTimeout(factory, time.Duration(q.c.Timeout)*time.Millisecond)
}
factory = wrapWithServerSensitive(q, factory)
q.consumerFactory = factory
q.redisQueue = q.buildQueue()
for _, opt := range opts {
opt(q)
}
return q, nil
}
func (q *MessageQueue) Start() {
serviceGroup := service.NewServiceGroup()
serviceGroup.Add(q.redisQueue)
q.maybeAppendRenewer(serviceGroup, q.redisQueue)
serviceGroup.Start()
}
func (q *MessageQueue) Stop() {
logx.Close()
}
func (q *MessageQueue) buildQueue() *queue.Queue {
inboundStore := redis.NewRedis(q.c.Redis.Host, q.c.Redis.Type, q.c.Redis.Pass)
producerFactory := redisqueue.NewProducerFactory(inboundStore, q.c.Redis.Key,
redisqueue.TimeSensitive(q.c.DropBefore))
mq := queue.NewQueue(producerFactory, q.consumerFactory)
if len(q.c.Name) > 0 {
mq.SetName(q.c.Name)
}
if q.c.NumConsumers > 0 {
mq.SetNumConsumer(q.c.NumConsumers)
}
if q.c.NumProducers > 0 {
mq.SetNumProducer(q.c.NumProducers)
}
return mq
}
func (q *MessageQueue) compareAndSetEvent(event string) bool {
q.eventLock.Lock()
defer q.eventLock.Unlock()
if q.lastEvent == event {
return false
}
q.lastEvent = event
return true
}
func (q *MessageQueue) maybeAppendRenewer(group *service.ServiceGroup, mq *queue.Queue) {
// registering with etcd needs both hosts and a key
if len(q.c.Etcd.Hosts) > 0 && len(q.c.Etcd.Key) > 0 {
etcdValue := MarshalQueue(q.c.Redis)
if q.c.DropBefore > 0 {
etcdValue = strings.Join([]string{etcdValue, constant.TimedQueueType}, constant.Delimeter)
}
keepAliver := discov.NewRenewer(q.c.Etcd.Hosts, q.c.Etcd.Key, etcdValue, q.options.renewId)
mq.AddListener(pauseResumeHandler{
Renewer: keepAliver,
})
group.Add(keepAliver)
}
}
func MarshalQueue(rds redis.RedisKeyConf) string {
return strings.Join([]string{
rds.Host,
rds.Type,
rds.Pass,
rds.Key,
}, constant.Delimeter)
}
func WithHandle(handle ConsumeHandle) queue.ConsumerFactory {
return WithHandler(innerConsumerHandler{handle})
}
func WithHandler(handler ConsumeHandler, eventHandlers ...EventHandler) queue.ConsumerFactory {
return func() (queue.Consumer, error) {
if len(eventHandlers) < 1 {
return eventConsumer{
consumeHandler: handler,
eventHandler: eventHandlerPlaceholder,
}, nil
} else {
return eventConsumer{
consumeHandler: handler,
eventHandler: eventHandlers[0],
}, nil
}
}
}
func WithHandlerFactory(factory func() (ConsumeHandler, error)) queue.ConsumerFactory {
return func() (queue.Consumer, error) {
if handler, err := factory(); err != nil {
return nil, err
} else {
return eventlessHandler{handler}, nil
}
}
}
func WithRenewId(id int64) QueueOption {
return func(mq *MessageQueue) {
mq.options.renewId = id
}
}
func wrapWithServerSensitive(mq *MessageQueue, factory queue.ConsumerFactory) queue.ConsumerFactory {
return func() (queue.Consumer, error) {
consumer, err := factory()
if err != nil {
return nil, err
}
return &serverSensitiveConsumer{
mq: mq,
consumer: consumer,
}, nil
}
}
func wrapWithTimeout(factory queue.ConsumerFactory, dt time.Duration) queue.ConsumerFactory {
return func() (queue.Consumer, error) {
consumer, err := factory()
if err != nil {
return nil, err
}
return &timeoutConsumer{
consumer: consumer,
dt: dt,
timer: time.NewTimer(dt),
}, nil
}
}
type innerConsumerHandler struct {
handle ConsumeHandle
}
func (h innerConsumerHandler) Consume(v string) error {
return h.handle(v)
}
type serverSensitiveConsumer struct {
mq *MessageQueue
consumer queue.Consumer
}
func (c *serverSensitiveConsumer) Consume(msg string) error {
if update.IsServerChange(msg) {
change, err := update.UnmarshalServerChange(msg)
if err != nil {
return err
}
code := change.GetCode()
if !c.mq.compareAndSetEvent(code) {
return nil
}
oldHash := change.CreatePrevHash()
newHash := change.CreateCurrentHash()
hashChange := NewHashChange(oldHash, newHash)
c.mq.redisQueue.Broadcast(hashChange)
return nil
}
return c.consumer.Consume(msg)
}
func (c *serverSensitiveConsumer) OnEvent(event interface{}) {
c.consumer.OnEvent(event)
}
type timeoutConsumer struct {
consumer queue.Consumer
dt time.Duration
timer *time.Timer
}
func (c *timeoutConsumer) Consume(msg string) error {
// buffered, so the worker goroutine can't leak if the timeout fires first
done := make(chan error, 1)
threading.GoSafe(func() {
if err := c.consumer.Consume(msg); err != nil {
done <- err
}
close(done)
})
c.timer.Reset(c.dt)
select {
case err := <-done:
c.timer.Stop()
// a closed channel yields a nil error, meaning the handler succeeded
return err
case <-c.timer.C:
return ErrTimeout
}
}
func (c *timeoutConsumer) OnEvent(event interface{}) {
c.consumer.OnEvent(event)
}
type pauseResumeHandler struct {
discov.Renewer
}
func (pr pauseResumeHandler) OnPause() {
pr.Pause()
}
func (pr pauseResumeHandler) OnResume() {
pr.Resume()
}
type eventConsumer struct {
consumeHandler ConsumeHandler
eventHandler EventHandler
}
func (ec eventConsumer) Consume(msg string) error {
return ec.consumeHandler.Consume(msg)
}
func (ec eventConsumer) OnEvent(event interface{}) {
ec.eventHandler.OnEvent(event)
}
type eventlessHandler struct {
handler ConsumeHandler
}
func (h eventlessHandler) Consume(msg string) error {
return h.handler.Consume(msg)
}
func (h eventlessHandler) OnEvent(event interface{}) {
}
type dummyEventHandler int
func (eh dummyEventHandler) OnEvent(event interface{}) {
}
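
And the consumer side, as a hedged sketch (config values invented; SetUp and Name come from the embedded service.ServiceConf):

package main

import (
	"fmt"

	"zero/rq"
)

func main() {
	var c rq.RmqConf
	c.Name = "demo-rq" // assuming ServiceConf exposes Name, as buildQueue reads q.c.Name
	c.Redis.Host = "localhost:6379"
	c.Redis.Type = "node"
	c.Redis.Key = "reqs"
	c.Timeout = 500 // milliseconds; applied by wrapWithTimeout above

	q := rq.MustNewMessageQueue(c, rq.WithHandle(func(msg string) error {
		fmt.Println("consumed:", msg)
		return nil
	}))
	defer q.Stop()
	q.Start() // runs the redis queue (and the etcd renewer, if configured)
}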

62
rq/queue_test.go Normal file

@@ -0,0 +1,62 @@
package rq
import (
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestQueueWithTimeout(t *testing.T) {
consumer, err := wrapWithTimeout(WithHandle(func(string) error {
time.Sleep(time.Minute)
return nil
}), time.Millisecond*100)()
if err != nil {
t.Fatal(err)
}
assert.Equal(t, ErrTimeout, consumer.Consume("any"))
}
func TestQueueWithoutTimeout(t *testing.T) {
consumer, err := wrapWithTimeout(WithHandle(func(string) error {
return nil
}), time.Hour)()
if err != nil {
t.Fatal(err)
}
assert.Nil(t, consumer.Consume("any"))
}
func BenchmarkQueue(b *testing.B) {
b.ReportAllocs()
consumer, err := WithHandle(func(string) error {
return nil
})()
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
consumer.Consume(strconv.Itoa(i))
}
}
func BenchmarkQueueWithTimeout(b *testing.B) {
b.ReportAllocs()
consumer, err := wrapWithTimeout(WithHandle(func(string) error {
return nil
}), time.Second)()
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
consumer.Consume(strconv.Itoa(i))
}
}

179
rq/update/incrementalupdater.go Normal file

@@ -0,0 +1,179 @@
package update
import (
"sync"
"time"
"zero/core/hash"
"zero/core/stringx"
)
const (
incrementalStep = 5
stepDuration = time.Second * 3
)
type (
updateEvent struct {
keys []string
newKey string
servers []string
}
UpdateFunc func(change ServerChange)
IncrementalUpdater struct {
lock sync.Mutex
started bool
taskChan chan updateEvent
updates ServerChange
updateFn UpdateFunc
pendingEvents []updateEvent
}
)
func NewIncrementalUpdater(updateFn UpdateFunc) *IncrementalUpdater {
return &IncrementalUpdater{
taskChan: make(chan updateEvent),
updates: ServerChange{
Current: Snapshot{
Keys: make([]string, 0),
WeightedKeys: make([]weightedKey, 0),
},
Servers: make([]string, 0),
},
updateFn: updateFn,
}
}
func (ru *IncrementalUpdater) Update(keys []string, servers []string, newKey string) {
ru.lock.Lock()
defer ru.lock.Unlock()
if !ru.started {
go ru.run()
ru.started = true
}
ru.taskChan <- updateEvent{
keys: keys,
newKey: newKey,
servers: servers,
}
}
// advance returns true when the incremental update has completed
func (ru *IncrementalUpdater) advance() bool {
previous := ru.updates.Current
keys := make([]string, 0)
weightedKeys := make([]weightedKey, 0)
servers := ru.updates.Servers
for _, key := range ru.updates.Current.Keys {
keys = append(keys, key)
}
for _, wkey := range ru.updates.Current.WeightedKeys {
weight := wkey.Weight + incrementalStep
if weight >= hash.TopWeight {
keys = append(keys, wkey.Key)
} else {
weightedKeys = append(weightedKeys, weightedKey{
Key: wkey.Key,
Weight: weight,
})
}
}
for _, event := range ru.pendingEvents {
// ignore reload events
if len(event.newKey) == 0 || len(event.servers) == 0 {
continue
}
// always include the servers, so that no server misses the notification
servers = stringx.Union(servers, event.servers)
if keyExists(keys, weightedKeys, event.newKey) {
continue
}
weightedKeys = append(weightedKeys, weightedKey{
Key: event.newKey,
Weight: incrementalStep,
})
}
// clear pending events
ru.pendingEvents = ru.pendingEvents[:0]
change := ServerChange{
Previous: previous,
Current: Snapshot{
Keys: keys,
WeightedKeys: weightedKeys,
},
Servers: servers,
}
ru.updates = change
ru.updateFn(change)
return len(weightedKeys) == 0
}
func (ru *IncrementalUpdater) run() {
defer func() {
ru.lock.Lock()
ru.started = false
ru.lock.Unlock()
}()
ticker := time.NewTicker(stepDuration)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if ru.advance() {
return
}
case event := <-ru.taskChan:
ru.updateKeys(event)
}
}
}
func (ru *IncrementalUpdater) updateKeys(event updateEvent) {
isWeightedKey := func(key string) bool {
for _, wkey := range ru.updates.Current.WeightedKeys {
if wkey.Key == key {
return true
}
}
return false
}
keys := make([]string, 0, len(event.keys))
for _, key := range event.keys {
if !isWeightedKey(key) {
keys = append(keys, key)
}
}
ru.updates.Current.Keys = keys
ru.pendingEvents = append(ru.pendingEvents, event)
}
func keyExists(keys []string, weightedKeys []weightedKey, key string) bool {
for _, each := range keys {
if key == each {
return true
}
}
for _, wkey := range weightedKeys {
if wkey.Key == key {
return true
}
}
return false
}
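
A sketch of the ramp-up this implements: a freshly joined key starts at weight 5 (incrementalStep) and gains 5 more every 3 seconds (stepDuration) until it reaches hash.TopWeight, at which point it graduates to a plain key and the update finishes. Values below are invented:

package update

import "fmt"

// a minimal sketch: log each incremental step as nodeC gains weight
func ExampleIncrementalUpdater() {
	updater := NewIncrementalUpdater(func(change ServerChange) {
		fmt.Printf("keys=%v weighted=%v servers=%v\n",
			change.Current.Keys, change.Current.WeightedKeys, change.Servers)
	})
	// two established keys, one new key, one illustrative server string
	updater.Update([]string{"nodeA", "nodeB"},
		[]string{"localhost:6379/node//jobs"}, "nodeC")
}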

106
rq/update/serverchange.go Normal file

@@ -0,0 +1,106 @@
package update
import (
"crypto/md5"
"errors"
"fmt"
"io"
"sort"
"zero/core/hash"
"zero/core/jsonx"
"zero/rq/constant"
)
var ErrInvalidServerChange = errors.New("not a server change message")
type (
weightedKey struct {
Key string
Weight int
}
Snapshot struct {
Keys []string
WeightedKeys []weightedKey
}
ServerChange struct {
Previous Snapshot
Current Snapshot
Servers []string
}
)
func (s Snapshot) GetCode() string {
keys := append([]string(nil), s.Keys...)
sort.Strings(keys)
weightedKeys := append([]weightedKey(nil), s.WeightedKeys...)
sort.SliceStable(weightedKeys, func(i, j int) bool {
return weightedKeys[i].Key < weightedKeys[j].Key
})
digest := md5.New()
for _, key := range keys {
io.WriteString(digest, fmt.Sprintf("%s\n", key))
}
for _, wkey := range weightedKeys {
io.WriteString(digest, fmt.Sprintf("%s:%d\n", wkey.Key, wkey.Weight))
}
return fmt.Sprintf("%x", digest.Sum(nil))
}
func (sc ServerChange) CreateCurrentHash() *hash.ConsistentHash {
curHash := hash.NewConsistentHash()
for _, key := range sc.Current.Keys {
curHash.Add(key)
}
for _, wkey := range sc.Current.WeightedKeys {
curHash.AddWithWeight(wkey.Key, wkey.Weight)
}
return curHash
}
func (sc ServerChange) CreatePrevHash() *hash.ConsistentHash {
prevHash := hash.NewConsistentHash()
for _, key := range sc.Previous.Keys {
prevHash.Add(key)
}
for _, wkey := range sc.Previous.WeightedKeys {
prevHash.AddWithWeight(wkey.Key, wkey.Weight)
}
return prevHash
}
func (sc ServerChange) GetCode() string {
return sc.Current.GetCode()
}
func IsServerChange(message string) bool {
return len(message) > 0 && message[0] == constant.ServerSensitivePrefix
}
func (sc ServerChange) Marshal() (string, error) {
body, err := jsonx.Marshal(sc)
if err != nil {
return "", err
}
return string(append([]byte{constant.ServerSensitivePrefix}, body...)), nil
}
func UnmarshalServerChange(body string) (ServerChange, error) {
if !IsServerChange(body) {
return ServerChange{}, ErrInvalidServerChange
}
var change ServerChange
err := jsonx.UnmarshalFromString(body[1:], &change)
return change, err
}
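
A round-trip sketch of the wire format: Marshal prefixes the JSON body with constant.ServerSensitivePrefix ('*'), IsServerChange tests for that prefix, and UnmarshalServerChange strips it again (values invented):

package update

import "fmt"

func ExampleServerChange_roundTrip() {
	change := ServerChange{
		Current: Snapshot{Keys: []string{"nodeA"}},
		Servers: []string{"localhost:6379/node//jobs"}, // illustrative server string
	}
	msg, err := change.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(IsServerChange(msg)) // true: the body starts with '*'
	decoded, err := UnmarshalServerChange(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetCode() == change.GetCode()) // snapshot digests match
}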