initial import

kevin
2020-07-26 17:09:05 +08:00
commit 7e3a369a8f
647 changed files with 54754 additions and 0 deletions

View File

@@ -0,0 +1,42 @@
package main
import (
"fmt"
"zero/core/stores/redis"
"zero/dq"
)
func main() {
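// Consume from five beanstalkd nodes on the same tube; the Redis node is
// presumably used by dq to deduplicate messages that were pushed to more
// than one node for redundancy.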
consumer := dq.NewConsumer(dq.DqConf{
Beanstalks: []dq.Beanstalk{
{
Endpoint: "localhost:11300",
Tube: "tube",
},
{
Endpoint: "localhost:11301",
Tube: "tube",
},
{
Endpoint: "localhost:11302",
Tube: "tube",
},
{
Endpoint: "localhost:11303",
Tube: "tube",
},
{
Endpoint: "localhost:11304",
Tube: "tube",
},
},
Redis: redis.RedisConf{
Host: "localhost:6379",
Type: redis.NodeType,
},
})
consumer.Consume(func(body []byte) {
fmt.Println(string(body))
})
}

View File

@@ -0,0 +1,40 @@
package main
import (
"fmt"
"strconv"
"time"
"zero/dq"
)
func main() {
producer := dq.NewProducer([]dq.Beanstalk{
{
Endpoint: "localhost:11300",
Tube: "tube",
},
{
Endpoint: "localhost:11301",
Tube: "tube",
},
{
Endpoint: "localhost:11302",
Tube: "tube",
},
{
Endpoint: "localhost:11303",
Tube: "tube",
},
{
Endpoint: "localhost:11304",
Tube: "tube",
},
})
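// Schedule five delayed messages, each to be delivered about 10 seconds from now.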
for i := 0; i < 5; i++ {
_, err := producer.At([]byte(strconv.Itoa(i)), time.Now().Add(time.Second*10))
if err != nil {
fmt.Println(err)
}
}
}

18
example/bloom/bloom.go Normal file
View File

@@ -0,0 +1,18 @@
package main
import (
"fmt"
"zero/core/bloom"
"zero/core/stores/redis"
)
func main() {
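// 64 is the bit size of the underlying bitmap; a bloom filter may report
// false positives but never false negatives, so the last Exists should print false.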
store := redis.NewRedis("localhost:6379", "node")
filter := bloom.New(store, "testbloom", 64)
filter.Add([]byte("kevin"))
filter.Add([]byte("wan"))
fmt.Println(filter.Exists([]byte("kevin")))
fmt.Println(filter.Exists([]byte("wan")))
fmt.Println(filter.Exists([]byte("nothing")))
}

139
example/breaker/main.go Normal file
View File

@@ -0,0 +1,139 @@
package main
import (
"fmt"
"math/rand"
"os"
"sync"
"sync/atomic"
"time"
"zero/core/breaker"
"zero/core/lang"
"gopkg.in/cheggaaa/pb.v1"
)
const (
duration = time.Minute * 5
breakRange = 20
workRange = 50
requestInterval = time.Millisecond
// multiplied so the server state is visible alongside call counts in the plot
stateFactor = float64(time.Second/requestInterval) / 2
)
type (
server struct {
state int32
}
metric struct {
calls int64
}
)
func (m *metric) addCall() {
atomic.AddInt64(&m.calls, 1)
}
func (m *metric) reset() int64 {
return atomic.SwapInt64(&m.calls, 0)
}
func newServer() *server {
return &server{}
}
func (s *server) serve(m *metric) bool {
m.addCall()
return atomic.LoadInt32(&s.state) == 1
}
func (s *server) start() {
go func() {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var state int32
for {
var v int32
if state == 0 {
v = r.Int31n(breakRange)
} else {
v = r.Int31n(workRange)
}
time.Sleep(time.Second * time.Duration(v+1))
state ^= 1
atomic.StoreInt32(&s.state, state)
}
}()
}
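// runBreaker fires one request per requestInterval through the breaker until
// duration elapses; a down server is reported as ErrServiceUnavailable so the
// breaker can trip.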
func runBreaker(s *server, br breaker.Breaker, duration time.Duration, m *metric) {
ticker := time.NewTicker(requestInterval)
defer ticker.Stop()
done := make(chan lang.PlaceholderType)
go func() {
time.Sleep(duration)
close(done)
}()
for {
select {
case <-ticker.C:
_ = br.Do(func() error {
if s.serve(m) {
return nil
} else {
return breaker.ErrServiceUnavailable
}
})
case <-done:
return
}
}
}
func main() {
srv := newServer()
srv.start()
gb := breaker.NewBreaker()
fp, err := os.Create("result.csv")
lang.Must(err)
defer fp.Close()
fmt.Fprintln(fp, "seconds,state,googleCalls,netflixCalls")
var gm, nm metric
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
var seconds int
for range ticker.C {
seconds++
gcalls := gm.reset()
ncalls := nm.reset()
fmt.Fprintf(fp, "%d,%.2f,%d,%d\n",
seconds, float64(atomic.LoadInt32(&srv.state))*stateFactor, gcalls, ncalls)
}
}()
var waitGroup sync.WaitGroup
waitGroup.Add(1)
go func() {
runBreaker(srv, gb, duration, &gm)
waitGroup.Done()
}()
go func() {
bar := pb.New(int(duration / time.Second)).Start()
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
bar.Increment()
}
bar.Finish()
}()
waitGroup.Wait()
}

15
example/breaker/plot.py Normal file
View File

@@ -0,0 +1,15 @@
import click
import pandas as pd
import matplotlib.pyplot as plt
@click.command()
@click.option("--csv", default="result.csv")
def main(csv):
df = pd.read_csv(csv, index_col="seconds")
df.plot()
plt.show()
if __name__ == "__main__":
main()

65
example/clickhouse/ch.go Normal file
View File

@@ -0,0 +1,65 @@
package main
import (
"log"
"time"
"zero/core/stores/clickhouse"
"zero/core/stores/sqlx"
)
func main() {
conn := clickhouse.New("tcp://127.0.0.1:9000")
_, err := conn.Exec(`
CREATE TABLE IF NOT EXISTS example (
country_code FixedString(2),
os_id UInt8,
browser_id UInt8,
categories Array(Int16),
action_day Date,
action_time DateTime
) engine=Memory
`)
if err != nil {
log.Fatal(err)
}
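// clickhouse-go batches rows written through a prepared statement inside a
// transaction and flushes them on commit, so this Transact acts as a bulk
// insert of the ten rows below.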
conn.Transact(func(session sqlx.Session) error {
stmt, err := session.Prepare("INSERT INTO example (country_code, os_id, browser_id, categories, action_day, action_time) VALUES (?, ?, ?, ?, ?, ?)")
if err != nil {
log.Fatal(err)
}
defer stmt.Close()
for i := 0; i < 10; i++ {
_, err := stmt.Exec("RU", 10+i, 100+i, []int16{1, 2, 3}, time.Now(), time.Now())
if err != nil {
log.Fatal(err)
}
}
return nil
})
var items []struct {
CountryCode string `db:"country_code"`
OsID uint8 `db:"os_id"`
BrowserID uint8 `db:"browser_id"`
Categories []int16 `db:"categories"`
ActionTime time.Time `db:"action_time"`
}
err = conn.QueryRows(&items, "SELECT country_code, os_id, browser_id, categories, action_time FROM example")
if err != nil {
log.Fatal(err)
}
for _, item := range items {
log.Printf("country: %s, os: %d, browser: %d, categories: %v, action_time: %s",
item.CountryCode, item.OsID, item.BrowserID, item.Categories, item.ActionTime)
}
if _, err := conn.Exec("DROP TABLE example"); err != nil {
log.Fatal(err)
}
}

View File

@@ -0,0 +1,2 @@
#date: "2019-06-20 00:00:00"
date: "2019-06-19T16:00:00Z"

View File

@@ -0,0 +1,21 @@
package main
import (
"time"
"zero/core/conf"
"zero/core/logx"
)
type TimeHolder struct {
Date time.Time `json:"date"`
}
func main() {
th := &TimeHolder{}
err := conf.LoadConfig("./date.yml", th)
if err != nil {
logx.Error(err)
}
logx.Infof("%+v", th)
}

View File

@@ -0,0 +1,27 @@
FROM golang:alpine AS builder
LABEL stage=gobuilder
ENV CGO_ENABLED 0
ENV GOOS linux
ENV GOPROXY https://goproxy.cn,direct
RUN apk add upx
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/etcdmon example/etcd/demo/etcdmon.go
RUN upx -q /app/etcdmon
FROM alpine
RUN apk update --no-cache
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY --from=builder /app/etcdmon /app/etcdmon
CMD ["./etcdmon"]

View File

@@ -0,0 +1,13 @@
version := v$(shell /bin/date "+%y%m%d%H%M%S")
build:
#docker pull alpine
#docker pull golang:alpine
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/etcdmon:$(version) . -f example/etcd/demo/Dockerfile
#docker image prune --filter label=stage=gobuilder -f
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/etcdmon:$(version)
deploy: push
kubectl -n xx-xiaoheiban set image deployment/etcdmon-deployment etcdmon=registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcdmon:$(version)

View File

@@ -0,0 +1,169 @@
package main
import (
"context"
"fmt"
"strings"
"time"
"zero/core/discov"
"zero/core/logx"
"zero/core/proc"
"zero/core/syncx"
"go.etcd.io/etcd/clientv3"
)
var (
endpoints []string
keys = []string{
"user.rpc",
"classroom.rpc",
}
vals = make(map[string]map[string]string)
barrier syncx.Barrier
)
type listener struct {
key string
}
func init() {
cluster := proc.Env("ETCD_CLUSTER")
if len(cluster) > 0 {
endpoints = strings.Split(cluster, ",")
} else {
endpoints = []string{"localhost:2379"}
}
}
func (l listener) OnAdd(key, val string) {
fmt.Printf("add, key: %s, val: %s\n", key, val)
barrier.Guard(func() {
if m, ok := vals[l.key]; ok {
m[key] = val
} else {
vals[l.key] = map[string]string{key: val}
}
})
}
func (l listener) OnDelete(key string) {
fmt.Printf("del, key: %s\n", key)
barrier.Guard(func() {
if m, ok := vals[l.key]; ok {
delete(m, key)
}
})
}
func load(cli *clientv3.Client, key string) (map[string]string, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
resp, err := cli.Get(ctx, key, clientv3.WithPrefix())
cancel()
if err != nil {
return nil, err
}
ret := make(map[string]string)
for _, ev := range resp.Kvs {
ret[string(ev.Key)] = string(ev.Value)
}
return ret, nil
}
func loadAll(cli *clientv3.Client) (map[string]map[string]string, error) {
ret := make(map[string]map[string]string)
for _, key := range keys {
m, err := load(cli, key)
if err != nil {
return nil, err
}
ret[key] = m
}
return ret, nil
}
func compare(a, b map[string]map[string]string) bool {
if len(a) != len(b) {
return false
}
for k := range a {
av := a[k]
bv := b[k]
if len(av) != len(bv) {
return false
}
for kk := range av {
if av[kk] != bv[kk] {
return false
}
}
}
return true
}
func serializeMap(m map[string]map[string]string, prefix string) string {
var builder strings.Builder
for k, v := range m {
fmt.Fprintf(&builder, "%s%s:\n", prefix, k)
for kk, vv := range v {
fmt.Fprintf(&builder, "%s\t%s: %s\n", prefix, kk, vv)
}
}
return builder.String()
}
func main() {
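// Watch the configured keys through discov while periodically re-reading them
// directly from etcd; if the watched view still differs from the freshly
// loaded view after a 5 second grace period, print both for inspection.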
registry := discov.NewFacade(endpoints)
for _, key := range keys {
registry.Monitor(key, listener{key: key})
}
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for {
select {
case <-ticker.C:
expect, err := loadAll(registry.Client().(*clientv3.Client))
if err != nil {
fmt.Println("[ETCD-test] can't load current keys")
continue
}
check := func() bool {
var match bool
barrier.Guard(func() {
match = compare(expect, vals)
})
if match {
logx.Info("match")
}
return match
}
if check() {
continue
}
time.AfterFunc(time.Second*5, func() {
if check() {
return
}
var builder strings.Builder
builder.WriteString(fmt.Sprintf("expect:\n%s\n", serializeMap(expect, "\t")))
barrier.Guard(func() {
builder.WriteString(fmt.Sprintf("actual:\n%s\n", serializeMap(vals, "\t")))
})
fmt.Println(builder.String())
})
}
}
}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: etcdmon
namespace: discov
spec:
containers:
- name: etcdmon
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcdmon:v200620093045
imagePullPolicy: Always
env:
- name: ETCD_CLUSTER
value: etcd.discov:2379
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: discovery

378
example/etcd/etcd.yaml Normal file
View File

@@ -0,0 +1,378 @@
apiVersion: v1
kind: Service
metadata:
name: discov
namespace: discovery
spec:
ports:
- name: discov-port
port: 2379
protocol: TCP
targetPort: 2379
selector:
app: discov
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: discov
discov_node: discov0
name: discov0
namespace: discovery
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- discov0
- --initial-advertise-peer-urls
- http://discov0:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://discov0:2379
- --initial-cluster
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
- --initial-cluster-state
- new
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
name: discov0
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
imagePullSecrets:
- name: aliyun
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- discov
topologyKey: "kubernetes.io/hostname"
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
discov_node: discov0
name: discov0
namespace: discovery
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
discov_node: discov0
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: discov
discov_node: discov1
name: discov1
namespace: discovery
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- discov1
- --initial-advertise-peer-urls
- http://discov1:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://discov1:2379
- --initial-cluster
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
- --initial-cluster-state
- new
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
name: discov1
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
imagePullSecrets:
- name: aliyun
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- discov
topologyKey: "kubernetes.io/hostname"
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
discov_node: discov1
name: discov1
namespace: discovery
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
discov_node: discov1
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: discov
discov_node: discov2
name: discov2
namespace: discovery
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- discov2
- --initial-advertise-peer-urls
- http://discov2:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://discov2:2379
- --initial-cluster
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
- --initial-cluster-state
- new
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
name: discov2
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
imagePullSecrets:
- name: aliyun
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- discov
topologyKey: "kubernetes.io/hostname"
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
discov_node: discov2
name: discov2
namespace: discovery
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
discov_node: discov2
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: discov
discov_node: discov3
name: discov3
namespace: discovery
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- discov3
- --initial-advertise-peer-urls
- http://discov3:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://discov3:2379
- --initial-cluster
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
- --initial-cluster-state
- new
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
name: discov3
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
imagePullSecrets:
- name: aliyun
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- discov
topologyKey: "kubernetes.io/hostname"
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
discov_node: discov3
name: discov3
namespace: discovery
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
discov_node: discov3
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: discov
discov_node: discov4
name: discov4
namespace: discovery
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- discov4
- --initial-advertise-peer-urls
- http://discov4:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://discov4:2379
- --initial-cluster
- discov0=http://discov0:2380,discov1=http://discov1:2380,discov2=http://discov2:2380,discov3=http://discov3:2380,discov4=http://discov4:2380
- --initial-cluster-state
- new
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/etcd:latest
name: discov4
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
imagePullSecrets:
- name: aliyun
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- discov
topologyKey: "kubernetes.io/hostname"
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
discov_node: discov4
name: discov4
namespace: discovery
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
discov_node: discov4

View File

@@ -0,0 +1,22 @@
FROM golang:1.13-alpine AS builder
LABEL stage=gobuilder
ENV CGO_ENABLED 0
ENV GOOS linux
ENV GOPROXY https://goproxy.cn,direct
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/pub example/etcd/pub/pub.go
FROM alpine
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY --from=builder /app/pub /app/pub
CMD ["./pub"]

11
example/etcd/pub/Makefile Normal file
View File

@@ -0,0 +1,11 @@
version := v$(shell /bin/date "+%y%m%d%H%M%S")
build:
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/pub:$(version) . -f example/etcd/pub/Dockerfile
docker image prune --filter label=stage=gobuilder -f
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/pub:$(version)
deploy: push
kubectl -n adhoc set image deployment/pub-deployment pub=registry-vpc.cn-hangzhou.aliyuncs.com/xapp/pub:$(version)

27
example/etcd/pub/pub.go Normal file
View File

@@ -0,0 +1,27 @@
package main
import (
"flag"
"fmt"
"log"
"time"
"zero/core/discov"
)
var value = flag.String("v", "value", "the value")
func main() {
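// Publish the flag value under key 028F2C35852D and keep the etcd lease
// alive; subscribers see the value for as long as this process runs.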
flag.Parse()
client := discov.NewPublisher([]string{"etcd.discovery:2379"}, "028F2C35852D", *value)
if err := client.KeepAlive(); err != nil {
log.Fatal(err)
}
defer client.Stop()
for {
time.Sleep(time.Second)
fmt.Println(*value)
}
}

26
example/etcd/pub/pub.yaml Normal file
View File

@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: pub-deployment
namespace: adhoc
labels:
app: pub
spec:
replicas: 1
selector:
matchLabels:
app: pub
template:
metadata:
labels:
app: pub
spec:
containers:
- name: pub
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/pub:v200213172101
command:
- /app/pub
- -v
- ccc
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,22 @@
FROM golang:1.13-alpine AS builder
LABEL stage=gobuilder
ENV CGO_ENABLED 0
ENV GOOS linux
ENV GOPROXY https://goproxy.cn,direct
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/sub example/etcd/sub/sub.go
FROM alpine
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY --from=builder /app/sub /app/sub
CMD ["./sub"]

11
example/etcd/sub/Makefile Normal file
View File

@@ -0,0 +1,11 @@
version := v$(shell /bin/date "+%y%m%d%H%M%S")
build:
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/sub:$(version) . -f example/etcd/sub/Dockerfile
docker image prune --filter label=stage=gobuilder -f
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/sub:$(version)
deploy: push
kubectl -n adhoc set image deployment/sub-deployment sub=registry-vpc.cn-hangzhou.aliyuncs.com/xapp/sub:$(version)

21
example/etcd/sub/sub.go Normal file
View File

@@ -0,0 +1,21 @@
package main
import (
"fmt"
"time"
"zero/core/discov"
)
func main() {
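// Print the values currently registered under the key every three seconds.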
sub := discov.NewSubscriber([]string{"etcd.discovery:2379"}, "028F2C35852D", discov.Exclusive())
ticker := time.NewTicker(time.Second * 3)
defer ticker.Stop()
for {
select {
case <-ticker.C:
fmt.Println("values:", sub.Values())
}
}
}

16
example/etcd/sub/sub.yaml Normal file
View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: sub
name: sub
namespace: adhoc
spec:
containers:
- command:
- /app/sub
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/sub:v200213220509
name: sub
imagePullSecrets:
- name: aliyun
restartPolicy: Always

140
example/filex/pread.go Normal file
View File

@@ -0,0 +1,140 @@
package main
import (
"bufio"
"errors"
"flag"
"fmt"
"log"
"os"
"runtime"
"strconv"
"strings"
"time"
"zero/core/filex"
"zero/core/fx"
"zero/core/logx"
"gopkg.in/cheggaaa/pb.v1"
)
var (
file = flag.String("f", "", "the input file")
concurrent = flag.Int("c", runtime.NumCPU(), "concurrent goroutines")
wordVecDic TXDictionary
)
type (
Vector []float64
TXDictionary struct {
EmbeddingCount int64
Dim int64
Dict map[string]Vector
}
pair struct {
key string
vec Vector
}
)
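// FastLoad reads a word2vec-style text dictionary: the header line gives the
// embedding count and dimension, the rest of the file is split into byte
// ranges that are parsed concurrently; the dictionary map is only written in
// the final ForEach stage.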
func FastLoad(filename string) error {
if filename == "" {
return errors.New("no available dictionary")
}
now := time.Now()
defer func() {
logx.Infof("article2vec init dictionary end used %v", time.Since(now))
}()
dicFile, err := os.Open(filename)
if err != nil {
return err
}
defer dicFile.Close()
header, err := filex.FirstLine(filename)
if err != nil {
return err
}
total := strings.Split(header, " ")
wordVecDic.EmbeddingCount, err = strconv.ParseInt(total[0], 10, 64)
if err != nil {
return err
}
wordVecDic.Dim, err = strconv.ParseInt(total[1], 10, 64)
if err != nil {
return err
}
wordVecDic.Dict = make(map[string]Vector, wordVecDic.EmbeddingCount)
ranges, err := filex.SplitLineChunks(filename, *concurrent)
if err != nil {
return err
}
info, err := os.Stat(filename)
if err != nil {
return err
}
bar := pb.New64(info.Size()).SetUnits(pb.U_BYTES).Start()
fx.From(func(source chan<- interface{}) {
for _, each := range ranges {
source <- each
}
}).Walk(func(item interface{}, pipe chan<- interface{}) {
offsetRange := item.(filex.OffsetRange)
scanner := bufio.NewScanner(filex.NewRangeReader(dicFile, offsetRange.Start, offsetRange.Stop))
scanner.Buffer([]byte{}, 1<<20)
reader := filex.NewProgressScanner(scanner, bar)
if offsetRange.Start == 0 {
// skip header
reader.Scan()
}
for reader.Scan() {
text := reader.Text()
elements := strings.Split(text, " ")
vec := make(Vector, wordVecDic.Dim)
for i, ele := range elements {
if i == 0 {
continue
}
v, err := strconv.ParseFloat(ele, 64)
if err != nil {
return
}
vec[i-1] = v
}
pipe <- pair{
key: elements[0],
vec: vec,
}
}
}).ForEach(func(item interface{}) {
p := item.(pair)
wordVecDic.Dict[p.key] = p.vec
})
return nil
}
func main() {
flag.Parse()
start := time.Now()
if err := FastLoad(*file); err != nil {
log.Fatal(err)
}
fmt.Println(len(wordVecDic.Dict))
fmt.Println(time.Since(start))
}

25
example/fx/fx_test.go Normal file
View File

@@ -0,0 +1,25 @@
package main
import (
"testing"
"zero/core/fx"
)
func BenchmarkFx(b *testing.B) {
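// Measure the overhead of fx.Parallel running three trivial closures that
// each fill one field of the struct.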
type Mixed struct {
Name string
Age int
Gender int
}
for i := 0; i < b.N; i++ {
var mx Mixed
fx.Parallel(func() {
mx.Name = "hello"
}, func() {
mx.Age = 20
}, func() {
mx.Gender = 1
})
}
}

36
example/fx/square.go Normal file
View File

@@ -0,0 +1,36 @@
package main
import (
"fmt"
"zero/core/fx"
)
func main() {
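// Emit 0..9 twice each, square them, keep the even squares, drop duplicates
// and sum; the program prints 120 (0+4+16+36+64).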
result, err := fx.From(func(source chan<- interface{}) {
for i := 0; i < 10; i++ {
source <- i
source <- i
}
}).Map(func(item interface{}) interface{} {
i := item.(int)
return i * i
}).Filter(func(item interface{}) bool {
i := item.(int)
return i%2 == 0
}).Distinct(func(item interface{}) interface{} {
return item
}).Reduce(func(pipe <-chan interface{}) (interface{}, error) {
var result int
for item := range pipe {
i := item.(int)
result += i
}
return result, nil
})
if err != nil {
fmt.Println(err)
} else {
fmt.Println(result)
}
}

View File

@@ -0,0 +1,26 @@
FROM golang:1.13 AS builder
ENV CGO_ENABLED 0
ENV GOOS linux
RUN apt-get update
RUN apt-get install -y apt-utils upx
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/graceful example/graceful/dns/api/graceful.go
RUN upx /app/graceful
FROM alpine
RUN apk update --no-cache
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY --from=builder /app/graceful /app/graceful
COPY example/graceful/dns/api/etc/graceful-api.json /app/etc/config.json
CMD ["./graceful", "-f", "etc/config.json"]

View File

@@ -0,0 +1,11 @@
version := v$(shell /bin/date "+%y%m%d%H%M%S")
build:
docker pull alpine
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/graceful:$(version) . -f example/graceful/dns/api/Dockerfile
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/graceful:$(version)
deploy: push
kubectl -n kevin set image deployment/graceful-deployment graceful=registry-vpc.cn-hangzhou.aliyuncs.com/xapp/graceful:$(version)

View File

@@ -0,0 +1,11 @@
package config
import (
"zero/ngin"
"zero/rpcx"
)
type Config struct {
ngin.NgConf
Rpc rpcx.RpcClientConf
}

View File

@@ -0,0 +1,9 @@
{
"Name": "graceful-api",
"Host": "0.0.0.0",
"Port": 8888,
"MaxConns": 1000000,
"Rpc": {
"Server": "dns:///gracefulrpc:3456"
}
}

View File

@@ -0,0 +1,11 @@
type Response {
Host string `json:"host"`
Time int64 `json:"time"`
}
service graceful-api {
@server(
handler: GracefulHandler
)
get /api/graceful() returns(Response)
}

View File

@@ -0,0 +1,32 @@
package main
import (
"flag"
"zero/core/conf"
"zero/example/graceful/dns/api/config"
"zero/example/graceful/dns/api/handler"
"zero/example/graceful/dns/api/svc"
"zero/ngin"
"zero/rpcx"
)
var configFile = flag.String("f", "etc/graceful-api.json", "the config file")
func main() {
flag.Parse()
var c config.Config
conf.MustLoad(*configFile, &c)
client := rpcx.MustNewClient(c.Rpc)
ctx := &svc.ServiceContext{
Client: client,
}
engine := ngin.MustNewEngine(c.NgConf)
defer engine.Stop()
handler.RegisterHandlers(engine, ctx)
engine.Start()
}

View File

@@ -0,0 +1,42 @@
apiVersion: v1
kind: Service
metadata:
name: graceful
namespace: kevin
spec:
selector:
app: graceful
type: ClusterIP
ports:
- name: graceful-port
port: 3333
targetPort: 8888
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: graceful-deployment
namespace: kevin
labels:
app: graceful
spec:
replicas: 3
selector:
matchLabels:
app: graceful
template:
metadata:
labels:
app: graceful
spec:
containers:
- name: graceful
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/graceful:v191022133857
imagePullPolicy: Always
ports:
- containerPort: 8888
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,49 @@
package handler
import (
"context"
"fmt"
"net/http"
"os"
"time"
"zero/core/executors"
"zero/core/httpx"
"zero/core/logx"
"zero/example/graceful/dns/api/svc"
"zero/example/graceful/dns/api/types"
"zero/example/graceful/dns/rpc/graceful"
)
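// gracefulHandler picks the next rpc connection, calls Grace with this pod's
// hostname and returns which rpc host answered; the LessExecutor caps the
// console print at roughly once per second.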
func gracefulHandler(ctx *svc.ServiceContext) http.HandlerFunc {
logger := executors.NewLessExecutor(time.Second)
return func(w http.ResponseWriter, r *http.Request) {
var resp types.Response
conn, ok := ctx.Client.Next()
if !ok {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
host, err := os.Hostname()
if err != nil {
http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
return
}
client := graceful.NewGraceServiceClient(conn)
rp, err := client.Grace(context.Background(), &graceful.Request{From: host})
if err != nil {
logx.Error(err)
http.Error(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)
return
}
resp.Host = rp.Host
logger.DoOrDiscard(func() {
fmt.Printf("%s from host: %s\n", time.Now().Format("15:04:05"), rp.Host)
})
httpx.OkJson(w, resp)
}
}

View File

@@ -0,0 +1,19 @@
// DO NOT EDIT, generated by goctl
package handler
import (
"net/http"
"zero/example/graceful/dns/api/svc"
"zero/ngin"
)
func RegisterHandlers(engine *ngin.Engine, ctx *svc.ServiceContext) {
engine.AddRoutes([]ngin.Route{
{
Method: http.MethodGet,
Path: "/api/graceful",
Handler: gracefulHandler(ctx),
},
})
}

View File

@@ -0,0 +1,7 @@
package svc
import "zero/rpcx"
type ServiceContext struct {
Client *rpcx.RpcClient
}

View File

@@ -0,0 +1,7 @@
// DO NOT EDIT, generated by goctl
package types
type Response struct {
Host string `json:"host"`
Time int64 `json:"time"`
}

View File

@@ -0,0 +1,22 @@
FROM golang:1.13 AS builder
ENV CGO_ENABLED 0
ENV GOOS linux
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/gracefulrpc example/graceful/dns/rpc/gracefulrpc.go
FROM alpine
RUN apk update --no-cache
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY --from=builder /app/gracefulrpc /app/gracefulrpc
COPY example/graceful/dns/rpc/etc/config.json /app/etc/config.json
CMD ["./gracefulrpc", "-f", "etc/config.json"]

View File

@@ -0,0 +1,11 @@
version := v$(shell /bin/date "+%y%m%d%H%M%S")
build:
docker pull alpine
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:$(version) . -f example/graceful/dns/rpc/Dockerfile
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:$(version)
deploy: push
kubectl -n kevin set image deployment/gracefulrpc-deployment gracefulrpc=registry-vpc.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:$(version)

View File

@@ -0,0 +1,4 @@
{
"Name": "rpc.grace",
"ListenOn": "0.0.0.0:3456"
}

View File

@@ -0,0 +1,15 @@
syntax = "proto3";
package graceful;
message Request {
string from = 1;
}
message Response {
string host = 2;
}
service GraceService {
rpc grace(Request) returns(Response);
}

View File

@@ -0,0 +1,159 @@
// Code generated by protoc-gen-go.
// source: graceful.proto
// DO NOT EDIT!
/*
Package graceful is a generated protocol buffer package.
It is generated from these files:
graceful.proto
It has these top-level messages:
Request
Response
*/
package graceful
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Request struct {
From string `protobuf:"bytes,1,opt,name=from" json:"from,omitempty"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Request) GetFrom() string {
if m != nil {
return m.From
}
return ""
}
type Response struct {
Host string `protobuf:"bytes,2,opt,name=host" json:"host,omitempty"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Response) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func init() {
proto.RegisterType((*Request)(nil), "graceful.Request")
proto.RegisterType((*Response)(nil), "graceful.Response")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for GraceService service
type GraceServiceClient interface {
Grace(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error)
}
type graceServiceClient struct {
cc *grpc.ClientConn
}
func NewGraceServiceClient(cc *grpc.ClientConn) GraceServiceClient {
return &graceServiceClient{cc}
}
func (c *graceServiceClient) Grace(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/graceful.GraceService/grace", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for GraceService service
type GraceServiceServer interface {
Grace(context.Context, *Request) (*Response, error)
}
func RegisterGraceServiceServer(s *grpc.Server, srv GraceServiceServer) {
s.RegisterService(&_GraceService_serviceDesc, srv)
}
func _GraceService_Grace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GraceServiceServer).Grace(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/graceful.GraceService/Grace",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GraceServiceServer).Grace(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _GraceService_serviceDesc = grpc.ServiceDesc{
ServiceName: "graceful.GraceService",
HandlerType: (*GraceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "grace",
Handler: _GraceService_Grace_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "graceful.proto",
}
func init() { proto.RegisterFile("graceful.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 134 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x2f, 0x4a, 0x4c,
0x4e, 0x4d, 0x2b, 0xcd, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf1, 0x95, 0x64,
0xb9, 0xd8, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x84, 0xb8, 0x58, 0xd2, 0x8a, 0xf2,
0x73, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x25, 0x39, 0x2e, 0x8e, 0xa0, 0xd4,
0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x90, 0x7c, 0x46, 0x7e, 0x71, 0x89, 0x04, 0x13, 0x44, 0x1e,
0xc4, 0x36, 0xb2, 0xe3, 0xe2, 0x71, 0x07, 0x19, 0x15, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a,
0xa4, 0xc7, 0xc5, 0x0a, 0x36, 0x5a, 0x48, 0x50, 0x0f, 0x6e, 0x25, 0xd4, 0x7c, 0x29, 0x21, 0x64,
0x21, 0x88, 0x99, 0x49, 0x6c, 0x60, 0xf7, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x81, 0x87,
0xc8, 0xc1, 0xa1, 0x00, 0x00, 0x00,
}

View File

@@ -0,0 +1,51 @@
package main
import (
"context"
"flag"
"fmt"
"os"
"time"
"zero/core/conf"
"zero/example/graceful/dns/rpc/graceful"
"zero/rpcx"
"google.golang.org/grpc"
)
var configFile = flag.String("f", "etc/config.json", "the config file")
type GracefulServer struct{}
func NewGracefulServer() *GracefulServer {
return &GracefulServer{}
}
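// Grace simulates 10ms of work and replies with this pod's hostname so the
// caller can see which replica served the request.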
func (gs *GracefulServer) Grace(ctx context.Context, req *graceful.Request) (*graceful.Response, error) {
fmt.Println("=>", req)
time.Sleep(time.Millisecond * 10)
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
return &graceful.Response{
Host: hostname,
}, nil
}
func main() {
flag.Parse()
var c rpcx.RpcServerConf
conf.MustLoad(*configFile, &c)
server := rpcx.MustNewServer(c, func(grpcServer *grpc.Server) {
graceful.RegisterGraceServiceServer(grpcServer, NewGracefulServer())
})
defer server.Stop()
server.Start()
}

View File

@@ -0,0 +1,46 @@
apiVersion: v1
kind: Service
metadata:
name: gracefulrpc
namespace: kevin
spec:
selector:
app: gracefulrpc
type: ClusterIP
clusterIP: None
ports:
- name: gracefulrpc-port
port: 3456
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gracefulrpc-deployment
namespace: kevin
labels:
app: gracefulrpc
spec:
replicas: 3
selector:
matchLabels:
app: gracefulrpc
template:
metadata:
labels:
app: gracefulrpc
spec:
containers:
- name: gracefulrpc
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:v191022143425
imagePullPolicy: Always
ports:
- containerPort: 3456
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,28 @@
FROM golang:alpine AS builder
LABEL stage=gobuilder
ENV CGO_ENABLED 0
ENV GOOS linux
RUN apk update
RUN apk add upx
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/graceful example/graceful/etcd/api/graceful.go
RUN upx /app/graceful
FROM alpine
RUN apk update --no-cache
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY --from=builder /app/graceful /app/graceful
COPY example/graceful/etcd/api/etc/graceful-api.json /app/etc/config.json
CMD ["./graceful", "-f", "etc/config.json"]

View File

@@ -0,0 +1,13 @@
version := v$(shell /bin/date "+%y%m%d%H%M%S")
build:
docker pull alpine
docker pull golang:alpine
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/graceful:$(version) . -f example/graceful/etcd/api/Dockerfile
docker image prune --filter label=stage=gobuilder -f
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/graceful:$(version)
deploy: push
kubectl -n kevin set image deployment/graceful-deployment graceful=registry-vpc.cn-hangzhou.aliyuncs.com/xapp/graceful:$(version)

View File

@@ -0,0 +1,11 @@
package config
import (
"zero/ngin"
"zero/rpcx"
)
type Config struct {
ngin.NgConf
Rpc rpcx.RpcClientConf
}

View File

@@ -0,0 +1,12 @@
{
"Name": "graceful-api",
"Host": "0.0.0.0",
"Port": 8888,
"MaxConns": 1000000,
"Rpc": {
"Etcd": {
"Hosts": ["etcd.discov:2379"],
"Key": "rpcx"
}
}
}

View File

@@ -0,0 +1,11 @@
type Response {
Host string `json:"host"`
Time int64 `json:"time"`
}
service graceful-api {
@server(
handler: GracefulHandler
)
get /api/graceful() returns(Response)
}

View File

@@ -0,0 +1,32 @@
package main
import (
"flag"
"zero/core/conf"
"zero/example/graceful/etcd/api/config"
"zero/example/graceful/etcd/api/handler"
"zero/example/graceful/etcd/api/svc"
"zero/ngin"
"zero/rpcx"
)
var configFile = flag.String("f", "etc/graceful-api.json", "the config file")
func main() {
flag.Parse()
var c config.Config
conf.MustLoad(*configFile, &c)
client := rpcx.MustNewClient(c.Rpc)
ctx := &svc.ServiceContext{
Client: client,
}
engine := ngin.MustNewEngine(c.NgConf)
defer engine.Stop()
handler.RegisterHandlers(engine, ctx)
engine.Start()
}

View File

@@ -0,0 +1,42 @@
apiVersion: v1
kind: Service
metadata:
name: graceful
namespace: kevin
spec:
selector:
app: graceful
type: ClusterIP
ports:
- name: graceful-port
port: 3333
targetPort: 8888
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: graceful-deployment
namespace: kevin
labels:
app: graceful
spec:
replicas: 3
selector:
matchLabels:
app: graceful
template:
metadata:
labels:
app: graceful
spec:
containers:
- name: graceful
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/graceful:v191031145905
imagePullPolicy: Always
ports:
- containerPort: 8888
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,49 @@
package handler
import (
"context"
"fmt"
"net/http"
"os"
"time"
"zero/core/executors"
"zero/core/httpx"
"zero/core/logx"
"zero/example/graceful/etcd/api/svc"
"zero/example/graceful/etcd/api/types"
"zero/example/graceful/etcd/rpc/graceful"
)
func gracefulHandler(ctx *svc.ServiceContext) http.HandlerFunc {
logger := executors.NewLessExecutor(time.Second)
return func(w http.ResponseWriter, r *http.Request) {
var resp types.Response
conn, ok := ctx.Client.Next()
if !ok {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
host, err := os.Hostname()
if err != nil {
http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
return
}
client := graceful.NewGraceServiceClient(conn)
rp, err := client.Grace(context.Background(), &graceful.Request{From: host})
if err != nil {
logx.Error(err)
http.Error(w, http.StatusText(http.StatusBadGateway), http.StatusBadGateway)
return
}
resp.Host = rp.Host
logger.DoOrDiscard(func() {
fmt.Printf("%s from host: %s\n", time.Now().Format("15:04:05"), rp.Host)
})
httpx.OkJson(w, resp)
}
}

View File

@@ -0,0 +1,19 @@
// DO NOT EDIT, generated by goctl
package handler
import (
"net/http"
"zero/example/graceful/etcd/api/svc"
"zero/ngin"
)
func RegisterHandlers(engine *ngin.Engine, ctx *svc.ServiceContext) {
engine.AddRoutes([]ngin.Route{
{
Method: http.MethodGet,
Path: "/api/graceful",
Handler: gracefulHandler(ctx),
},
})
}

View File

@@ -0,0 +1,7 @@
package svc
import "zero/rpcx"
type ServiceContext struct {
Client *rpcx.RpcClient
}

View File

@@ -0,0 +1,7 @@
// DO NOT EDIT, generated by goctl
package types
type Response struct {
Host string `json:"host"`
Time int64 `json:"time"`
}

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: discov

View File

@@ -0,0 +1,319 @@
apiVersion: v1
kind: Service
metadata:
name: etcd
namespace: discov
spec:
ports:
- name: etcd-port
port: 2379
protocol: TCP
targetPort: 2379
selector:
app: etcd
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: etcd
etcd_node: etcd0
name: etcd0
namespace: discov
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- etcd0
- --initial-advertise-peer-urls
- http://etcd0:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://etcd0:2379
- --initial-cluster
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
- --initial-cluster-state
- new
image: quay.io/coreos/etcd:latest
name: etcd0
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
etcd_node: etcd0
name: etcd0
namespace: discov
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
etcd_node: etcd0
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: etcd
etcd_node: etcd1
name: etcd1
namespace: discov
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- etcd1
- --initial-advertise-peer-urls
- http://etcd1:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://etcd1:2379
- --initial-cluster
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
- --initial-cluster-state
- new
image: quay.io/coreos/etcd:latest
name: etcd1
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
etcd_node: etcd1
name: etcd1
namespace: discov
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
etcd_node: etcd1
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: etcd
etcd_node: etcd2
name: etcd2
namespace: discov
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- etcd2
- --initial-advertise-peer-urls
- http://etcd2:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://etcd2:2379
- --initial-cluster
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
- --initial-cluster-state
- new
image: quay.io/coreos/etcd:latest
name: etcd2
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
etcd_node: etcd2
name: etcd2
namespace: discov
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
etcd_node: etcd2
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: etcd
etcd_node: etcd3
name: etcd3
namespace: discov
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- etcd3
- --initial-advertise-peer-urls
- http://etcd3:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://etcd3:2379
- --initial-cluster
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
- --initial-cluster-state
- new
image: quay.io/coreos/etcd:latest
name: etcd3
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
etcd_node: etcd3
name: etcd3
namespace: discov
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
etcd_node: etcd3
---
apiVersion: v1
kind: Pod
metadata:
labels:
app: etcd
etcd_node: etcd4
name: etcd4
namespace: discov
spec:
containers:
- command:
- /usr/local/bin/etcd
- --name
- etcd4
- --initial-advertise-peer-urls
- http://etcd4:2380
- --listen-peer-urls
- http://0.0.0.0:2380
- --listen-client-urls
- http://0.0.0.0:2379
- --advertise-client-urls
- http://etcd4:2379
- --initial-cluster
- etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380,etcd4=http://etcd4:2380
- --initial-cluster-state
- new
image: quay.io/coreos/etcd:latest
name: etcd4
ports:
- containerPort: 2379
name: client
protocol: TCP
- containerPort: 2380
name: server
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
labels:
etcd_node: etcd4
name: etcd4
namespace: discov
spec:
ports:
- name: client
port: 2379
protocol: TCP
targetPort: 2379
- name: server
port: 2380
protocol: TCP
targetPort: 2380
selector:
etcd_node: etcd4

View File

@@ -0,0 +1,24 @@
FROM golang:alpine AS builder
LABEL stage=gobuilder
ENV CGO_ENABLED 0
ENV GOOS linux
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/gracefulrpc example/graceful/etcd/rpc/gracefulrpc.go
FROM alpine
RUN apk update --no-cache
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY --from=builder /app/gracefulrpc /app/gracefulrpc
COPY example/graceful/etcd/rpc/etc/graceful-rpc.json /app/etc/config.json
CMD ["./gracefulrpc", "-f", "etc/config.json"]

View File

@@ -0,0 +1,13 @@
version := v$(shell /bin/date "+%y%m%d%H%M%S")
build:
docker pull alpine
docker pull golang:alpine
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:$(version) . -f example/graceful/etcd/rpc/Dockerfile
docker image prune --filter label=stage=gobuilder -f
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:$(version)
deploy: push
kubectl -n kevin set image deployment/gracefulrpc-deployment gracefulrpc=registry-vpc.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:$(version)

View File

@@ -0,0 +1,8 @@
{
"Name": "rpc.grace",
"ListenOn": "0.0.0.0:3456",
"Etcd": {
"Hosts": ["etcd.discov:2379"],
"Key": "rpcx"
}
}

View File

@@ -0,0 +1,15 @@
syntax = "proto3";
package graceful;
message Request {
string from = 1;
}
message Response {
string host = 2;
}
service GraceService {
rpc grace(Request) returns(Response);
}

View File

@@ -0,0 +1,159 @@
// Code generated by protoc-gen-go.
// source: graceful.proto
// DO NOT EDIT!
/*
Package graceful is a generated protocol buffer package.
It is generated from these files:
graceful.proto
It has these top-level messages:
Request
Response
*/
package graceful
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Request struct {
From string `protobuf:"bytes,1,opt,name=from" json:"from,omitempty"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Request) GetFrom() string {
if m != nil {
return m.From
}
return ""
}
type Response struct {
Host string `protobuf:"bytes,2,opt,name=host" json:"host,omitempty"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Response) GetHost() string {
if m != nil {
return m.Host
}
return ""
}
func init() {
proto.RegisterType((*Request)(nil), "graceful.Request")
proto.RegisterType((*Response)(nil), "graceful.Response")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for GraceService service
type GraceServiceClient interface {
Grace(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error)
}
type graceServiceClient struct {
cc *grpc.ClientConn
}
func NewGraceServiceClient(cc *grpc.ClientConn) GraceServiceClient {
return &graceServiceClient{cc}
}
func (c *graceServiceClient) Grace(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) {
out := new(Response)
err := grpc.Invoke(ctx, "/graceful.GraceService/grace", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for GraceService service
type GraceServiceServer interface {
Grace(context.Context, *Request) (*Response, error)
}
func RegisterGraceServiceServer(s *grpc.Server, srv GraceServiceServer) {
s.RegisterService(&_GraceService_serviceDesc, srv)
}
func _GraceService_Grace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GraceServiceServer).Grace(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/graceful.GraceService/Grace",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GraceServiceServer).Grace(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _GraceService_serviceDesc = grpc.ServiceDesc{
ServiceName: "graceful.GraceService",
HandlerType: (*GraceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "grace",
Handler: _GraceService_Grace_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "graceful.proto",
}
func init() { proto.RegisterFile("graceful.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 134 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x2f, 0x4a, 0x4c,
0x4e, 0x4d, 0x2b, 0xcd, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf1, 0x95, 0x64,
0xb9, 0xd8, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x84, 0xb8, 0x58, 0xd2, 0x8a, 0xf2,
0x73, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x25, 0x39, 0x2e, 0x8e, 0xa0, 0xd4,
0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0x90, 0x7c, 0x46, 0x7e, 0x71, 0x89, 0x04, 0x13, 0x44, 0x1e,
0xc4, 0x36, 0xb2, 0xe3, 0xe2, 0x71, 0x07, 0x19, 0x15, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x2a,
0xa4, 0xc7, 0xc5, 0x0a, 0x36, 0x5a, 0x48, 0x50, 0x0f, 0x6e, 0x25, 0xd4, 0x7c, 0x29, 0x21, 0x64,
0x21, 0x88, 0x99, 0x49, 0x6c, 0x60, 0xf7, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x81, 0x87,
0xc8, 0xc1, 0xa1, 0x00, 0x00, 0x00,
}

View File

@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gracefulrpc-deployment
namespace: kevin
labels:
app: gracefulrpc
spec:
replicas: 9
selector:
matchLabels:
app: gracefulrpc
template:
metadata:
labels:
app: gracefulrpc
spec:
containers:
- name: gracefulrpc
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:v191031144304
imagePullPolicy: Always
ports:
- containerPort: 3456
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,41 @@
apiVersion: v1
kind: Service
metadata:
name: gracefulrpc
namespace: kevin
spec:
selector:
app: gracefulrpc
type: ClusterIP
clusterIP: None
ports:
- name: gracefulrpc-port
port: 3456
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gracefulrpc-deployment
namespace: kevin
labels:
app: gracefulrpc
spec:
replicas: 9
selector:
matchLabels:
app: gracefulrpc
template:
metadata:
labels:
app: gracefulrpc
spec:
containers:
- name: gracefulrpc
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:v191031144304
imagePullPolicy: Always
ports:
- containerPort: 3456
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,51 @@
package main
import (
"context"
"flag"
"fmt"
"os"
"time"
"zero/core/conf"
"zero/example/graceful/etcd/rpc/graceful"
"zero/rpcx"
"google.golang.org/grpc"
)
var configFile = flag.String("f", "etc/config.json", "the config file")
type GracefulServer struct{}
func NewGracefulServer() *GracefulServer {
return &GracefulServer{}
}
func (gs *GracefulServer) Grace(ctx context.Context, req *graceful.Request) (*graceful.Response, error) {
fmt.Println("=>", req)
time.Sleep(time.Millisecond * 10)
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
return &graceful.Response{
Host: hostname,
}, nil
}
func main() {
flag.Parse()
var c rpcx.RpcServerConf
conf.MustLoad(*configFile, &c)
server := rpcx.MustNewServer(c, func(grpcServer *grpc.Server) {
graceful.RegisterGraceServiceServer(grpcServer, NewGracefulServer())
})
defer server.Stop()
server.Start()
}

View File

@@ -0,0 +1,25 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gracefulrpc-deployment
namespace: kevin
labels:
app: gracefulrpc
spec:
replicas: 9
selector:
matchLabels:
app: gracefulrpc
template:
metadata:
labels:
app: gracefulrpc
spec:
containers:
- name: gracefulrpc
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/gracefulrpc:v191031144304
imagePullPolicy: Always
ports:
- containerPort: 3456
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,170 @@
package main
import (
"flag"
"fmt"
"net/http"
"os"
"sync"
"time"
"zero/core/lang"
"zero/core/threading"
"gopkg.in/cheggaaa/pb.v1"
)
var (
freq = flag.Int("freq", 100, "request frequency per second")
duration = flag.String("duration", "10s", "duration")
)
type (
counting struct {
ok int
fail int
reject int
errs int
unknown int
}
metric struct {
counting
lock sync.Mutex
}
)
func (m *metric) addOk() {
m.lock.Lock()
m.ok++
m.lock.Unlock()
}
func (m *metric) addFail() {
m.lock.Lock()
m.fail++
m.lock.Unlock()
}
func (m *metric) addReject() {
m.lock.Lock()
m.reject++
m.lock.Unlock()
}
func (m *metric) addErrs() {
m.lock.Lock()
m.errs++
m.lock.Unlock()
}
func (m *metric) addUnknown() {
m.lock.Lock()
m.unknown++
m.lock.Unlock()
}
func (m *metric) reset() counting {
m.lock.Lock()
result := counting{
ok: m.ok,
fail: m.fail,
reject: m.reject,
errs: m.errs,
unknown: m.unknown,
}
m.ok = 0
m.fail = 0
m.reject = 0
m.errs = 0
m.unknown = 0
m.lock.Unlock()
return result
}
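// runRequests hits url at the requested frequency and classifies each
// response by status code; it stops when done is closed.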
func runRequests(url string, frequency int, metrics *metric, done <-chan lang.PlaceholderType) {
ticker := time.NewTicker(time.Second / time.Duration(frequency))
defer ticker.Stop()
for {
select {
case <-ticker.C:
go func() {
resp, err := http.Get(url)
if err != nil {
metrics.addErrs()
return
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
metrics.addOk()
case http.StatusInternalServerError:
metrics.addFail()
case http.StatusServiceUnavailable:
metrics.addReject()
default:
metrics.addUnknown()
}
}()
case <-done:
return
}
}
}
func main() {
flag.Parse()
fp, err := os.Create("result.csv")
lang.Must(err)
defer fp.Close()
fmt.Fprintln(fp, "seconds,goodOk,goodFail,goodReject,goodErrs,goodUnknowns,goodDropRatio,"+
"heavyOk,heavyFail,heavyReject,heavyErrs,heavyUnknowns,heavyDropRatio")
var gm, hm metric
dur, err := time.ParseDuration(*duration)
lang.Must(err)
done := make(chan lang.PlaceholderType)
group := threading.NewRoutineGroup()
group.RunSafe(func() {
runRequests("http://localhost:8080/heavy", *freq, &hm, done)
})
group.RunSafe(func() {
runRequests("http://localhost:8080/good", *freq, &gm, done)
})
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
var seconds int
for range ticker.C {
seconds++
g := gm.reset()
h := hm.reset()
fmt.Fprintf(fp, "%d,%d,%d,%d,%d,%d,%.1f,%d,%d,%d,%d,%d,%.1f\n",
seconds, g.ok, g.fail, g.reject, g.errs, g.unknown,
float32(g.reject)/float32(g.ok+g.fail+g.reject+g.unknown),
h.ok, h.fail, h.reject, h.errs, h.unknown,
float32(h.reject)/float32(h.ok+h.fail+h.reject+h.unknown))
}
}()
go func() {
bar := pb.New(int(dur / time.Second)).Start()
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
bar.Increment()
}
bar.Finish()
}()
<-time.After(dur)
close(done)
group.Wait()
time.Sleep(time.Millisecond * 900)
}

View File

@@ -0,0 +1,3 @@
#!/bin/bash
hey -z 60s http://localhost:8080/good

View File

@@ -0,0 +1,3 @@
#!/bin/bash
hey -z 60s http://localhost:8080/heavy

View File

@@ -0,0 +1,59 @@
package main
import (
"net/http"
"runtime"
"time"
"zero/core/logx"
"zero/core/service"
"zero/core/stat"
"zero/core/syncx"
"zero/ngin"
)
func main() {
logx.Disable()
stat.SetReporter(nil)
server := ngin.MustNewEngine(ngin.NgConf{
ServiceConf: service.ServiceConf{
Name: "breaker",
Log: logx.LogConf{
Mode: "console",
},
},
Host: "0.0.0.0",
Port: 8080,
MaxConns: 1000,
Timeout: 3000,
})
latch := syncx.NewLimit(10)
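// /heavy allows at most 10 concurrent requests; each spins for ~50ms to simulate CPU-bound work, the rest get a 500.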
server.AddRoute(ngin.Route{
Method: http.MethodGet,
Path: "/heavy",
Handler: func(w http.ResponseWriter, r *http.Request) {
if latch.TryBorrow() {
defer latch.Return()
runtime.LockOSThread()
defer runtime.UnlockOSThread()
begin := time.Now()
for {
if time.Since(begin) > time.Millisecond*50 {
break
}
}
} else {
w.WriteHeader(http.StatusInternalServerError)
}
},
})
server.AddRoute(ngin.Route{
Method: http.MethodGet,
Path: "/good",
Handler: func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
})
defer server.Stop()
server.Start()
}

View File

@@ -0,0 +1,5 @@
#!/bin/bash
CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" server.go
docker run --rm -it --cpus=1 -p 8080:8080 -v `pwd`:/app -w /app alpine /app/server
rm -f server

View File

@@ -0,0 +1,56 @@
package main
import (
"fmt"
"log"
"zero/core/codec"
)
const (
pubKey = `-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQD7bq4FLG0ctccbEFEsUBuRxkjE
eJ5U+0CAEjJk20V9/u2Fu76i1oKoShCs7GXtAFbDb5A/ImIXkPY62nAaxTGK4KVH
miYbRgh5Fy6336KepLCtCmV/r0PKZeCyJH9uYLs7EuE1z9Hgm5UUjmpHDhJtkAwR
my47YlhspwszKdRP+wIDAQAB
-----END PUBLIC KEY-----`
body = "hello"
)
var key = []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
func main() {
encrypter, err := codec.NewRsaEncrypter([]byte(pubKey))
if err != nil {
log.Fatal(err)
}
decrypter, err := codec.NewRsaDecrypter("private.pem")
if err != nil {
log.Fatal(err)
}
output, err := encrypter.Encrypt([]byte(body))
if err != nil {
log.Fatal(err)
}
actual, err := decrypter.Decrypt(output)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(actual))
out, err := codec.EcbEncrypt(key, []byte(body))
if err != nil {
log.Fatal(err)
}
ret, err := codec.EcbDecrypt(key, out)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(ret))
}

70
example/http/demo/main.go Normal file
View File

@@ -0,0 +1,70 @@
package main
import (
"flag"
"net/http"
"zero/core/httpx"
"zero/core/logx"
"zero/core/service"
"zero/ngin"
)
var (
port = flag.Int("port", 3333, "the port to listen")
timeout = flag.Int64("timeout", 0, "timeout of milliseconds")
)
type Request struct {
User string `form:"user,options=a|b"`
}
func first(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Middleware", "first")
next(w, r)
}
}
func second(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Middleware", "second")
next(w, r)
}
}
func handle(w http.ResponseWriter, r *http.Request) {
var req Request
err := httpx.Parse(r, &req)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
httpx.OkJson(w, "hello, "+req.User)
}
func main() {
flag.Parse()
engine := ngin.MustNewEngine(ngin.NgConf{
ServiceConf: service.ServiceConf{
Log: logx.LogConf{
Mode: "console",
},
},
Port: *port,
Timeout: *timeout,
MaxConns: 500,
})
defer engine.Stop()
engine.Use(first)
engine.Use(second)
engine.AddRoute(ngin.Route{
Method: http.MethodGet,
Path: "/",
Handler: handle,
})
engine.Start()
}

65
example/http/post/main.go Normal file
View File

@@ -0,0 +1,65 @@
package main
import (
"flag"
"fmt"
"net/http"
"zero/core/httpx"
"zero/core/logx"
"zero/core/service"
"zero/ngin"
)
var (
port = flag.Int("port", 3333, "the port to listen")
timeout = flag.Int64("timeout", 0, "timeout of milliseconds")
)
type Request struct {
User string `json:"user"`
}
func handleGet(w http.ResponseWriter, r *http.Request) {
}
func handlePost(w http.ResponseWriter, r *http.Request) {
var req Request
err := httpx.Parse(r, &req)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
httpx.OkJson(w, fmt.Sprintf("Content-Length: %d, UserLen: %d", r.ContentLength, len(req.User)))
}
func main() {
flag.Parse()
engine := ngin.MustNewEngine(ngin.NgConf{
ServiceConf: service.ServiceConf{
Log: logx.LogConf{
Mode: "console",
},
},
Port: *port,
Timeout: *timeout,
MaxConns: 500,
MaxBytes: 50,
CpuThreshold: 500,
})
defer engine.Stop()
engine.AddRoute(ngin.Route{
Method: http.MethodGet,
Path: "/",
Handler: handleGet,
})
engine.AddRoute(ngin.Route{
Method: http.MethodPost,
Path: "/",
Handler: handlePost,
})
engine.Start()
}

View File

@@ -0,0 +1,11 @@
FROM alpine
RUN apk update --no-cache
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
WORKDIR /app
COPY main /app/main
CMD ["./main"]

View File

@@ -0,0 +1,63 @@
package main
import (
"flag"
"math"
"net/http"
"time"
"zero/core/httpx"
"zero/core/logx"
"zero/core/service"
"zero/ngin"
)
var (
port = flag.Int("port", 3333, "the port to listen")
timeout = flag.Int64("timeout", 1000, "timeout of milliseconds")
cpu = flag.Int64("cpu", 500, "cpu threshold")
)
type Request struct {
User string `form:"user,optional"`
}
func handle(w http.ResponseWriter, r *http.Request) {
var req Request
err := httpx.Parse(r, &req)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
var result float64
for i := 0; i < 30000; i++ {
result += math.Sqrt(float64(i))
}
time.Sleep(time.Millisecond * 5)
httpx.OkJson(w, result)
}
func main() {
flag.Parse()
logx.Disable()
engine := ngin.MustNewEngine(ngin.NgConf{
ServiceConf: service.ServiceConf{
Log: logx.LogConf{
Mode: "console",
},
},
Port: *port,
Timeout: *timeout,
CpuThreshold: *cpu,
})
defer engine.Stop()
engine.AddRoute(ngin.Route{
Method: http.MethodGet,
Path: "/",
Handler: handle,
})
engine.Start()
}

View File

@@ -0,0 +1,113 @@
package main
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha256"
"encoding/base64"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
"zero/core/codec"
)
const pubKey = `-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQD7bq4FLG0ctccbEFEsUBuRxkjE
eJ5U+0CAEjJk20V9/u2Fu76i1oKoShCs7GXtAFbDb5A/ImIXkPY62nAaxTGK4KVH
miYbRgh5Fy6336KepLCtCmV/r0PKZeCyJH9uYLs7EuE1z9Hgm5UUjmpHDhJtkAwR
my47YlhspwszKdRP+wIDAQAB
-----END PUBLIC KEY-----`
var (
crypt = flag.Bool("crypt", false, "encrypt body or not")
key = []byte("q4t7w!z%C*F-JaNdRgUjXn2r5u8x/A?D")
)
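// fingerprint returns the base64-encoded MD5 digest of key, identifying which public key was used.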
func fingerprint(key string) string {
h := md5.New()
io.WriteString(h, key)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
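// hs256 returns the base64-encoded HMAC-SHA256 of body signed with key.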
func hs256(key []byte, body string) string {
h := hmac.New(sha256.New, key)
io.WriteString(h, body)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
func main() {
flag.Parse()
var err error
body := "hello world!"
if *crypt {
bodyBytes, err := codec.EcbEncrypt(key, []byte(body))
if err != nil {
log.Fatal(err)
}
body = base64.StdEncoding.EncodeToString(bodyBytes)
}
r, err := http.NewRequest(http.MethodPost, "http://localhost:3333/a/b?c=first&d=second", strings.NewReader(body))
if err != nil {
log.Fatal(err)
}
timestamp := time.Now().Unix()
sha := sha256.New()
sha.Write([]byte(body))
bodySign := fmt.Sprintf("%x", sha.Sum(nil))
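// the string to sign: timestamp, method, path, raw query and the body's sha256, joined by newlines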
contentOfSign := strings.Join([]string{
strconv.FormatInt(timestamp, 10),
http.MethodPost,
r.URL.Path,
r.URL.RawQuery,
bodySign,
}, "\n")
sign := hs256(key, contentOfSign)
var mode string
if *crypt {
mode = "1"
} else {
mode = "0"
}
content := strings.Join([]string{
"version=v1",
"type=" + mode,
fmt.Sprintf("key=%s", base64.StdEncoding.EncodeToString(key)),
"time=" + strconv.FormatInt(timestamp, 10),
}, "; ")
encrypter, err := codec.NewRsaEncrypter([]byte(pubKey))
if err != nil {
log.Fatal(err)
}
output, err := encrypter.Encrypt([]byte(content))
if err != nil {
log.Fatal(err)
}
encryptedContent := base64.StdEncoding.EncodeToString(output)
r.Header.Set("X-Content-Security", strings.Join([]string{
fmt.Sprintf("key=%s", fingerprint(pubKey)),
"secret=" + encryptedContent,
"signature=" + sign,
}, "; "))
client := &http.Client{}
resp, err := client.Do(r)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
fmt.Println(resp.Status)
io.Copy(os.Stdout, resp.Body)
}

View File

@@ -0,0 +1,59 @@
package main
import (
"flag"
"io"
"net/http"
"zero/core/httpx"
"zero/core/logx"
"zero/core/service"
"zero/ngin"
)
var keyPem = flag.String("prikey", "private.pem", "the private key file")
type Request struct {
User string `form:"user,optional"`
}
func handle(w http.ResponseWriter, r *http.Request) {
var req Request
err := httpx.Parse(r, &req)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
io.Copy(w, r.Body)
}
func main() {
flag.Parse()
engine := ngin.MustNewEngine(ngin.NgConf{
ServiceConf: service.ServiceConf{
Log: logx.LogConf{
Path: "logs",
},
},
Port: 3333,
Signature: ngin.SignatureConf{
Strict: true,
PrivateKeys: []ngin.PrivateKeyConf{
{
Fingerprint: "bvw8YlnSqb+PoMf3MBbLdQ==",
KeyFile: *keyPem,
},
},
},
})
defer engine.Stop()
engine.AddRoute(ngin.Route{
Method: http.MethodPost,
Path: "/a/b",
Handler: handle,
})
engine.Start()
}

View File

@@ -0,0 +1,11 @@
package main
import "zero/core/threading"
func main() {
q := threading.NewTaskRunner(5)
q.Schedule(func() {
panic("hello")
})
select {}
}

View File

@@ -0,0 +1,41 @@
package main
import (
"encoding/json"
"fmt"
"log"
jsonx "github.com/segmentio/encoding/json"
)
type A struct {
AA string `json:"aa,omitempty"`
}
type B struct {
*A
BB string `json:"bb,omitempty"`
}
func main() {
var b B
b.BB = "b"
b.A = new(A)
b.A.AA = ""
fmt.Println("github.com/segmentio/encoding/json")
data, err := jsonx.Marshal(b)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(data))
fmt.Println()
fmt.Println("encoding/json")
data, err = json.Marshal(b)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(data))
}

View File

@@ -0,0 +1,74 @@
package testjson
import (
"encoding/json"
"testing"
jsoniter "github.com/json-iterator/go"
segment "github.com/segmentio/encoding/json"
)
const input = `{"@timestamp":"2020-02-12T14:02:10.849Z","@metadata":{"beat":"filebeat","type":"doc","version":"6.1.1","topic":"k8slog"},"index":"k8slog","offset":908739,"stream":"stdout","topic":"k8slog","k8s_container_name":"shield-rpc","k8s_pod_namespace":"xx-xiaoheiban","stage":"gray","prospector":{"type":"log"},"k8s_node_name":"cn-hangzhou.i-bp15w8irul9hmm3l9mxz","beat":{"name":"log-pilot-7s6qf","hostname":"log-pilot-7s6qf","version":"6.1.1"},"source":"/host/var/lib/docker/containers/4e6dca76f3e38fb8b39631e9bb3a19f9150cc82b1dab84f71d4622a08db20bfb/4e6dca76f3e38fb8b39631e9bb3a19f9150cc82b1dab84f71d4622a08db20bfb-json.log","level":"info","duration":"39.425µs","content":"172.25.5.167:49976 - /remoteshield.Filter/Filter - {\"sentence\":\"王XX2月12日作业\"}","k8s_pod":"shield-rpc-57c9dc6797-55skf","docker_container":"k8s_shield-rpc_shield-rpc-57c9dc6797-55skf_xx-xiaoheiban_a8341ba0-30ee-11ea-8ac4-00163e0fb3ef_0"}`
func BenchmarkStdJsonMarshal(b *testing.B) {
m := make(map[string]interface{})
if err := json.Unmarshal([]byte(input), &m); err != nil {
b.FailNow()
}
for i := 0; i < b.N; i++ {
if _, err := json.Marshal(m); err != nil {
b.FailNow()
}
}
}
func BenchmarkJsonIteratorMarshal(b *testing.B) {
m := make(map[string]interface{})
if err := jsoniter.Unmarshal([]byte(input), &m); err != nil {
b.FailNow()
}
for i := 0; i < b.N; i++ {
if _, err := jsoniter.Marshal(m); err != nil {
b.FailNow()
}
}
}
func BenchmarkSegmentioMarshal(b *testing.B) {
m := make(map[string]interface{})
if err := segment.Unmarshal([]byte(input), &m); err != nil {
b.FailNow()
}
for i := 0; i < b.N; i++ {
if _, err := segment.Marshal(m); err != nil {
b.FailNow()
}
}
}
func BenchmarkStdJsonUnmarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
m := make(map[string]interface{})
if err := json.Unmarshal([]byte(input), &m); err != nil {
b.FailNow()
}
}
}
func BenchmarkJsonIteratorUnmarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
m := make(map[string]interface{})
if err := jsoniter.Unmarshal([]byte(input), &m); err != nil {
b.FailNow()
}
}
}
func BenchmarkSegmentioUnmarshal(b *testing.B) {
for i := 0; i < b.N; i++ {
m := make(map[string]interface{})
if err := segment.Unmarshal([]byte(input), &m); err != nil {
b.FailNow()
}
}
}

View File

@@ -0,0 +1,31 @@
package testjson
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
func TestMarshal(t *testing.T) {
type A struct {
A string `json:"a"`
AA string `json:"aa"`
}
type B struct {
A // must stay embedded; declaring it as "A A" or giving it a json tag would stop its fields from being inlined
B string `json:"b"`
}
type C struct {
A `json:"a"`
C string `json:"c"`
}
a := A{A: "a", AA: "aa"}
b := B{A: a, B: "b"}
c := C{A: a, C: "c"}
bstr, _ := json.Marshal(b)
cstr, _ := json.Marshal(c)
assert.Equal(t, `{"a":"a","aa":"aa","b":"b"}`, string(bstr))
assert.Equal(t, `{"a":{"a":"a","aa":"aa"},"c":"c"}`, string(cstr))
}

257
example/jwt/user/user.go Normal file
View File

@@ -0,0 +1,257 @@
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strings"
"time"
"zero/core/conf"
"zero/core/httpx"
"zero/ngin"
"github.com/dgrijalva/jwt-go"
"github.com/dgrijalva/jwt-go/request"
)
const jwtUserField = "user"
type (
Config struct {
ngin.NgConf
AccessSecret string
AccessExpire int64 `json:",default=1209600"` // 2 weeks
RefreshSecret string
RefreshExpire int64 `json:",default=2419200"` // 4 weeks
RefreshAfter int64 `json:",default=604800"` // 1 week
}
TokenOptions struct {
AccessSecret string
AccessExpire int64
RefreshSecret string
RefreshExpire int64
RefreshAfter int64
Fields map[string]interface{}
}
Tokens struct {
// Access token to access the apis
AccessToken string `json:"access_token"`
// Access token expire time, generated like: time.Now().Add(14 * 24 * time.Hour).Unix()
AccessExpire int64 `json:"access_expire"`
// Refresh token, use this to refresh the token
RefreshToken string `json:"refresh_token"`
// Refresh token expire time, generated like: time.Now().Add(28 * 24 * time.Hour).Unix()
RefreshExpire int64 `json:"refresh_expire"`
// Recommended time to refresh the access token
RefreshAfter int64 `json:"refresh_after"`
}
UserCredentials struct {
Username string `json:"username"`
Password string `json:"password"`
}
User struct {
ID int `json:"id"`
Name string `json:"name"`
Username string `json:"username"`
Password string `json:"password"`
}
Response struct {
Data string `json:"data"`
}
Token struct {
Token string `json:"token"`
}
AuthRequest struct {
User string `json:"u"`
}
)
func main() {
var c Config
conf.MustLoad("user.json", &c)
engine, err := ngin.NewEngine(c.NgConf)
if err != nil {
log.Fatal(err)
}
defer engine.Stop()
engine.AddRoute(ngin.Route{
Method: http.MethodPost,
Path: "/login",
Handler: LoginHandler(c),
})
engine.AddRoute(ngin.Route{
Method: http.MethodGet,
Path: "/resource",
Handler: ProtectedHandler,
}, ngin.WithJwt(c.AccessSecret))
engine.AddRoute(ngin.Route{
Method: http.MethodPost,
Path: "/refresh",
Handler: RefreshHandler(c),
}, ngin.WithJwt(c.RefreshSecret))
fmt.Println("Now listening...")
engine.Start()
}
func RefreshHandler(c Config) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var authReq AuthRequest
if err := httpx.Parse(r, &authReq); err != nil {
w.WriteHeader(http.StatusBadRequest)
fmt.Println(err)
return
}
token, err := request.ParseFromRequest(r, request.AuthorizationHeaderExtractor,
func(token *jwt.Token) (interface{}, error) {
return []byte(c.RefreshSecret), nil
})
if err != nil {
w.WriteHeader(http.StatusUnauthorized)
fmt.Println("Unauthorized access to this resource")
return
}
if !token.Valid {
w.WriteHeader(http.StatusUnauthorized)
fmt.Println("Token is not valid")
return
}
claims, ok := token.Claims.(jwt.MapClaims)
if !ok {
w.WriteHeader(http.StatusBadRequest)
fmt.Println("not a valid jwt.MapClaims")
return
}
user, ok := claims[jwtUserField]
if !ok {
w.WriteHeader(http.StatusBadRequest)
fmt.Println("no user info in fresh token")
return
}
userStr, ok := user.(string)
if !ok || authReq.User != userStr {
w.WriteHeader(http.StatusBadRequest)
fmt.Println("user info not match in query and fresh token")
return
}
respond(w, c, userStr)
}
}
func ProtectedHandler(w http.ResponseWriter, r *http.Request) {
response := Response{"Gained access to protected resource"}
JsonResponse(response, w)
}
func LoginHandler(c Config) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var user UserCredentials
if err := httpx.Parse(r, &user); err != nil {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprint(w, "Error in request")
return
}
if strings.ToLower(user.Username) != "someone" || user.Password != "p@ssword" {
w.WriteHeader(http.StatusForbidden)
fmt.Println("Error logging in")
fmt.Fprint(w, "Invalid credentials")
return
}
respond(w, c, user.Username)
}
}
func JsonResponse(response interface{}, w http.ResponseWriter) {
content, err := json.Marshal(response)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(content)
}
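// buildTokens issues an access token and a refresh token carrying the same payload fields, plus their expiry hints.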
func buildTokens(opt TokenOptions) (Tokens, error) {
var tokens Tokens
accessToken, err := genToken(opt.AccessSecret, opt.Fields, opt.AccessExpire)
if err != nil {
return tokens, err
}
refreshToken, err := genToken(opt.RefreshSecret, opt.Fields, opt.RefreshExpire)
if err != nil {
return tokens, err
}
now := time.Now().Unix()
tokens.AccessToken = accessToken
tokens.AccessExpire = now + opt.AccessExpire
tokens.RefreshAfter = now + opt.RefreshAfter
tokens.RefreshToken = refreshToken
tokens.RefreshExpire = now + opt.RefreshExpire
return tokens, nil
}
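// genToken builds an HS256-signed JWT with iat/exp claims plus the given payload fields.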
func genToken(secretKey string, payloads map[string]interface{}, seconds int64) (string, error) {
now := time.Now().Unix()
claims := make(jwt.MapClaims)
claims["exp"] = now + seconds
claims["iat"] = now
for k, v := range payloads {
claims[k] = v
}
token := jwt.New(jwt.SigningMethodHS256)
token.Claims = claims
return token.SignedString([]byte(secretKey))
}
func respond(w http.ResponseWriter, c Config, user string) {
tokens, err := buildTokens(TokenOptions{
AccessSecret: c.AccessSecret,
AccessExpire: c.AccessExpire,
RefreshSecret: c.RefreshSecret,
RefreshExpire: c.RefreshExpire,
RefreshAfter: c.RefreshAfter,
Fields: map[string]interface{}{
jwtUserField: user,
},
})
if err != nil {
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Println(err)
return
}
httpx.OkJson(w, tokens)
}

View File

@@ -0,0 +1,10 @@
{
"Name": "example.user",
"Host": "localhost",
"Port": 8080,
"AccessSecret": "B63F477D-BBA3-4E52-96D3-C0034C27694A",
"AccessExpire": 1800,
"RefreshSecret": "14F17379-EB8F-411B-8F12-6929002DCA76",
"RefreshExpire": 3600,
"RefreshAfter": 600
}

View File

@@ -0,0 +1,12 @@
{
"Name": "kmq",
"Brokers": [
"172.16.56.64:19092",
"172.16.56.65:19092",
"172.16.56.66:19092"
],
"Group": "adhoc",
"Topic": "kevin",
"Offset": "first",
"NumProducers": 1
}

View File

@@ -0,0 +1,20 @@
package main
import (
"fmt"
"zero/core/conf"
"zero/kq"
)
func main() {
var c kq.KqConf
conf.MustLoad("config.json", &c)
q := kq.MustNewQueue(c, kq.WithHandle(func(k, v string) error {
fmt.Printf("=> %s\n", v)
return nil
}))
defer q.Stop()
q.Start()
}

View File

@@ -0,0 +1,51 @@
package main
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"strconv"
"time"
"zero/core/cmdline"
"zero/kq"
)
type message struct {
Key string `json:"key"`
Value string `json:"value"`
Payload string `json:"message"`
}
func main() {
pusher := kq.NewPusher([]string{
"172.16.56.64:19092",
"172.16.56.65:19092",
"172.16.56.66:19092",
}, "kevin")
ticker := time.NewTicker(time.Millisecond)
for round := 0; round < 3; round++ {
select {
case <-ticker.C:
count := rand.Intn(100)
m := message{
Key: strconv.FormatInt(time.Now().UnixNano(), 10),
Value: fmt.Sprintf("%d,%d", round, count),
Payload: fmt.Sprintf("%d,%d", round, count),
}
body, err := json.Marshal(m)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(body))
if err := pusher.Push(string(body)); err != nil {
log.Fatal(err)
}
}
}
cmdline.EnterToContinue()
}

View File

@@ -0,0 +1,66 @@
package main
import (
"flag"
"fmt"
"log"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"zero/core/limit"
"zero/core/stores/redis"
)
const seconds = 5
var (
rdx = flag.String("redis", "localhost:6379", "the redis, default localhost:6379")
rdxType = flag.String("redisType", "node", "the redis type, default node")
rdxPass = flag.String("redisPass", "", "the redis password")
rdxKey = flag.String("redisKey", "rate", "the redis key, default rate")
threads = flag.Int("threads", runtime.NumCPU(), "the concurrent threads, default to cores")
)
func main() {
flag.Parse()
store := redis.NewRedis(*rdx, *rdxType, *rdxPass)
fmt.Println(store.Ping())
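// allow at most 5 takes per key within each 5-second window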
lmt := limit.NewPeriodLimit(seconds, 5, store, *rdxKey)
timer := time.NewTimer(time.Second * seconds)
quit := make(chan struct{})
defer timer.Stop()
go func() {
<-timer.C
close(quit)
}()
var allowed, denied int32
var wait sync.WaitGroup
for i := 0; i < *threads; i++ {
wait.Add(1)
go func(i int) {
for {
select {
case <-quit:
wait.Done()
return
default:
if v, err := lmt.Take(strconv.FormatInt(int64(i), 10)); err == nil && v == limit.Allowed {
atomic.AddInt32(&allowed, 1)
} else if err != nil {
log.Fatal(err)
} else {
atomic.AddInt32(&denied, 1)
}
}
}
}(i)
}
wait.Wait()
fmt.Printf("allowed: %d, denied: %d, qps: %d\n", allowed, denied, (allowed+denied)/seconds)
}

View File

@@ -0,0 +1,66 @@
package main
import (
"flag"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
"zero/core/limit"
"zero/core/stores/redis"
)
const (
burst = 100
rate = 100
seconds = 5
)
var (
rdx = flag.String("redis", "localhost:6379", "the redis, default localhost:6379")
rdxType = flag.String("redisType", "node", "the redis type, default node")
rdxKey = flag.String("redisKey", "rate", "the redis key, default rate")
rdxPass = flag.String("redisPass", "", "the redis password")
threads = flag.Int("threads", runtime.NumCPU(), "the concurrent threads, default to cores")
)
func main() {
flag.Parse()
store := redis.NewRedis(*rdx, *rdxType, *rdxPass)
fmt.Println(store.Ping())
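// Redis-backed token bucket: refills 100 tokens per second with a burst of 100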
limit := limit.NewTokenLimiter(rate, burst, store, *rdxKey)
timer := time.NewTimer(time.Second * seconds)
quit := make(chan struct{})
defer timer.Stop()
go func() {
<-timer.C
close(quit)
}()
var allowed, denied int32
var wait sync.WaitGroup
for i := 0; i < *threads; i++ {
wait.Add(1)
go func() {
for {
select {
case <-quit:
wait.Done()
return
default:
if limit.Allow() {
atomic.AddInt32(&allowed, 1)
} else {
atomic.AddInt32(&denied, 1)
}
}
}
}()
}
wait.Wait()
fmt.Printf("allowed: %d, denied: %d, qps: %d\n", allowed, denied, (allowed+denied)/seconds)
}

149
example/load/main.go Normal file
View File

@@ -0,0 +1,149 @@
package main
import (
"flag"
"fmt"
"io"
"math"
"math/rand"
"os"
"sync"
"sync/atomic"
"time"
"zero/core/collection"
"zero/core/executors"
"zero/core/lang"
"zero/core/syncx"
"gopkg.in/cheggaaa/pb.v1"
)
const (
beta = 0.9
total = 400
interval = time.Second
factor = 5
)
var (
seconds = flag.Int("d", 400, "duration to go")
flying uint64
avgFlyingAggressive float64
aggressiveLock syncx.SpinLock
avgFlyingLazy float64
lazyLock syncx.SpinLock
avgFlyingBoth float64
bothLock syncx.SpinLock
lessWriter *executors.LessExecutor
passCounter = collection.NewRollingWindow(50, time.Millisecond*100)
rtCounter = collection.NewRollingWindow(50, time.Millisecond*100)
index int32
)
func main() {
flag.Parse()
// only log 100 records
lessWriter = executors.NewLessExecutor(interval * total / 100)
fp, err := os.Create("result.csv")
lang.Must(err)
defer fp.Close()
fmt.Fprintln(fp, "second,maxFlight,flying,agressiveAvgFlying,lazyAvgFlying,bothAvgFlying")
ticker := time.NewTicker(interval)
defer ticker.Stop()
bar := pb.New(*seconds * 2).Start()
var waitGroup sync.WaitGroup
batchRequests := func(i int) {
<-ticker.C
requests := (i + 1) * factor
func() {
it := time.NewTicker(interval / time.Duration(requests))
defer it.Stop()
for j := 0; j < requests; j++ {
<-it.C
waitGroup.Add(1)
go func() {
issueRequest(fp, atomic.AddInt32(&index, 1))
waitGroup.Done()
}()
}
bar.Increment()
}()
}
for i := 0; i < *seconds; i++ {
batchRequests(i)
}
for i := *seconds; i > 0; i-- {
batchRequests(i)
}
bar.Finish()
waitGroup.Wait()
}
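// issueRequest tracks the in-flight count with three EWMAs (updated on entry, on exit, and on both) and feeds the pass/RT windows used by maxFlight.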
func issueRequest(writer io.Writer, idx int32) {
v := atomic.AddUint64(&flying, 1)
aggressiveLock.Lock()
af := avgFlyingAggressive*beta + float64(v)*(1-beta)
avgFlyingAggressive = af
aggressiveLock.Unlock()
bothLock.Lock()
bf := avgFlyingBoth*beta + float64(v)*(1-beta)
avgFlyingBoth = bf
bothLock.Unlock()
duration := time.Millisecond * time.Duration(rand.Int63n(10)+1)
job(duration)
passCounter.Add(1)
rtCounter.Add(float64(duration) / float64(time.Millisecond))
v1 := atomic.AddUint64(&flying, ^uint64(0))
lazyLock.Lock()
lf := avgFlyingLazy*beta + float64(v1)*(1-beta)
avgFlyingLazy = lf
lazyLock.Unlock()
bothLock.Lock()
bf = avgFlyingBoth*beta + float64(v1)*(1-beta)
avgFlyingBoth = bf
bothLock.Unlock()
lessWriter.DoOrDiscard(func() {
fmt.Fprintf(writer, "%d,%d,%d,%.2f,%.2f,%.2f\n", idx, maxFlight(), v, af, lf, bf)
})
}
func job(duration time.Duration) {
time.Sleep(duration)
}
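// maxFlight estimates the allowed in-flight requests as maxPass (per 100ms bucket) * 10 buckets/second * minRt (converted to seconds).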
func maxFlight() int64 {
return int64(math.Max(1, float64(maxPass()*10)*(minRt()/1e3)))
}
func maxPass() int64 {
var result float64 = 1
passCounter.Reduce(func(b *collection.Bucket) {
if b.Sum > result {
result = b.Sum
}
})
return int64(result)
}
func minRt() float64 {
var result float64 = 1000
rtCounter.Reduce(func(b *collection.Bucket) {
if b.Count <= 0 {
return
}
avg := math.Round(b.Sum / float64(b.Count))
if avg < result {
result = avg
}
})
return result
}

14
example/load/plot.py Normal file
View File

@@ -0,0 +1,14 @@
import click
import pandas as pd
import matplotlib.pyplot as plt
@click.command()
@click.option("--csv", default="result.csv")
def main(csv):
df = pd.read_csv(csv, index_col="second")
df.drop(["agressiveAvgFlying", "bothAvgFlying"], axis=1, inplace=True)
df.plot()
plt.show()
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,95 @@
package main
import (
"errors"
"flag"
"fmt"
"io"
"net/http"
"os"
"sync/atomic"
"time"
"zero/core/fx"
"zero/core/lang"
)
var (
errServiceUnavailable = errors.New("service unavailable")
total int64
pass int64
fail int64
drop int64
seconds int64 = 1
)
func main() {
flag.Parse()
fp, err := os.Create("result.csv")
lang.Must(err)
defer fp.Close()
fmt.Fprintln(fp, "seconds,total,pass,fail,drop")
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
reset(fp)
}
}()
for i := 0; ; i++ {
it := time.NewTicker(time.Second / time.Duration(atomic.LoadInt64(&seconds)))
func() {
for j := 0; j < int(seconds); j++ {
<-it.C
go issueRequest()
}
}()
it.Stop()
cur := atomic.AddInt64(&seconds, 1)
fmt.Println(cur)
}
}
func issueRequest() {
atomic.AddInt64(&total, 1)
err := fx.DoWithTimeout(func() error {
return job()
}, time.Second)
switch err {
case nil:
atomic.AddInt64(&pass, 1)
case errServiceUnavailable:
atomic.AddInt64(&drop, 1)
default:
atomic.AddInt64(&fail, 1)
}
}
func job() error {
resp, err := http.Get("http://localhost:3333/")
if err != nil {
return err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
return nil
default:
return errServiceUnavailable
}
}
func reset(writer io.Writer) {
fmt.Fprintf(writer, "%d,%d,%d,%d,%d\n",
atomic.LoadInt64(&seconds),
atomic.SwapInt64(&total, 0),
atomic.SwapInt64(&pass, 0),
atomic.SwapInt64(&fail, 0),
atomic.SwapInt64(&drop, 0),
)
}

View File

@@ -0,0 +1,13 @@
import click
import pandas as pd
import matplotlib.pyplot as plt
@click.command()
@click.option("--csv", default="result.csv")
def main(csv):
df = pd.read_csv(csv, index_col="seconds")
df.plot()
plt.show()
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,26 @@
FROM golang:alpine AS builder
LABEL stage=gobuilder
ENV CGO_ENABLED 0
ENV GOOS linux
ENV GOPROXY https://goproxy.cn,direct
WORKDIR $GOPATH/src/zero
COPY . .
RUN go build -ldflags="-s -w" -o /app/main example/load/simulate/cpu/main.go
FROM alpine
RUN apk add --no-cache tzdata
ENV TZ Asia/Shanghai
RUN apk add --no-cache git go
RUN go get github.com/vikyd/go-cpu-load
ENV PATH $PATH:/root/go/bin
RUN mkdir /app
COPY --from=builder /app/main /app/main
WORKDIR /app
CMD ["/app/main"]

View File

@@ -0,0 +1,13 @@
version := v1
build:
cd $(GOPATH)/src/zero && docker build -t registry.cn-hangzhou.aliyuncs.com/xapp/shedding:$(version) . -f example/load/simulate/cpu/Dockerfile
push: build
docker push registry.cn-hangzhou.aliyuncs.com/xapp/shedding:$(version)
deploy: push
kubectl apply -f shedding.yaml
clean:
kubectl delete -f shedding.yaml

View File

@@ -0,0 +1,28 @@
# CPU monitoring accuracy test
1. Start the test pod
`make deploy`
2. Confirm the `shedding` pod is running with `kubectl get po -n adhoc`, then enter the pod with
`kubectl exec -it -n adhoc shedding -- sh`
3. Start the load (a minimal duty-cycle sketch of such a generator is shown below)
`/app # go-cpu-load -p 50 -c 1`
By default `go-cpu-load` loads every core; the pod requests `1000m`, which equals 1 core, so we pass `-c 1` to keep the numbers easy to read
Try a few different values for `-p`
4. Verify the accuracy
`kubectl logs -f -n adhoc shedding`
The logs report the `CPU` usage once per minute: `1000m` means `100%`, `500m` means `50%`
`watch -n 5 kubectl top pod -n adhoc`
This shows the `CPU` usage reported by `kubectl`; compare the two readings to see whether the monitoring is accurate
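For reference, a percentage CPU load can be produced with a simple spin/sleep duty cycle per core. The sketch below only illustrates the idea and is not the `go-cpu-load` source; the 100ms period and the flag names are assumptions.

```go
package main

import (
	"flag"
	"runtime"
	"time"
)

func main() {
	// hypothetical flags mirroring go-cpu-load's -p (percent) and -c (cores)
	percent := flag.Int("p", 50, "target CPU percent per core")
	cores := flag.Int("c", 1, "number of cores to load")
	flag.Parse()

	const period = 100 * time.Millisecond
	busy := period * time.Duration(*percent) / 100
	for i := 0; i < *cores; i++ {
		go func() {
			// pin the busy loop to one OS thread so each goroutine loads one core
			runtime.LockOSThread()
			for {
				start := time.Now()
				for time.Since(start) < busy {
					// spin
				}
				time.Sleep(period - busy)
			}
		}()
	}
	select {} // run until killed
}
```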

View File

@@ -0,0 +1,7 @@
package main
import _ "zero/core/stat"
func main() {
select {}
}

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
name: shedding
namespace: adhoc
spec:
containers:
- name: shedding
image: registry-vpc.cn-hangzhou.aliyuncs.com/xapp/shedding:v1
imagePullPolicy: Always
resources:
requests:
cpu: 1000m
limits:
cpu: 1000m
imagePullSecrets:
- name: aliyun

View File

@@ -0,0 +1,71 @@
package main
import (
"fmt"
"net/http"
"runtime"
"time"
"zero/core/fx"
"zero/core/logx"
"zero/core/service"
"zero/core/stat"
"zero/ngin"
)
const duration = time.Millisecond
func main() {
go func() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for range ticker.C {
fmt.Printf("cpu: %d\n", stat.CpuUsage())
}
}()
logx.Disable()
engine := ngin.MustNewEngine(ngin.NgConf{
ServiceConf: service.ServiceConf{
Log: logx.LogConf{
Mode: "console",
},
},
Host: "0.0.0.0",
Port: 3333,
CpuThreshold: 800,
})
defer engine.Stop()
engine.AddRoute(ngin.Route{
Method: http.MethodGet,
Path: "/",
Handler: func(w http.ResponseWriter, r *http.Request) {
if err := fx.DoWithTimeout(func() error {
job(duration)
return nil
}, time.Millisecond*100); err != nil {
w.WriteHeader(http.StatusServiceUnavailable)
}
},
})
engine.Start()
}
func job(duration time.Duration) {
done := make(chan int)
for i := 0; i < runtime.NumCPU(); i++ {
go func() {
for {
select {
case <-done:
return
default:
}
}
}()
}
time.Sleep(duration)
close(done)
}

View File

@@ -0,0 +1,30 @@
package main
import (
"time"
"zero/core/logx"
)
func foo() {
logx.WithDuration(time.Second).Error("world")
}
func main() {
c := logx.LogConf{
Mode: "console",
Path: "logs",
}
logx.MustSetup(c)
defer logx.Close()
logx.Info("info")
logx.Error("error")
logx.ErrorStack("hello")
logx.Errorf("%s and %s", "hello", "world")
logx.Severef("%s severe %s", "hello", "world")
logx.Slowf("%s slow %s", "hello", "world")
logx.Statf("%s stat %s", "hello", "world")
logx.WithDuration(time.Minute + time.Second).Info("hello")
logx.WithDuration(time.Minute + time.Second).Error("hello")
foo()
}

View File

@@ -0,0 +1,20 @@
package main
import (
"fmt"
"time"
"zero/core/logx"
)
func main() {
logx.MustSetup(logx.LogConf{
Mode: "console",
})
logx.CollectSysLog()
line := "asdkg"
logx.Info(line)
fmt.Print(line)
time.Sleep(time.Second)
}

Some files were not shown because too many files have changed in this diff.