initial import

commit 7e3a369a8f by kevin, 2020-07-26 17:09:05 +08:00
647 changed files with 54754 additions and 0 deletions

core/logx/config.go Normal file

@@ -0,0 +1,11 @@
package logx
type LogConf struct {
ServiceName string `json:",optional"`
Mode string `json:",default=console,options=console|file|volume"`
Path string `json:",default=logs"`
Level string `json:",default=info,options=info|error|severe"`
Compress bool `json:",optional"`
KeepDays int `json:",optional"`
StackCooldownMillis int `json:",default=100"`
}
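
A usage sketch, not part of this commit: filling LogConf in code and passing it to MustSetup/Close, which are added in core/logx/logs.go below; the service name and values here are illustrative assumptions.

// assumes: import "zero/core/logx"
c := logx.LogConf{
	ServiceName: "example-api", // hypothetical; only required for volume mode
	Mode:        "console",     // one of console|file|volume, per the options tag
	Path:        "logs",
	Level:       "info",
	Compress:    true,
	KeepDays:    7,
}
logx.MustSetup(c)
defer logx.Close()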

core/logx/customlogger.go Normal file

@@ -0,0 +1,62 @@
package logx
import (
"fmt"
"io"
"time"
"zero/core/timex"
)
const customCallerDepth = 3
type customLog logEntry
func WithDuration(d time.Duration) Logger {
return customLog{
Duration: timex.ReprOfDuration(d),
}
}
func (l customLog) Error(v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), customCallerDepth))
}
}
func (l customLog) Errorf(format string, v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), customCallerDepth))
}
}
func (l customLog) Info(v ...interface{}) {
if shouldLog(InfoLevel) {
l.write(infoLog, levelInfo, fmt.Sprint(v...))
}
}
func (l customLog) Infof(format string, v ...interface{}) {
if shouldLog(InfoLevel) {
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
}
}
func (l customLog) Slow(v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(slowLog, levelSlow, fmt.Sprint(v...))
}
}
func (l customLog) Slowf(format string, v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
}
}
func (l customLog) write(writer io.Writer, level, content string) {
l.Timestamp = getTimestamp()
l.Level = level
l.Content = content
outputJson(writer, logEntry(l))
}
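
A usage sketch, not part of the commit: WithDuration attaches an elapsed time to the entry, formatted by timex.ReprOfDuration (e.g. "1000.0ms" for one second, per the tests below); the timed call is a hypothetical placeholder.

// assumes: import ("time"; "zero/core/logx")
start := time.Now()
handleRequest() // hypothetical work being timed
logx.WithDuration(time.Since(start)).Infof("handled %s", "GET /ping")
// emits {"@timestamp":"...","level":"info","duration":"...ms","content":"handled GET /ping"}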

core/logx/lesslogger.go Normal file

@@ -0,0 +1,23 @@
package logx
type LessLogger struct {
*limitedExecutor
}
func NewLessLogger(milliseconds int) *LessLogger {
return &LessLogger{
limitedExecutor: newLimitedExecutor(milliseconds),
}
}
func (logger *LessLogger) Error(v ...interface{}) {
logger.logOrDiscard(func() {
Error(v...)
})
}
func (logger *LessLogger) Errorf(format string, v ...interface{}) {
logger.logOrDiscard(func() {
Errorf(format, v...)
})
}
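
A usage sketch, not part of the commit: LessLogger throttles error output so a hot loop does not flood the log; with a 500ms window, repeated failures produce at most one line per window. poll is a hypothetical call.

// assumes: import ("time"; "zero/core/logx")
less := logx.NewLessLogger(500)
for {
	if err := poll(); err != nil { // hypothetical; may fail on every iteration
		less.Errorf("poll failed: %v", err)
	}
	time.Sleep(10 * time.Millisecond)
}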

core/logx/lesslogger_test.go Normal file

@@ -0,0 +1,31 @@
package logx
import (
"log"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestLessLogger_Error(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
l := NewLessLogger(500)
for i := 0; i < 100; i++ {
l.Error("hello")
}
assert.Equal(t, 1, strings.Count(builder.String(), "\n"))
}
func TestLessLogger_Errorf(t *testing.T) {
var builder strings.Builder
log.SetOutput(&builder)
l := NewLessLogger(500)
for i := 0; i < 100; i++ {
l.Errorf("hello")
}
assert.Equal(t, 1, strings.Count(builder.String(), "\n"))
}

core/logx/lesswriter.go Normal file

@@ -0,0 +1,22 @@
package logx
import "io"
type lessWriter struct {
*limitedExecutor
writer io.Writer
}
func NewLessWriter(writer io.Writer, milliseconds int) *lessWriter {
return &lessWriter{
limitedExecutor: newLimitedExecutor(milliseconds),
writer: writer,
}
}
func (w *lessWriter) Write(p []byte) (n int, err error) {
w.logOrDiscard(func() {
w.writer.Write(p)
})
return len(p), nil
}
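
A usage sketch, not part of the commit: lessWriter applies the same throttling to any io.Writer; note that Write reports len(p) even when the payload is discarded, so callers never see a short write during the cooldown window.

// assumes: import ("fmt"; "os"; "zero/core/logx")
w := logx.NewLessWriter(os.Stderr, 1000)
for i := 0; i < 100; i++ {
	fmt.Fprintf(w, "noisy message %d\n", i) // only the first write in each 1s window reaches stderr
}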

core/logx/lesswriter_test.go Normal file

@@ -0,0 +1,19 @@
package logx
import (
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestLessWriter(t *testing.T) {
var builder strings.Builder
w := NewLessWriter(&builder, 500)
for i := 0; i < 100; i++ {
_, err := w.Write([]byte("hello"))
assert.Nil(t, err)
}
assert.Equal(t, "hello", builder.String())
}

core/logx/limitedexecutor.go Normal file

@@ -0,0 +1,42 @@
package logx
import (
"sync/atomic"
"time"
"zero/core/syncx"
"zero/core/timex"
)
type limitedExecutor struct {
threshold time.Duration
lastTime *syncx.AtomicDuration
discarded uint32
}
func newLimitedExecutor(milliseconds int) *limitedExecutor {
return &limitedExecutor{
threshold: time.Duration(milliseconds) * time.Millisecond,
lastTime: syncx.NewAtomicDuration(),
}
}
func (le *limitedExecutor) logOrDiscard(execute func()) {
if le == nil || le.threshold <= 0 {
execute()
return
}
now := timex.Now()
if now-le.lastTime.Load() <= le.threshold {
atomic.AddUint32(&le.discarded, 1)
} else {
le.lastTime.Set(now)
discarded := atomic.SwapUint32(&le.discarded, 0)
if discarded > 0 {
Errorf("Discarded %d error messages", discarded)
}
execute()
}
}
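
A sketch of the throttling behavior, in-package since limitedExecutor is unexported: calls inside the threshold are counted and dropped, and the next permitted call first reports how many were discarded. The counts below are illustrative.

le := newLimitedExecutor(500)
for i := 0; i < 3; i++ {
	le.logOrDiscard(func() {
		Info("allowed") // runs only for the first call; the next two are counted as discarded
	})
}
// once 500ms have passed, the next logOrDiscard logs "Discarded 2 error messages" and then runs its func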

core/logx/logs.go Normal file

@@ -0,0 +1,481 @@
package logx
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"zero/core/iox"
"zero/core/lang"
"zero/core/sysx"
"zero/core/timex"
)
const (
// InfoLevel logs everything
InfoLevel = iota
// ErrorLevel includes errors, slows, stacks
ErrorLevel
// SevereLevel only logs severe messages
SevereLevel
)
const (
timeFormat = "2006-01-02T15:04:05.000Z07"
accessFilename = "access.log"
errorFilename = "error.log"
severeFilename = "severe.log"
slowFilename = "slow.log"
statFilename = "stat.log"
consoleMode = "console"
volumeMode = "volume"
levelInfo = "info"
levelError = "error"
levelSevere = "severe"
levelSlow = "slow"
levelStat = "stat"
backupFileDelimiter = "-"
callerInnerDepth = 5
flags = 0x0
)
var (
ErrLogPathNotSet = errors.New("log path must be set")
ErrLogNotInitialized = errors.New("log not initialized")
ErrLogServiceNameNotSet = errors.New("log service name must be set")
writeConsole bool
logLevel uint32
infoLog io.WriteCloser
errorLog io.WriteCloser
severeLog io.WriteCloser
slowLog io.WriteCloser
statLog io.WriteCloser
stackLog io.Writer
once sync.Once
initialized uint32
options logOptions
)
type (
logEntry struct {
Timestamp string `json:"@timestamp"`
Level string `json:"level"`
Duration string `json:"duration,omitempty"`
Content string `json:"content"`
}
logOptions struct {
gzipEnabled bool
logStackCooldownMills int
keepDays int
}
LogOption func(options *logOptions)
Logger interface {
Error(...interface{})
Errorf(string, ...interface{})
Info(...interface{})
Infof(string, ...interface{})
Slow(...interface{})
Slowf(string, ...interface{})
}
)
func MustSetup(c LogConf) {
lang.Must(SetUp(c))
}
// SetUp sets up logx. If it has already been set up, it simply returns nil.
// SetUp may be called multiple times because different service frameworks
// may each need to initialize logx on their own.
// MustSetup follows the same logic.
func SetUp(c LogConf) error {
switch c.Mode {
case consoleMode:
setupWithConsole(c)
return nil
case volumeMode:
return setupWithVolume(c)
default:
return setupWithFiles(c)
}
}
func Close() error {
if writeConsole {
return nil
}
if atomic.LoadUint32(&initialized) == 0 {
return ErrLogNotInitialized
}
atomic.StoreUint32(&initialized, 0)
if infoLog != nil {
if err := infoLog.Close(); err != nil {
return err
}
}
if errorLog != nil {
if err := errorLog.Close(); err != nil {
return err
}
}
if severeLog != nil {
if err := severeLog.Close(); err != nil {
return err
}
}
if slowLog != nil {
if err := slowLog.Close(); err != nil {
return err
}
}
if statLog != nil {
if err := statLog.Close(); err != nil {
return err
}
}
return nil
}
func Disable() {
once.Do(func() {
atomic.StoreUint32(&initialized, 1)
infoLog = iox.NopCloser(ioutil.Discard)
errorLog = iox.NopCloser(ioutil.Discard)
severeLog = iox.NopCloser(ioutil.Discard)
slowLog = iox.NopCloser(ioutil.Discard)
statLog = iox.NopCloser(ioutil.Discard)
stackLog = ioutil.Discard
})
}
func Error(v ...interface{}) {
ErrorCaller(1, v...)
}
func Errorf(format string, v ...interface{}) {
ErrorCallerf(1, format, v...)
}
func ErrorCaller(callDepth int, v ...interface{}) {
errorSync(fmt.Sprint(v...), callDepth+callerInnerDepth)
}
func ErrorCallerf(callDepth int, format string, v ...interface{}) {
errorSync(fmt.Sprintf(format, v...), callDepth+callerInnerDepth)
}
func ErrorStack(v ...interface{}) {
// the stack string contains newlines
stackSync(fmt.Sprint(v...))
}
func ErrorStackf(format string, v ...interface{}) {
// the stack string contains newlines
stackSync(fmt.Sprintf(format, v...))
}
func Info(v ...interface{}) {
infoSync(fmt.Sprint(v...))
}
func Infof(format string, v ...interface{}) {
infoSync(fmt.Sprintf(format, v...))
}
func SetLevel(level uint32) {
atomic.StoreUint32(&logLevel, level)
}
func Severe(v ...interface{}) {
severeSync(fmt.Sprint(v...))
}
func Severef(format string, v ...interface{}) {
severeSync(fmt.Sprintf(format, v...))
}
func Slow(v ...interface{}) {
slowSync(fmt.Sprint(v...))
}
func Slowf(format string, v ...interface{}) {
slowSync(fmt.Sprintf(format, v...))
}
func Stat(v ...interface{}) {
statSync(fmt.Sprint(v...))
}
func Statf(format string, v ...interface{}) {
statSync(fmt.Sprintf(format, v...))
}
func WithCooldownMillis(millis int) LogOption {
return func(opts *logOptions) {
opts.logStackCooldownMills = millis
}
}
func WithKeepDays(days int) LogOption {
return func(opts *logOptions) {
opts.keepDays = days
}
}
func WithGzip() LogOption {
return func(opts *logOptions) {
opts.gzipEnabled = true
}
}
func createOutput(path string) (io.WriteCloser, error) {
if len(path) == 0 {
return nil, ErrLogPathNotSet
}
return NewLogger(path, DefaultRotateRule(path, backupFileDelimiter, options.keepDays,
options.gzipEnabled), options.gzipEnabled)
}
func errorSync(msg string, callDepth int) {
if shouldLog(ErrorLevel) {
outputError(errorLog, msg, callDepth)
}
}
func formatWithCaller(msg string, callDepth int) string {
var buf strings.Builder
caller := getCaller(callDepth)
if len(caller) > 0 {
buf.WriteString(caller)
buf.WriteByte(' ')
}
buf.WriteString(msg)
return buf.String()
}
func getCaller(callDepth int) string {
var buf strings.Builder
_, file, line, ok := runtime.Caller(callDepth)
if ok {
short := file
for i := len(file) - 1; i > 0; i-- {
if file[i] == '/' {
short = file[i+1:]
break
}
}
buf.WriteString(short)
buf.WriteByte(':')
buf.WriteString(strconv.Itoa(line))
}
return buf.String()
}
func getTimestamp() string {
return timex.Time().Format(timeFormat)
}
func handleOptions(opts []LogOption) {
for _, opt := range opts {
opt(&options)
}
}
func infoSync(msg string) {
if shouldLog(InfoLevel) {
output(infoLog, levelInfo, msg)
}
}
func output(writer io.Writer, level, msg string) {
info := logEntry{
Timestamp: getTimestamp(),
Level: level,
Content: msg,
}
outputJson(writer, info)
}
func outputError(writer io.Writer, msg string, callDepth int) {
content := formatWithCaller(msg, callDepth)
output(writer, levelError, content)
}
func outputJson(writer io.Writer, info interface{}) {
if content, err := json.Marshal(info); err != nil {
log.Println(err.Error())
} else if atomic.LoadUint32(&initialized) == 0 || writer == nil {
log.Println(string(content))
} else {
writer.Write(append(content, '\n'))
}
}
func setupLogLevel(c LogConf) {
switch c.Level {
case levelInfo:
SetLevel(InfoLevel)
case levelError:
SetLevel(ErrorLevel)
case levelSevere:
SetLevel(SevereLevel)
}
}
func setupWithConsole(c LogConf) {
once.Do(func() {
atomic.StoreUint32(&initialized, 1)
writeConsole = true
setupLogLevel(c)
infoLog = newLogWriter(log.New(os.Stdout, "", flags))
errorLog = newLogWriter(log.New(os.Stderr, "", flags))
severeLog = newLogWriter(log.New(os.Stderr, "", flags))
slowLog = newLogWriter(log.New(os.Stderr, "", flags))
stackLog = NewLessWriter(errorLog, options.logStackCooldownMills)
statLog = infoLog
})
}
func setupWithFiles(c LogConf) error {
var opts []LogOption
var err error
if len(c.Path) == 0 {
return ErrLogPathNotSet
}
opts = append(opts, WithCooldownMillis(c.StackCooldownMillis))
if c.Compress {
opts = append(opts, WithGzip())
}
if c.KeepDays > 0 {
opts = append(opts, WithKeepDays(c.KeepDays))
}
accessFile := path.Join(c.Path, accessFilename)
errorFile := path.Join(c.Path, errorFilename)
severeFile := path.Join(c.Path, severeFilename)
slowFile := path.Join(c.Path, slowFilename)
statFile := path.Join(c.Path, statFilename)
once.Do(func() {
atomic.StoreUint32(&initialized, 1)
handleOptions(opts)
setupLogLevel(c)
if infoLog, err = createOutput(accessFile); err != nil {
return
}
if errorLog, err = createOutput(errorFile); err != nil {
return
}
if severeLog, err = createOutput(severeFile); err != nil {
return
}
if slowLog, err = createOutput(slowFile); err != nil {
return
}
if statLog, err = createOutput(statFile); err != nil {
return
}
stackLog = NewLessWriter(errorLog, options.logStackCooldownMills)
})
return err
}
func setupWithVolume(c LogConf) error {
if len(c.ServiceName) == 0 {
return ErrLogServiceNameNotSet
}
c.Path = path.Join(c.Path, c.ServiceName, sysx.Hostname())
return setupWithFiles(c)
}
func severeSync(msg string) {
if shouldLog(SevereLevel) {
output(severeLog, levelSevere, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
}
}
func shouldLog(level uint32) bool {
return atomic.LoadUint32(&logLevel) <= level
}
func slowSync(msg string) {
if shouldLog(ErrorLevel) {
output(slowLog, levelSlow, msg)
}
}
func stackSync(msg string) {
if shouldLog(ErrorLevel) {
output(stackLog, levelError, fmt.Sprintf("%s\n%s", msg, string(debug.Stack())))
}
}
func statSync(msg string) {
if shouldLog(InfoLevel) {
output(statLog, levelStat, msg)
}
}
type logWriter struct {
logger *log.Logger
}
func newLogWriter(logger *log.Logger) logWriter {
return logWriter{
logger: logger,
}
}
func (lw logWriter) Close() error {
return nil
}
func (lw logWriter) Write(data []byte) (int, error) {
lw.logger.Print(string(data))
return len(data), nil
}
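
A usage sketch, not part of the commit: file mode (the default branch of SetUp) creates access/error/severe/slow/stat logs under Path, and level filtering is cumulative, so at the error level Info and Stat are dropped while Error, Slow and Severe still pass. The path is illustrative.

// assumes: import ("log"; "zero/core/logx")
err := logx.SetUp(logx.LogConf{
	Mode:     "file",             // any mode other than console/volume falls through to file setup
	Path:     "/var/log/example", // hypothetical path
	Level:    "error",
	KeepDays: 7,
	Compress: true,
})
if err != nil {
	log.Fatal(err)
}
defer logx.Close()

logx.Info("dropped: below the configured error level")
logx.Error("kept: written to error.log with a file:line caller prefix")
logx.Slow("kept: slow logs share the error threshold")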

core/logx/logs_test.go Normal file

@@ -0,0 +1,251 @@
package logx
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"runtime"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var (
s = []byte("Sending #11 notification (id: 1451875113812010473) in #1 connection")
pool = make(chan []byte, 1)
)
type mockWriter struct {
builder strings.Builder
}
func (mw *mockWriter) Write(data []byte) (int, error) {
return mw.builder.Write(data)
}
func (mw *mockWriter) Close() error {
return nil
}
func (mw *mockWriter) Reset() {
mw.builder.Reset()
}
func (mw *mockWriter) Contains(text string) bool {
return strings.Index(mw.builder.String(), text) > -1
}
func TestFileLineFileMode(t *testing.T) {
writer := new(mockWriter)
errorLog = writer
atomic.StoreUint32(&initialized, 1)
file, line := getFileLine()
Error("anything")
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
writer.Reset()
file, line = getFileLine()
Errorf("anything %s", "format")
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
}
func TestFileLineConsoleMode(t *testing.T) {
writer := new(mockWriter)
writeConsole = true
errorLog = newLogWriter(log.New(writer, "[ERROR] ", flags))
atomic.StoreUint32(&initialized, 1)
file, line := getFileLine()
Error("anything")
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
writer.Reset()
file, line = getFileLine()
Errorf("anything %s", "format")
assert.True(t, writer.Contains(fmt.Sprintf("%s:%d", file, line+1)))
}
func TestStructedLogInfo(t *testing.T) {
doTestStructedLog(t, levelInfo, func(writer io.WriteCloser) {
infoLog = writer
}, func(v ...interface{}) {
Info(v...)
})
}
func TestStructedLogSlow(t *testing.T) {
doTestStructedLog(t, levelSlow, func(writer io.WriteCloser) {
slowLog = writer
}, func(v ...interface{}) {
Slow(v...)
})
}
func TestStructedLogWithDuration(t *testing.T) {
const message = "hello there"
writer := new(mockWriter)
infoLog = writer
atomic.StoreUint32(&initialized, 1)
WithDuration(time.Second).Info(message)
var entry logEntry
if err := json.Unmarshal([]byte(writer.builder.String()), &entry); err != nil {
t.Error(err)
}
assert.Equal(t, levelInfo, entry.Level)
assert.Equal(t, message, entry.Content)
assert.Equal(t, "1000.0ms", entry.Duration)
}
func TestSetLevel(t *testing.T) {
SetLevel(ErrorLevel)
const message = "hello there"
writer := new(mockWriter)
infoLog = writer
atomic.StoreUint32(&initialized, 1)
Info(message)
assert.Equal(t, 0, writer.builder.Len())
}
func TestSetLevelTwiceWithMode(t *testing.T) {
testModes := []string{
"mode",
"console",
"volumn",
}
for _, mode := range testModes {
testSetLevelTwiceWithMode(t, mode)
}
}
func TestSetLevelWithDuration(t *testing.T) {
SetLevel(ErrorLevel)
const message = "hello there"
writer := new(mockWriter)
infoLog = writer
atomic.StoreUint32(&initialized, 1)
WithDuration(time.Second).Info(message)
assert.Equal(t, 0, writer.builder.Len())
}
func BenchmarkCopyByteSliceAppend(b *testing.B) {
for i := 0; i < b.N; i++ {
var buf []byte
buf = append(buf, getTimestamp()...)
buf = append(buf, ' ')
buf = append(buf, s...)
_ = buf
}
}
func BenchmarkCopyByteSliceAllocExactly(b *testing.B) {
for i := 0; i < b.N; i++ {
now := []byte(getTimestamp())
buf := make([]byte, len(now)+1+len(s))
n := copy(buf, now)
buf[n] = ' '
copy(buf[n+1:], s)
}
}
func BenchmarkCopyByteSlice(b *testing.B) {
var buf []byte
for i := 0; i < b.N; i++ {
buf = make([]byte, len(s))
copy(buf, s)
}
fmt.Fprint(ioutil.Discard, buf)
}
func BenchmarkCopyOnWriteByteSlice(b *testing.B) {
var buf []byte
for i := 0; i < b.N; i++ {
size := len(s)
buf = s[:size:size]
}
fmt.Fprint(ioutil.Discard, buf)
}
func BenchmarkCacheByteSlice(b *testing.B) {
for i := 0; i < b.N; i++ {
dup := fetch()
copy(dup, s)
put(dup)
}
}
func BenchmarkLogs(b *testing.B) {
b.ReportAllocs()
log.SetOutput(ioutil.Discard)
for i := 0; i < b.N; i++ {
Info(i)
}
}
func fetch() []byte {
select {
case b := <-pool:
return b
default:
}
return make([]byte, 4096)
}
func getFileLine() (string, int) {
_, file, line, _ := runtime.Caller(1)
short := file
for i := len(file) - 1; i > 0; i-- {
if file[i] == '/' {
short = file[i+1:]
break
}
}
return short, line
}
func put(b []byte) {
select {
case pool <- b:
default:
}
}
func doTestStructedLog(t *testing.T, level string, setup func(writer io.WriteCloser),
write func(...interface{})) {
const message = "hello there"
writer := new(mockWriter)
setup(writer)
atomic.StoreUint32(&initialized, 1)
write(message)
var entry logEntry
if err := json.Unmarshal([]byte(writer.builder.String()), &entry); err != nil {
t.Error(err)
}
assert.Equal(t, level, entry.Level)
assert.Equal(t, message, entry.Content)
}
func testSetLevelTwiceWithMode(t *testing.T, mode string) {
SetUp(LogConf{
Mode: mode,
Level: "error",
Path: "/dev/null",
})
SetUp(LogConf{
Mode: mode,
Level: "info",
Path: "/dev/null",
})
const message = "hello there"
writer := new(mockWriter)
infoLog = writer
atomic.StoreUint32(&initialized, 1)
Info(message)
assert.Equal(t, 0, writer.builder.Len())
}

core/logx/rotatelogger.go Normal file

@@ -0,0 +1,315 @@
package logx
import (
"compress/gzip"
"errors"
"fmt"
"io"
"log"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"zero/core/fs"
"zero/core/lang"
"zero/core/timex"
)
const (
dateFormat = "2006-01-02"
hoursPerDay = 24
bufferSize = 100
defaultDirMode = 0755
defaultFileMode = 0600
)
var ErrLogFileClosed = errors.New("error: log file closed")
type (
RotateRule interface {
BackupFileName() string
MarkRotated()
OutdatedFiles() []string
ShallRotate() bool
}
RotateLogger struct {
filename string
backup string
fp *os.File
channel chan []byte
done chan lang.PlaceholderType
rule RotateRule
compress bool
keepDays int
// can't use threading.RoutineGroup because of an import cycle
waitGroup sync.WaitGroup
closeOnce sync.Once
}
DailyRotateRule struct {
rotatedTime string
filename string
delimiter string
days int
gzip bool
}
)
func DefaultRotateRule(filename, delimiter string, days int, gzip bool) RotateRule {
return &DailyRotateRule{
rotatedTime: getNowDate(),
filename: filename,
delimiter: delimiter,
days: days,
gzip: gzip,
}
}
func (r *DailyRotateRule) BackupFileName() string {
return fmt.Sprintf("%s%s%s", r.filename, r.delimiter, getNowDate())
}
func (r *DailyRotateRule) MarkRotated() {
r.rotatedTime = getNowDate()
}
func (r *DailyRotateRule) OutdatedFiles() []string {
if r.days <= 0 {
return nil
}
var pattern string
if r.gzip {
pattern = fmt.Sprintf("%s%s*.gz", r.filename, r.delimiter)
} else {
pattern = fmt.Sprintf("%s%s*", r.filename, r.delimiter)
}
files, err := filepath.Glob(pattern)
if err != nil {
Errorf("failed to delete outdated log files, error: %s", err)
return nil
}
var buf strings.Builder
boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(dateFormat)
fmt.Fprintf(&buf, "%s%s%s", r.filename, r.delimiter, boundary)
if r.gzip {
buf.WriteString(".gz")
}
boundaryFile := buf.String()
var outdates []string
for _, file := range files {
if file < boundaryFile {
outdates = append(outdates, file)
}
}
return outdates
}
func (r *DailyRotateRule) ShallRotate() bool {
return len(r.rotatedTime) > 0 && getNowDate() != r.rotatedTime
}
func NewLogger(filename string, rule RotateRule, compress bool) (*RotateLogger, error) {
l := &RotateLogger{
filename: filename,
channel: make(chan []byte, bufferSize),
done: make(chan lang.PlaceholderType),
rule: rule,
compress: compress,
}
if err := l.init(); err != nil {
return nil, err
}
l.startWorker()
return l, nil
}
func (l *RotateLogger) Close() error {
var err error
l.closeOnce.Do(func() {
close(l.done)
l.waitGroup.Wait()
if err = l.fp.Sync(); err != nil {
return
}
err = l.fp.Close()
})
return err
}
func (l *RotateLogger) Write(data []byte) (int, error) {
select {
case l.channel <- data:
return len(data), nil
case <-l.done:
log.Println(string(data))
return 0, ErrLogFileClosed
}
}
func (l *RotateLogger) getBackupFilename() string {
if len(l.backup) == 0 {
return l.rule.BackupFileName()
} else {
return l.backup
}
}
func (l *RotateLogger) init() error {
l.backup = l.rule.BackupFileName()
if _, err := os.Stat(l.filename); err != nil {
basePath := path.Dir(l.filename)
if _, err = os.Stat(basePath); err != nil {
if err = os.MkdirAll(basePath, defaultDirMode); err != nil {
return err
}
}
if l.fp, err = os.Create(l.filename); err != nil {
return err
}
} else if l.fp, err = os.OpenFile(l.filename, os.O_APPEND|os.O_WRONLY, defaultFileMode); err != nil {
return err
}
fs.CloseOnExec(l.fp)
return nil
}
func (l *RotateLogger) maybeCompressFile(file string) {
if l.compress {
defer func() {
if r := recover(); r != nil {
ErrorStack(r)
}
}()
compressLogFile(file)
}
}
func (l *RotateLogger) maybeDeleteOutdatedFiles() {
files := l.rule.OutdatedFiles()
for _, file := range files {
if err := os.Remove(file); err != nil {
Errorf("failed to remove outdated file: %s", file)
}
}
}
func (l *RotateLogger) postRotate(file string) {
go func() {
// we cannot use threading.GoSafe here because of the import cycle.
l.maybeCompressFile(file)
l.maybeDeleteOutdatedFiles()
}()
}
func (l *RotateLogger) rotate() error {
if l.fp != nil {
err := l.fp.Close()
l.fp = nil
if err != nil {
return err
}
}
_, err := os.Stat(l.filename)
if err == nil && len(l.backup) > 0 {
backupFilename := l.getBackupFilename()
err = os.Rename(l.filename, backupFilename)
if err != nil {
return err
}
l.postRotate(backupFilename)
}
l.backup = l.rule.BackupFileName()
if l.fp, err = os.Create(l.filename); err == nil {
fs.CloseOnExec(l.fp)
}
return err
}
func (l *RotateLogger) startWorker() {
l.waitGroup.Add(1)
go func() {
defer l.waitGroup.Done()
for {
select {
case event := <-l.channel:
l.write(event)
case <-l.done:
return
}
}
}()
}
func (l *RotateLogger) write(v []byte) {
if l.rule.ShallRotate() {
if err := l.rotate(); err != nil {
log.Println(err)
} else {
l.rule.MarkRotated()
}
}
if l.fp != nil {
l.fp.Write(v)
}
}
func compressLogFile(file string) {
start := timex.Now()
Infof("compressing log file: %s", file)
if err := gzipFile(file); err != nil {
Errorf("compress error: %s", err)
} else {
Infof("compressed log file: %s, took %s", file, timex.Since(start))
}
}
func getNowDate() string {
return time.Now().Format(dateFormat)
}
func gzipFile(file string) error {
in, err := os.Open(file)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(fmt.Sprintf("%s.gz", file))
if err != nil {
return err
}
defer out.Close()
w := gzip.NewWriter(out)
if _, err = io.Copy(w, in); err != nil {
return err
} else if err = w.Close(); err != nil {
return err
}
return os.Remove(file)
}
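
A usage sketch, not part of the commit: RotateLogger can also be used directly as an io.WriteCloser with daily rotation, 7-day retention and gzip of rotated backups; the path is illustrative and the delimiter matches backupFileDelimiter in logs.go.

// assumes: import "zero/core/logx"
rule := logx.DefaultRotateRule("/tmp/example/app.log", "-", 7, true)
w, err := logx.NewLogger("/tmp/example/app.log", rule, true)
if err != nil {
	panic(err)
}
defer w.Close()
w.Write([]byte(`{"level":"info","content":"hello"}` + "\n"))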

core/logx/syslog.go Normal file

@@ -0,0 +1,15 @@
package logx
import "log"
type redirector struct{}
// CollectSysLog redirects the standard library log output into logx at info level
func CollectSysLog() {
log.SetOutput(new(redirector))
}
func (r *redirector) Write(p []byte) (n int, err error) {
Info(string(p))
return len(p), nil
}
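
A usage sketch, not part of the commit: after CollectSysLog, anything written through the standard library's log package is forwarded to logx as an info entry, so third-party code that only knows about log ends up in the same JSON stream.

// assumes: import ("log"; "zero/core/logx")
logx.MustSetup(logx.LogConf{Mode: "console"})
logx.CollectSysLog()
log.Println("from the standard library") // forwarded to logx as an info-level JSON entry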

core/logx/syslog_test.go Normal file

@@ -0,0 +1,48 @@
package logx
import (
"encoding/json"
"log"
"strings"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
)
const testlog = "Stay hungry, stay foolish."
func TestCollectSysLog(t *testing.T) {
CollectSysLog()
content := getContent(captureOutput(func() {
log.Printf(testlog)
}))
assert.True(t, strings.Contains(content, testlog))
}
func TestRedirector(t *testing.T) {
var r redirector
content := getContent(captureOutput(func() {
r.Write([]byte(testlog))
}))
assert.Equal(t, testlog, content)
}
func captureOutput(f func()) string {
atomic.StoreUint32(&initialized, 1)
writer := new(mockWriter)
infoLog = writer
prevLevel := logLevel
logLevel = InfoLevel
f()
logLevel = prevLevel
return writer.builder.String()
}
func getContent(jsonStr string) string {
var entry logEntry
json.Unmarshal([]byte(jsonStr), &entry)
return entry.Content
}

core/logx/tracelog.go Normal file

@@ -0,0 +1,85 @@
package logx
import (
"context"
"fmt"
"io"
"zero/core/trace/tracespec"
)
type tracingEntry struct {
logEntry
Trace string `json:"trace,omitempty"`
Span string `json:"span,omitempty"`
ctx context.Context `json:"-"`
}
func (l tracingEntry) Error(v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(errorLog, levelError, formatWithCaller(fmt.Sprint(v...), customCallerDepth))
}
}
func (l tracingEntry) Errorf(format string, v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(errorLog, levelError, formatWithCaller(fmt.Sprintf(format, v...), customCallerDepth))
}
}
func (l tracingEntry) Info(v ...interface{}) {
if shouldLog(InfoLevel) {
l.write(infoLog, levelInfo, fmt.Sprint(v...))
}
}
func (l tracingEntry) Infof(format string, v ...interface{}) {
if shouldLog(InfoLevel) {
l.write(infoLog, levelInfo, fmt.Sprintf(format, v...))
}
}
func (l tracingEntry) Slow(v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(slowLog, levelSlow, fmt.Sprint(v...))
}
}
func (l tracingEntry) Slowf(format string, v ...interface{}) {
if shouldLog(ErrorLevel) {
l.write(slowLog, levelSlow, fmt.Sprintf(format, v...))
}
}
func (l tracingEntry) write(writer io.Writer, level, content string) {
l.Timestamp = getTimestamp()
l.Level = level
l.Content = content
l.Trace = traceIdFromContext(l.ctx)
l.Span = spanIdFromContext(l.ctx)
outputJson(writer, l)
}
func WithContext(ctx context.Context) Logger {
return tracingEntry{
ctx: ctx,
}
}
func spanIdFromContext(ctx context.Context) string {
t, ok := ctx.Value(tracespec.TracingKey).(tracespec.Trace)
if !ok {
return ""
}
return t.SpanId()
}
func traceIdFromContext(ctx context.Context) string {
t, ok := ctx.Value(tracespec.TracingKey).(tracespec.Trace)
if !ok {
return ""
}
return t.TraceId()
}
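
A usage sketch, not part of the commit: WithContext reads the trace and span ids from a context carrying a tracespec.Trace and stamps them onto every entry, so log lines can be correlated with a distributed trace; the HTTP handler shape is an assumption.

// assumes: import ("net/http"; "zero/core/logx")
func handle(w http.ResponseWriter, r *http.Request) {
	logger := logx.WithContext(r.Context()) // the context is expected to carry a tracespec.Trace
	logger.Infof("serving %s", r.URL.Path)  // the entry gains "trace" and "span" fields when the trace is present
	w.WriteHeader(http.StatusOK)
}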

core/logx/tracelog_test.go Normal file

@@ -0,0 +1,50 @@
package logx
import (
"context"
"strings"
"testing"
"zero/core/trace/tracespec"
"github.com/stretchr/testify/assert"
)
const (
mockTraceId = "mock-trace-id"
mockSpanId = "mock-span-id"
)
var mock tracespec.Trace = new(mockTrace)
func TestTraceLog(t *testing.T) {
var buf strings.Builder
ctx := context.WithValue(context.Background(), tracespec.TracingKey, mock)
WithContext(ctx).(tracingEntry).write(&buf, levelInfo, testlog)
assert.True(t, strings.Contains(buf.String(), mockTraceId))
assert.True(t, strings.Contains(buf.String(), mockSpanId))
}
type mockTrace struct{}
func (t mockTrace) TraceId() string {
return mockTraceId
}
func (t mockTrace) SpanId() string {
return mockSpanId
}
func (t mockTrace) Finish() {
}
func (t mockTrace) Fork(ctx context.Context, serviceName, operationName string) (context.Context, tracespec.Trace) {
return nil, nil
}
func (t mockTrace) Follow(ctx context.Context, serviceName, operationName string) (context.Context, tracespec.Trace) {
return nil, nil
}
func (t mockTrace) Visit(fn func(key string, val string) bool) {
}