feat: initial release under apigo.cc/go/log [v1.0.0]

This commit is contained in:
AI Engineer 2026-05-02 23:39:10 +08:00
parent d537747c14
commit 80aa4aaa49
22 changed files with 1765 additions and 2 deletions

11
CHANGELOG.md Normal file
View File

@ -0,0 +1,11 @@
# Changelog
## [1.0.0] - 2026-05-02
- **初始版本**: 由 `ssgo/log` 迁移并基于 `apigo.cc/go` 标准重构。
- **高性能引擎**: 引入 `LogEntry` 池化与 `sync.Pool` 复用,支持零分配日志对象。
- **异步写入**: 实现基于 Channel 的非阻塞异步写入引擎,将 IO 压力从主路径完全剥离。
- **批量刷盘**: `FileWriter` 引入 `bufio.Writer` 缓冲,`ESWriter` 优化 Bulk 请求构造,大幅提升 IO 吞吐量。
- **灵活格式**: 引入 `Formatter` 接口,支持 JSON 与文本等自定义格式。
- **增强视图**: 内置 `ConsoleWriter` 与增强型 `Viewer`,支持 `RequestLog` 高亮显示与状态码变色。
- **字段规范**: 统一字段 Key 为小写,确保跨平台解析一致性。
- **安全性**: 集成高性能字段脱敏能力,并提供幂等停机与 Panic 恢复机制。

View File

@ -1,3 +1,45 @@
# log
# go/log
日志模块,由 ssgo/log 迁移重构
高性能、可插拔、支持脱敏的日志模块。
## 特性
- **零摩擦**: 自动从环境变量获取应用名、IP 等信息。
- **高性能**: 异步写入,支持批量刷盘。
- **脱敏支持**: 内置敏感字段过滤与正则匹配脱敏。
- **多渠道**: 支持控制台、本地文件切分、Elasticsearch 批量写入。
- **现代化**: 深度集成 `apigo.cc/go` 基础库。
## 安装
```bash
go get apigo.cc/go/log
```
## 快速开始
```go
import "apigo.cc/go/log"
func main() {
// 使用默认 Logger
log.Info("server started", "port", 8080)
// 创建带 traceId 的子 Logger
logger := log.New("unique-trace-id")
logger.Info("request processed")
// 错误日志带堆栈
logger.Error("database failed", "db", "mysql")
}
```
## 配置项 (JSON/YAML)
可以在配置文件中的 `log` 节点进行配置:
- `Name`: 应用名称(默认自动获取)
- `Level`: 日志级别 (debug, info, warning, error)
- `File`: 输出目标 (console, ./app.log, es://user:pass@host:9200/group)
- `SplitTag`: 文件切分格式 (如 20060102)
- `Sensitive`: 敏感字段列表
- `RegexSensitive`: 脱敏正则
## 脱敏规则
默认规则为 `12:4*4, 11:3*4, 7:2*2, 3:1*1, 2:1*0`
格式为 `长度阈值:左保留*右保留`

0
TEST.md Normal file
View File

59
bench_new_test.go Normal file
View File

@ -0,0 +1,59 @@
package log
import (
"reflect"
"testing"
)
// Field holds a single key/value log attribute.
type Field struct {
	Key   string
	Value interface{}
}

// BenchRequestLog is a benchmark-only entry that stores its extra
// attributes in a fixed-size array instead of a map, to measure the
// allocation profile of an array-based layout.
type BenchRequestLog struct {
	RequestId string
	UsedTime  float32
	// ExtraCount is the number of valid entries in Extra; slots past it
	// are considered unused.
	ExtraCount int
	Extra      [10]Field
}

// Reset clears the entry for reuse. The Extra array itself is NOT
// zeroed; stale slots are masked by ExtraCount being reset to 0.
func (r *BenchRequestLog) Reset() {
	r.RequestId = ""
	r.UsedTime = 0
	r.ExtraCount = 0
}
// BenchmarkLogger_RequestLog_Realistic measures the pooled RequestLog
// path under a realistic mix of scalar fields, headers and payload data.
func BenchmarkLogger_RequestLog_Realistic(b *testing.B) {
	entryType := reflect.TypeOf(&RequestLog{})
	b.ResetTimer()
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		WithEntry(entryType, func(raw LogEntry) {
			r := raw.(*RequestLog)
			r.RequestId = "req-1234567890"
			r.UsedTime = 45.67
			r.Path = "/api/v1/user/profile"
			r.Method = "POST"
			r.ResponseCode = 200
			if r.RequestHeaders == nil {
				r.RequestHeaders = make(map[string]string)
			}
			r.RequestHeaders["Content-Type"] = "application/json"
			r.RequestHeaders["Authorization"] = "Bearer token-value"
			if r.RequestData == nil {
				r.RequestData = make(map[string]any)
			}
			r.RequestData["userId"] = 10086
			r.RequestData["action"] = "update_profile"
			r.ResponseData = `{"status":"ok"}`
		})
	}
}

37
bench_test.go Normal file
View File

@ -0,0 +1,37 @@
package log
import (
"os"
"testing"
)
// BenchmarkLoggerInfo measures the single-goroutine Info path writing
// to a temporary local file, removed after the run.
func BenchmarkLoggerInfo(b *testing.B) {
	lg := NewLogger(Config{File: "bench.log"})
	defer os.Remove("bench.log")
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		lg.Info("bench log", "index", n)
	}
	b.StopTimer()
}
// BenchmarkLoggerAsyncConcurrent measures the Info path under parallel
// load, exercising the asynchronous write channel.
func BenchmarkLoggerAsyncConcurrent(b *testing.B) {
	lg := NewLogger(Config{File: "bench_async.log"})
	defer os.Remove("bench_async.log")
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		n := 0
		for pb.Next() {
			lg.Info("concurrent async log", "index", n)
			n++
		}
	})
	b.StopTimer()
}

26
config.go Normal file
View File

@ -0,0 +1,26 @@
package log
// Config holds all logger settings. Zero values are replaced with
// sensible defaults by NewLogger.
type Config struct {
	Name           string    // application name; auto-detected when empty
	Level          string    // minimum level: debug, info, warning, error (defaults to info)
	File           string    // output target: "console", a file path, or a scheme URL such as es://host:9200/group
	Fast           bool      // NOTE(review): not referenced in the code shown here — confirm intended use
	SplitTag       string    // time layout (e.g. "20060102") used to split log files
	Truncations    string    // comma-separated path fragments trimmed from call-stack entries
	Sensitive      string    // comma-separated field names whose values are masked
	RegexSensitive string    // comma-separated regular expressions for value masking
	SensitiveRule  string    // masking rules, e.g. "12:4*4" = length>=12 keeps 4 left / 4 right chars
	KeepKeyCase    bool      // keep original key casing; by default keys are all lower-cased
	Formatter      Formatter // serialization strategy; defaults to JSONFormatter
}

// LevelType is the numeric severity used for level filtering.
type LevelType int

const (
	DEBUG   LevelType = 1
	INFO    LevelType = 2
	WARNING LevelType = 3
	ERROR   LevelType = 4
	CLOSE   LevelType = 5 // above every level, i.e. disables all output
)

40
core_test.go Normal file
View File

@ -0,0 +1,40 @@
package log
import (
"sync"
"testing"
)
func TestLoggerCore_Initialization(t *testing.T) {
// Test default configuration initialization
conf := Config{}
logger := NewLogger(conf)
if logger.config.Level != "info" {
t.Errorf("Expected default level 'info', got %s", logger.config.Level)
}
if logger.config.Name == "" {
t.Error("Expected default logger name, got empty")
}
}
func TestLoggerCore_Concurrency(t *testing.T) {
conf := Config{
Name: "concurrent-test",
Level: "info",
}
logger := NewLogger(conf)
var wg sync.WaitGroup
numGoroutines := 50
for i := 0; i < numGoroutines; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
logger.Info("concurrent message", "id", id)
}(i)
}
wg.Wait()
}

21
default_logger.go Normal file
View File

@ -0,0 +1,21 @@
package log
import (
"apigo.cc/go/config"
)
// DefaultLogger is the process-wide logger, configured from the "log"
// section of the application configuration at package load time.
var DefaultLogger *Logger

func init() {
	// Register the Elasticsearch writer for both the plain and the TLS
	// URL schemes.
	RegisterWriterMaker("es", NewESWriter)
	RegisterWriterMaker("ess", NewESWriter)
	var conf Config
	// Best-effort load: a missing "log" section leaves conf zeroed and
	// NewLogger falls back to defaults.
	_ = config.Load("log", &conf)
	DefaultLogger = NewLogger(conf)
}

// New returns a copy of DefaultLogger bound to the given traceId.
func New(traceId string) *Logger {
	return DefaultLogger.New(traceId)
}

140
es_writer.go Normal file
View File

@ -0,0 +1,140 @@
package log
import (
"bytes"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strings"
"sync"
"time"
"apigo.cc/go/cast"
)
// ESWriter batches log lines and ships them to Elasticsearch through
// the _bulk API. It implements the Writer interface.
type ESWriter struct {
	config   *Config
	url      string // resolved _bulk endpoint
	user     string // basic-auth credentials parsed out of the URL
	password string
	group    string     // index prefix derived from the URL path
	lock     sync.Mutex // guards queue
	queue    []string   // alternating action/document lines for _bulk
	last     int64      // unix second of the last successful flush
	client   *http.Client
	prefix   string // pre-rendered bulk action line carrying the index name
}
// NewESWriter builds an ESWriter from conf.File, which must be a URL
// like es://user:pass@host:9200/group?timeout=10s ("ess" selects
// HTTPS). The URL path becomes the index group (slashes turned into
// dots) and credentials are moved into basic-auth. Returns nil when
// the URL does not parse.
func NewESWriter(conf *Config) Writer {
	w := &ESWriter{
		config: conf,
		queue:  make([]string, 0),
		client: &http.Client{},
	}
	esUrl, err := url.Parse(conf.File)
	if err != nil {
		// Report through the standard library logger: routing through
		// this package here could recurse into the writer being built.
		// (Previously this was only logged when DefaultLogger was
		// already set, silently dropping early-init failures.)
		log.Printf("invalid es url: %s, error: %v", conf.File, err)
		return nil
	}
	if esUrl.User != nil {
		w.user = esUrl.User.Username()
		w.password, _ = esUrl.User.Password()
		esUrl.User = nil
	}
	if esUrl.Scheme == "ess" {
		esUrl.Scheme = "https"
	} else {
		esUrl.Scheme = "http"
	}
	if timeout := esUrl.Query().Get("timeout"); timeout != "" {
		w.client.Timeout = cast.Duration(timeout)
	}
	if len(esUrl.Path) > 1 {
		w.group = strings.ReplaceAll(esUrl.Path[1:], "/", ".")
	}
	// url.URL.String inserts the missing leading slash before "_bulk".
	esUrl.Path = "_bulk"
	esUrl.RawQuery = ""
	w.url = esUrl.String()
	// Pre-render the bulk action line so Log only appends strings.
	if w.group != "" {
		w.prefix = fmt.Sprintf("{\"index\":{\"_index\":\"%s.%s\"}}", w.group, w.config.Name)
	} else {
		w.prefix = fmt.Sprintf("{\"index\":{\"_index\":\"%s\"}}", w.config.Name)
	}
	return w
}
// Log queues one serialized document for the next bulk flush; the bulk
// action line (prefix) and the document are appended as a pair.
func (w *ESWriter) Log(data []byte) {
	if len(data) == 0 {
		return
	}
	doc := string(data)
	w.lock.Lock()
	defer w.lock.Unlock()
	w.queue = append(w.queue, w.prefix, doc)
}
// responseOkBytes marks a bulk response body with no per-item failures.
var responseOkBytes = []byte("\"errors\":false")

// Run flushes the queue to the _bulk endpoint. It is invoked
// periodically by the background writer loop; a flush happens when more
// than 100 lines are queued, at most once per second while draining, or
// unconditionally once the writer is shutting down.
// NOTE(review): entries taken off the queue are dropped if the HTTP
// request fails — there is no re-queue on error. Also w.last is only
// advanced on success, so after a failure the next Run retries
// immediately with whatever has been queued since.
func (w *ESWriter) Run() {
	now := time.Now().Unix()
	w.lock.Lock()
	queueLen := len(w.queue)
	w.lock.Unlock()
	// Flush on >100 queued lines, or once per second, or on shutdown.
	if queueLen > 100 || (queueLen > 0 && (now > w.last || !writerRunning.Load())) {
		w.lock.Lock()
		sendings := w.queue
		w.queue = make([]string, 0)
		w.lock.Unlock()
		var body bytes.Buffer
		for _, s := range sendings {
			body.WriteString(s)
			body.WriteByte('\n')
		}
		req, err := http.NewRequest("POST", w.url, &body)
		if err != nil {
			log.Println("es request creation failed", err)
			return
		}
		req.Header.Set("Content-Type", "application/json")
		if w.user != "" {
			req.SetBasicAuth(w.user, w.password)
		}
		res, err := w.client.Do(req)
		if err != nil {
			log.Println("es sent failed", err)
			return
		}
		defer res.Body.Close()
		result, err := io.ReadAll(res.Body)
		if err != nil {
			log.Println("es result read failed", err)
			return
		}
		// Elasticsearch reports per-item failures inside the body even
		// on HTTP 200, so inspect the payload rather than the status.
		if !bytes.Contains(result, responseOkBytes) {
			log.Printf("es sent errors: %s, data: %s", string(result), body.String())
		}
		w.last = now
	}
}

107
file_writer.go Normal file
View File

@ -0,0 +1,107 @@
package log
import (
"bufio"
"fmt"
"os"
"sync"
"time"
)
// FileLogEntry pairs a log line with the timestamp it was produced at,
// so the correct split file can be chosen when it is flushed later.
type FileLogEntry struct {
	time    time.Time
	message string
}

// FileWriter appends log lines to a local file, optionally split into
// one file per time period (splitTag is a time layout such as
// "20060102"). Lines are buffered in memory and flushed by Run.
type FileWriter struct {
	fileName  string
	lastSplit string // formatted split value of the currently open file
	splitTag  string
	fp        *os.File
	bufWriter *bufio.Writer  // 64 KiB buffer over fp
	entries   []FileLogEntry // pending lines, guarded by lock
	lock      sync.Mutex
}

// files deduplicates FileWriters per fileName+splitTag so several
// loggers share one writer (and one open file) per target.
var (
	files     = make(map[string]*FileWriter)
	filesLock sync.RWMutex
)
// Write buffers one line together with its timestamp; the actual disk
// write happens later in Run.
func (f *FileWriter) Write(tm time.Time, str string) {
	f.lock.Lock()
	defer f.lock.Unlock()
	f.entries = append(f.entries, FileLogEntry{time: tm, message: str})
}
// Run drains the pending entries to disk. For each entry the split
// value of its timestamp is computed; when it changes (or no file is
// open yet) the current file is flushed, closed and a new file named
// "<fileName>.<split>" is opened. Lines that cannot be written to disk
// fall back to stdout so they are never lost silently.
func (f *FileWriter) Run() {
	f.lock.Lock()
	var runEntries []FileLogEntry
	if len(f.entries) > 0 {
		runEntries = f.entries
		f.entries = nil
	}
	f.lock.Unlock()
	if len(runEntries) > 0 {
		for _, l := range runEntries {
			nowSplit := l.time.Format(f.splitTag)
			if f.lastSplit != nowSplit || f.fp == nil {
				f.lock.Lock()
				if f.bufWriter != nil {
					_ = f.bufWriter.Flush()
				}
				if f.fp != nil {
					_ = f.fp.Close()
				}
				var err error
				f.fp, err = os.OpenFile(f.fileName+"."+nowSplit, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
				if err == nil {
					// Record the split only on success so a failed open
					// is retried for the next entry instead of being
					// skipped until the split period changes.
					f.lastSplit = nowSplit
					f.bufWriter = bufio.NewWriterSize(f.fp, 64*1024)
				} else {
					f.bufWriter = nil
				}
				f.lock.Unlock()
				if err != nil {
					fmt.Printf("failed to open log file: %s.%s, error: %v\n", f.fileName, nowSplit, err)
					// Fall back to stdout so the line itself survives.
					fmt.Print(l.time.Format("2006/01/02 15:04:05.000000") + " " + l.message + "\n")
					continue
				}
			}
			logStr := l.time.Format("2006/01/02 15:04:05.000000") + " " + l.message + "\n"
			f.lock.Lock()
			if f.bufWriter != nil {
				_, err := f.bufWriter.WriteString(logStr)
				if err != nil {
					// Disk write failed: keep the line visible on stdout.
					fmt.Print(logStr)
				}
			} else {
				fmt.Print(logStr)
			}
			f.lock.Unlock()
		}
		f.lock.Lock()
		if f.bufWriter != nil {
			_ = f.bufWriter.Flush()
		}
		f.lock.Unlock()
	}
}
// Close flushes any buffered bytes and releases the open file handle.
// It is safe to call when nothing is open.
func (f *FileWriter) Close() {
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.bufWriter != nil {
		_ = f.bufWriter.Flush()
		f.bufWriter = nil
	}
	if f.fp != nil {
		_ = f.fp.Close()
		f.fp = nil
	}
}

28
formatter.go Normal file
View File

@ -0,0 +1,28 @@
package log
import (
"apigo.cc/go/cast"
)
// Formatter serializes a log record, masking the values of the given
// sensitive field names where supported.
type Formatter interface {
	Format(data any, sensitiveKeys []string) ([]byte, error)
}

// JSONFormatter is the default formatter: JSON with optional
// desensitization of the listed sensitive fields.
type JSONFormatter struct{}

func (f *JSONFormatter) Format(data any, sensitiveKeys []string) ([]byte, error) {
	if len(sensitiveKeys) > 0 {
		return cast.ToJSONDesensitizeBytes(data, sensitiveKeys)
	}
	return cast.ToJSONBytes(data)
}

// TextFormatter renders the record as plain text (example formatter).
// NOTE(review): sensitiveKeys is ignored here, so text output is NOT
// desensitized — confirm this is acceptable before using it for
// sensitive data.
type TextFormatter struct{}

func (f *TextFormatter) Format(data any, sensitiveKeys []string) ([]byte, error) {
	// Plain string conversion; no masking is applied.
	return []byte(cast.String(data)), nil
}

20
go.mod Normal file
View File

@ -0,0 +1,20 @@
module apigo.cc/go/log
go 1.25.0
require (
apigo.cc/go/cast v1.1.0
apigo.cc/go/config v1.0.4
apigo.cc/go/shell v1.0.4
)
require (
apigo.cc/go/convert v1.0.4 // indirect
apigo.cc/go/encoding v1.0.4 // indirect
apigo.cc/go/file v1.0.4 // indirect
apigo.cc/go/rand v1.0.4 // indirect
apigo.cc/go/safe v1.0.4 // indirect
golang.org/x/crypto v0.50.0 // indirect
golang.org/x/sys v0.43.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

24
go.sum Normal file
View File

@ -0,0 +1,24 @@
apigo.cc/go/cast v1.1.0 h1:xUKcTb+EUB5/O1fjXlCMrvU9dDX35lbCUImM8popyO4=
apigo.cc/go/cast v1.1.0/go.mod h1:vh9ZqISCmTUiyinkNMI/s4f045fRlDK3xC+nPWQYBzI=
apigo.cc/go/config v1.0.4 h1:WG9zrQkqfFPkrKIL7RNvvAbbkuUBt1Av11ZP/aIfldM=
apigo.cc/go/config v1.0.4/go.mod h1:obryzJiK6j7lQex/58d5eWYOGx5O5IABguqNWxyyXJo=
apigo.cc/go/convert v1.0.4 h1:5+qPjC3dlPB59GnWZRlmthxcaXQtKvN+iOuiLdJ1GvQ=
apigo.cc/go/convert v1.0.4/go.mod h1:Hp+geeSyhqg/zwIKPOrDoceIREzcwM14t1I5q/dtbfU=
apigo.cc/go/encoding v1.0.4 h1:aezB0J/qFuHs6iXkbtuJP5JIHUtmjsr5SFb0NNvbObY=
apigo.cc/go/encoding v1.0.4/go.mod h1:V5CgT7rBbCxy+uCU20q0ptcNNRSgMtpA8cNOs6r8IeI=
apigo.cc/go/file v1.0.4 h1:qCKegV7OYh7r0qc3jZjGA/aKh0vIHgmr1OEbhfEmGX8=
apigo.cc/go/file v1.0.4/go.mod h1:C9gNo7386iA21OiBmuWh6CznKWlVBDFkhE4f0H0Susg=
apigo.cc/go/rand v1.0.4 h1:we070eWSL0dB8NEMaWjXj43+EekXQTm/h0kKpZ/frqw=
apigo.cc/go/rand v1.0.4/go.mod h1:mZ/4Soa3bk+XvDaqPWJuUe1bfEi4eThBj1XmEAuYxsk=
apigo.cc/go/safe v1.0.4 h1:07pRSdEHprF/2v6SsqAjICYFoeLcqjjvHGEdh6Dzrzg=
apigo.cc/go/safe v1.0.4/go.mod h1:o568sHS5rTRSVPmhxWod0tGdc+8l1KjidsNY1/OVZr0=
apigo.cc/go/shell v1.0.4 h1:EL9zjI39YBe1h+kRYQeAi/8zVGHe5W198DYYN7cENiY=
apigo.cc/go/shell v1.0.4/go.mod h1:N2gDkgK4tJ9TadD60/+gAGuWxyVAWHs5YPBmytw6ELA=
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

27
log_test.go Normal file
View File

@ -0,0 +1,27 @@
package log
import (
"testing"
)
func TestLogger(t *testing.T) {
conf := Config{
Name: "test-app",
Level: "debug",
}
logger := NewLogger(conf)
// 测试 Info 日志
logger.Info("hello", "key1", "value1")
}
func TestDesensitization(t *testing.T) {
logger := NewLogger(Config{
Sensitive: "phone",
})
data := map[string]any{
"phone": "13812345678",
}
logger.Log(data) // 应该在输出中脱敏
}

310
logger.go Normal file
View File

@ -0,0 +1,310 @@
package log
import (
"fmt"
"log"
"os"
"reflect"
"regexp"
"strings"
"time"
"apigo.cc/go/cast"
)
// Logger is a configured log producer. Copies created via New share
// the underlying writer/formatter state and differ only in traceId.
type Logger struct {
	config          Config
	level           LevelType
	goLogger        *log.Logger // plain-file fallback when no split tag is set
	file            *FileWriter // split-file writer, shared per fileName+splitTag
	writer          Writer      // scheme-based writer (es://, console, ...)
	formatter       Formatter
	truncations     []string // path fragments trimmed from call stacks
	sensitive       map[string]bool
	sensitiveKeys   []string
	regexSensitive  []*regexp.Regexp
	sensitiveRule   []sensitiveRuleInfo
	desensitization func(string) string
	traceId         string
}

// sensitiveRuleInfo is one parsed masking rule: values at least
// threshold characters long keep leftNum leading and rightNum trailing
// characters.
type sensitiveRuleInfo struct {
	threshold int
	leftNum   int
	rightNum  int
}

// writerMakers maps File URL schemes (e.g. "es") to writer constructors.
var (
	writerMakers = make(map[string]func(*Config) Writer)
)

// RegisterWriterMaker registers a constructor for a File URL scheme.
// The map is not synchronized: call this only during package
// initialization, before loggers are created.
func RegisterWriterMaker(name string, f func(*Config) Writer) {
	writerMakers[name] = f
}
// NewLogger builds a Logger from conf, filling in defaults for the
// level ("info"), stack truncations, sensitive fields, masking rules
// and the application name. Depending on conf.File it wires up a
// console, plain-file, split-file or scheme-based (e.g. es://) output.
func NewLogger(conf Config) *Logger {
	if conf.Level == "" {
		conf.Level = "info"
	}
	if conf.Truncations == "" {
		conf.Truncations = "github.com/, golang.org/, /apigo.cc/"
	}
	if conf.Sensitive == "" {
		conf.Sensitive = LogDefaultSensitive
	}
	if conf.SensitiveRule == "" {
		conf.SensitiveRule = "12:4*4, 11:3*4, 7:2*2, 3:1*1, 2:1*0"
	}
	if conf.Name == "" {
		conf.Name = GetDefaultName()
	}
	logger := Logger{
		truncations: cast.Split(conf.Truncations, ","),
		formatter:   conf.Formatter,
	}
	if logger.formatter == nil {
		logger.formatter = &JSONFormatter{}
	}
	// Sensitive field names are normalized (lower-cased, '-' removed)
	// and kept both as a set and as an ordered list for the formatter.
	if len(conf.Sensitive) > 0 {
		logger.sensitive = make(map[string]bool)
		ss := cast.Split(conf.Sensitive, ",")
		for _, v := range ss {
			f := fixField(v)
			logger.sensitive[f] = true
			logger.sensitiveKeys = append(logger.sensitiveKeys, f)
		}
	}
	// Invalid regular expressions are silently skipped.
	if len(conf.RegexSensitive) > 0 {
		ss := cast.Split(conf.RegexSensitive, ",")
		for _, v := range ss {
			if r, err := regexp.Compile(v); err == nil {
				logger.regexSensitive = append(logger.regexSensitive, r)
			}
		}
	}
	// Masking rules have the form "threshold:left*right"; each numeric
	// part must be within 0..100 or the rule is dropped.
	if len(conf.SensitiveRule) > 0 {
		ss := cast.Split(conf.SensitiveRule, ",")
		for _, v := range ss {
			a1 := strings.SplitN(v, ":", 2)
			if len(a1) == 2 {
				a2 := strings.SplitN(a1[1], "*", 3)
				if len(a2) == 2 {
					threshold := cast.Int(a1[0])
					leftNum := cast.Int(a2[0])
					rightNum := cast.Int(a2[1])
					if threshold >= 0 && threshold <= 100 && leftNum >= 0 && leftNum <= 100 && rightNum >= 0 && rightNum <= 100 {
						logger.sensitiveRule = append(logger.sensitiveRule, sensitiveRuleInfo{
							threshold: threshold,
							leftNum:   leftNum,
							rightNum:  rightNum,
						})
					}
				}
			}
		}
	}
	// Unknown level strings (including "close") fall back to INFO.
	switch strings.ToLower(conf.Level) {
	case "debug":
		logger.level = DEBUG
	case "warning":
		logger.level = WARNING
	case "error":
		logger.level = ERROR
	default:
		logger.level = INFO
	}
	if conf.File != "" && conf.File != "console" {
		if strings.Contains(conf.File, "://") {
			// Scheme-based target: look up a registered writer maker,
			// append the writer to the global list and start the
			// background flush loop.
			writerName := strings.SplitN(conf.File, "://", 2)[0]
			if m, ok := writerMakers[writerName]; ok {
				if w := m(&conf); w != nil {
					logger.writer = w
					writerLock.Lock()
					cur := writers.Load().([]Writer)
					newW := append(cur, w)
					writers.Store(newW)
					writerLock.Unlock()
					Start()
				}
			}
		} else {
			if conf.SplitTag != "" {
				// Split files are shared process-wide per name+tag so
				// multiple loggers reuse one open file.
				// NOTE(review): the check-then-create below is not
				// atomic; two concurrent NewLogger calls could race and
				// create two writers for the same target — confirm
				// loggers are only built during startup.
				filesLock.RLock()
				logger.file = files[conf.File+conf.SplitTag]
				filesLock.RUnlock()
				if logger.file == nil {
					logger.file = &FileWriter{
						fileName: conf.File,
						splitTag: conf.SplitTag,
					}
					filesLock.Lock()
					files[conf.File+conf.SplitTag] = logger.file
					filesLock.Unlock()
				}
				Start()
			} else {
				// Plain single file via the standard library logger.
				// NOTE(review): an open failure is silently ignored and
				// output falls back to the console path in writeBuf.
				fp, err := os.OpenFile(conf.File, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
				if err == nil {
					logger.goLogger = log.New(fp, "", log.Ldate|log.Lmicroseconds)
				}
			}
		}
	}
	logger.config = conf
	return &logger
}
// Log emits an arbitrary record. Pooled LogEntry values take the fast
// path and are returned to their pool after serialization; anything
// else is serialized directly, falling back to a wrapper record when
// the formatter rejects it.
func (logger *Logger) Log(data any) {
	if entry, ok := data.(LogEntry); ok {
		logger.asyncWrite(entry)
		return
	}
	buf, err := logger.formatter.Format(data, logger.sensitiveKeys)
	if err != nil {
		buf, _ = logger.formatter.Format(map[string]any{
			"logType":   LogTypeUndefined,
			"traceId":   logger.traceId,
			"undefined": fmt.Sprint(data),
		}, nil)
	}
	logger.writeBuf(buf)
}

// asyncWrite serializes a pooled entry, writes it on success and
// always returns the entry to its pool — even when formatting failed.
func (logger *Logger) asyncWrite(entry LogEntry) {
	defer PutEntry(entry)
	if buf, err := logger.formatter.Format(entry, logger.sensitiveKeys); err == nil {
		logger.writeBuf(buf)
	}
}
// writeBuf routes a serialized line to the active sink: the shared
// async channel when the background writer runs, otherwise directly to
// the scheme writer, split-file writer, plain file logger, or stdout.
func (logger *Logger) writeBuf(buf []byte) {
	if writerRunning.Load() {
		WriteAsync(buf)
		return
	}
	switch {
	case logger.writer != nil:
		logger.writer.Log(buf)
	case logger.file != nil:
		logger.file.Write(time.Now(), string(buf))
	case logger.goLogger != nil:
		logger.goLogger.Print(string(buf))
	default:
		fmt.Println(Viewable(string(buf)))
	}
}
// fillExtra copies alternating key/value pairs from extra into m. Keys
// are stringified; a trailing key without a value is ignored, matching
// the variadic "k, v, k, v" calling convention. Factored out of the
// four level methods, which previously duplicated this loop verbatim.
func fillExtra(m map[string]any, extra []any) {
	for i := 0; i+1 < len(extra); i += 2 {
		m[cast.String(extra[i])] = extra[i+1]
	}
}

// Debug emits a debug-level message with optional key/value pairs.
func (logger *Logger) Debug(message string, extra ...any) {
	if logger.CheckLevel(DEBUG) {
		entry := GetEntry(reflect.TypeOf(&DebugLog{})).(*DebugLog)
		logger.fillBase(entry.Base(), LogTypeDebug)
		entry.Debug = message
		fillExtra(entry.Extra, extra)
		logger.Log(entry)
	}
}

// Info emits an info-level message with optional key/value pairs.
func (logger *Logger) Info(message string, extra ...any) {
	if logger.CheckLevel(INFO) {
		entry := GetEntry(reflect.TypeOf(&InfoLog{})).(*InfoLog)
		logger.fillBase(entry.Base(), LogTypeInfo)
		entry.Info = message
		fillExtra(entry.Extra, extra)
		logger.Log(entry)
	}
}

// Warning emits a warning-level message and captures the call stack.
func (logger *Logger) Warning(message string, extra ...any) {
	if logger.CheckLevel(WARNING) {
		entry := GetEntry(reflect.TypeOf(&WarningLog{})).(*WarningLog)
		logger.fillBase(entry.Base(), LogTypeWarning)
		entry.Warning = message
		entry.CallStacks = getCallStacks(logger.truncations)
		fillExtra(entry.Extra, extra)
		logger.Log(entry)
	}
}

// Error emits an error-level message and captures the call stack.
func (logger *Logger) Error(message string, extra ...any) {
	if logger.CheckLevel(ERROR) {
		entry := GetEntry(reflect.TypeOf(&ErrorLog{})).(*ErrorLog)
		logger.fillBase(entry.Base(), LogTypeError)
		entry.Error = message
		entry.CallStacks = getCallStacks(logger.truncations)
		fillExtra(entry.Extra, extra)
		logger.Log(entry)
	}
}
// SetLevel overrides the minimum severity at runtime.
func (logger *Logger) SetLevel(level LevelType) {
	logger.level = level
}

// SetDesensitization installs a custom value-masking function.
func (logger *Logger) SetDesensitization(f func(v string) string) {
	logger.desensitization = f
}

// New returns a shallow copy of the logger bound to traceId; all other
// state (writers, formatter, masking rules) is shared with the parent.
func (logger *Logger) New(traceId string) *Logger {
	clone := *logger
	clone.traceId = traceId
	return &clone
}

// GetTraceId reports the trace id this logger stamps on records.
func (logger *Logger) GetTraceId() string {
	return logger.traceId
}

// CheckLevel reports whether a record of the given severity should be
// emitted; an unset (zero) configured level behaves as INFO.
func (logger *Logger) CheckLevel(logLevel LevelType) bool {
	threshold := logger.level
	if threshold == 0 {
		threshold = INFO
	}
	return logLevel >= threshold
}
// fillBase stamps the shared envelope fields — name, type, time, trace
// id, and the host/image identity captured at startup — onto a pooled
// entry's BaseLog.
func (logger *Logger) fillBase(base *BaseLog, logType string) {
	base.LogName = logger.config.Name
	base.LogType = logType
	base.LogTime = MakeLogTime(time.Now())
	base.TraceId = logger.traceId
	base.ImageName = dockerImageName
	base.ImageTag = dockerImageTag
	base.ServerName = serverName
	base.ServerIp = serverIp
}

46
pool.go Normal file
View File

@ -0,0 +1,46 @@
package log
import (
"reflect"
"sync"
)
// LogEntry is the contract for poolable log records: they can wipe
// themselves for reuse and expose their shared BaseLog envelope.
type LogEntry interface {
	Reset()
	Base() *BaseLog
}

// PoolManager keeps one sync.Pool per concrete entry type.
type PoolManager struct {
	pools sync.Map // map[reflect.Type]*sync.Pool
}

// globalPools is the process-wide pool registry used by GetEntry and
// PutEntry.
var globalPools = &PoolManager{}
// GetEntry returns a pooled instance of t (a pointer type implementing
// LogEntry). The instance is always Reset before being handed out, so
// callers receive it in a clean, pre-allocated state.
func GetEntry(t reflect.Type) LogEntry {
	p, _ := globalPools.pools.LoadOrStore(t, &sync.Pool{
		New: func() any { return reflect.New(t.Elem()).Interface() },
	})
	e := p.(*sync.Pool).Get().(LogEntry)
	e.Reset()
	return e
}

// PutEntry returns an entry to the pool of its dynamic type. No Reset
// happens here; GetEntry resets on the way out instead.
func PutEntry(entry LogEntry) {
	if p, ok := globalPools.pools.Load(reflect.TypeOf(entry)); ok {
		p.(*sync.Pool).Put(entry)
	}
}

// WithEntry runs fn with a pooled entry of type t and guarantees the
// entry is recycled afterwards, even if fn panics.
func WithEntry(t reflect.Type, fn func(LogEntry)) {
	e := GetEntry(t)
	defer PutEntry(e)
	fn(e)
}

38
pool_test.go Normal file
View File

@ -0,0 +1,38 @@
package log
import (
"reflect"
"testing"
)
// MockRequestLog is a minimal pooled entry used to verify reuse logic.
type MockRequestLog struct {
	BaseLog
	RequestId string
	UsedTime  float32
}

// Reset wipes the mock back to its zero state.
func (m *MockRequestLog) Reset() {
	m.BaseLog.Reset()
	m.RequestId = ""
	m.UsedTime = 0
}

// Base exposes the embedded envelope.
func (m *MockRequestLog) Base() *BaseLog {
	return &m.BaseLog
}

// TestWithEntry checks that WithEntry recycles the entry and that a
// subsequent GetEntry hands it back fully reset.
func TestWithEntry(t *testing.T) {
	typ := reflect.TypeOf(&MockRequestLog{})
	WithEntry(typ, func(e LogEntry) {
		e.(*MockRequestLog).RequestId = "with-entry-id"
	})
	reused := GetEntry(typ).(*MockRequestLog)
	if reused.RequestId != "" {
		t.Errorf("Expected reset, got %s", reused.RequestId)
	}
}

54
reliability_test.go Normal file
View File

@ -0,0 +1,54 @@
package log
import (
"bufio"
"os"
"strings"
"sync"
"testing"
)
// TestLoggerReliability writes a burst of concurrent log lines to a
// file and verifies that every line arrives exactly once.
func TestLoggerReliability(t *testing.T) {
	logFile := "reliability.log"
	defer os.Remove(logFile)
	logger := NewLogger(Config{
		File: logFile,
	})
	const count = 1000
	var wg sync.WaitGroup
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func(idx int) {
			defer wg.Done()
			logger.Info("reliability", "index", idx)
		}(i)
	}
	wg.Wait()
	// Stop the async writer and wait for it to drain before reading.
	Stop()
	Wait()
	file, err := os.Open(logFile)
	if err != nil {
		t.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	lines := 0
	found := make(map[string]bool)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "reliability") {
			lines++
			found[line] = true
		}
	}
	// The scanner error was previously ignored; a truncated read would
	// have gone unnoticed.
	if err := scanner.Err(); err != nil {
		t.Fatal(err)
	}
	if lines != count {
		t.Errorf("Expected %d log lines, got %d", count, lines)
	}
	// found was collected but never checked before: every index is
	// distinct, so duplicate lines indicate torn or repeated writes.
	if len(found) != lines {
		t.Errorf("Expected %d distinct lines, got %d", lines, len(found))
	}
}

212
standard.go Normal file
View File

@ -0,0 +1,212 @@
package log
// Record type tags written into BaseLog.LogType.
const LogTypeDebug = "debug"
const LogTypeInfo = "info"
const LogTypeWarning = "warning"
const LogTypeError = "error"
const LogTypeUndefined = "undefined"
const LogTypeDb = "db"
const LogTypeDbError = "dbError"
const LogTypeServer = "server"
const LogTypeServerError = "serverError"
const LogTypeTask = "task"
const LogTypeMonitor = "monitor"
const LogTypeStatistic = "statistic"
const LogTypeRequest = "request"

// LogDefaultSensitive is the default comma-separated list of field
// names whose values are masked.
const LogDefaultSensitive = "phone,password,secret,token,accessToken"

// Names of environment variables that may carry logger configuration.
const LogEnvLevel = "LOG_LEVEL"
const LogEnvFile = "LOG_FILE"
const LogEnvSensitive = "LOG_SENSITIVE"
const LogEnvRegexSensitive = "LOG_REGEXSENSITIVE"
// BaseLog is the envelope shared by every log record: identity of the
// application and host plus record type, time and trace id.
type BaseLog struct {
	LogName    string
	LogType    string
	LogTime    string
	TraceId    string
	ImageName  string
	ImageTag   string
	ServerName string
	ServerIp   string
	Extra      map[string]interface{}
}

// Reset wipes the envelope for pooled reuse. All fields are cleared —
// including the host/image identity fields, which were previously left
// stale from the prior use — and the Extra map is emptied while its
// buckets are kept to avoid reallocation.
func (b *BaseLog) Reset() {
	b.LogName = ""
	b.LogType = ""
	b.LogTime = ""
	b.TraceId = ""
	b.ImageName = ""
	b.ImageTag = ""
	b.ServerName = ""
	b.ServerIp = ""
	if b.Extra == nil {
		b.Extra = make(map[string]interface{}, 8)
	} else {
		clear(b.Extra)
	}
}

// Base returns the envelope itself, satisfying the LogEntry interface.
func (b *BaseLog) Base() *BaseLog {
	return b
}
// DebugLog is the record emitted by Logger.Debug.
type DebugLog struct {
	BaseLog
	Debug string
}

// Reset wipes the record for pooled reuse.
func (d *DebugLog) Reset() {
	d.BaseLog.Reset()
	d.Debug = ""
}

// Base exposes the embedded envelope for the LogEntry interface.
func (d *DebugLog) Base() *BaseLog {
	return &d.BaseLog
}

// InfoLog is the record emitted by Logger.Info.
type InfoLog struct {
	BaseLog
	Info string
}

// Reset wipes the record for pooled reuse.
func (i *InfoLog) Reset() {
	i.BaseLog.Reset()
	i.Info = ""
}

// Base exposes the embedded envelope for the LogEntry interface.
func (i *InfoLog) Base() *BaseLog {
	return &i.BaseLog
}
// WarningLog is the record emitted by Logger.Warning; it carries the
// captured call stack.
type WarningLog struct {
	BaseLog
	Warning    string
	CallStacks []string
}

// Reset wipes the record for pooled reuse. CallStacks is truncated in
// place so its backing array survives; the previous string values stay
// referenced until overwritten — a deliberate trade for fewer
// allocations on reuse.
func (w *WarningLog) Reset() {
	w.BaseLog.Reset()
	w.Warning = ""
	w.CallStacks = w.CallStacks[:0]
}

// Base exposes the embedded envelope for the LogEntry interface.
func (w *WarningLog) Base() *BaseLog {
	return &w.BaseLog
}

// ErrorLog is the record emitted by Logger.Error; it carries the
// captured call stack.
type ErrorLog struct {
	BaseLog
	Error      string
	CallStacks []string
}

// Reset wipes the record for pooled reuse (see WarningLog.Reset for
// the CallStacks truncation rationale).
func (e *ErrorLog) Reset() {
	e.BaseLog.Reset()
	e.Error = ""
	e.CallStacks = e.CallStacks[:0]
}

// Base exposes the embedded envelope for the LogEntry interface.
func (e *ErrorLog) Base() *BaseLog {
	return &e.BaseLog
}
// DBLog is the record for database query logging: driver type, DSN,
// the statement with its arguments, and the elapsed time (presumably
// milliseconds, matching MakeUsedTime — confirm at the call sites).
type DBLog struct {
	BaseLog
	DbType    string
	Dsn       string
	Query     string
	QueryArgs string
	UsedTime  float32
}

// Reset wipes the record for pooled reuse.
func (d *DBLog) Reset() {
	d.BaseLog.Reset()
	d.DbType = ""
	d.Dsn = ""
	d.Query = ""
	d.QueryArgs = ""
	d.UsedTime = 0
}

// Base exposes the embedded envelope for the LogEntry interface.
func (d *DBLog) Base() *BaseLog {
	return &d.BaseLog
}
// RequestLog is the record for one handled request: caller identity,
// routing information, request payload and the response summary.
type RequestLog struct {
	BaseLog
	ServerId           string
	App                string
	Node               string
	ClientIp           string
	FromApp            string
	FromNode           string
	UserId             string
	DeviceId           string
	ClientAppName      string
	ClientAppVersion   string
	SessionId          string
	RequestId          string
	Host               string
	Scheme             string
	Proto              string
	AuthLevel          int
	Priority           int
	Method             string
	Path               string
	RequestHeaders     map[string]string
	RequestData        map[string]any
	UsedTime           float32
	ResponseCode       int
	ResponseHeaders    map[string]string
	ResponseDataLength uint
	ResponseData       string
}

// Reset wipes the record for pooled reuse. The three maps are emptied
// with clear() but their buckets are kept, so a recycled entry does
// not reallocate them.
func (r *RequestLog) Reset() {
	r.BaseLog.Reset()
	r.ServerId = ""
	r.App = ""
	r.Node = ""
	r.ClientIp = ""
	r.FromApp = ""
	r.FromNode = ""
	r.UserId = ""
	r.DeviceId = ""
	r.ClientAppName = ""
	r.ClientAppVersion = ""
	r.SessionId = ""
	r.RequestId = ""
	r.Host = ""
	r.Scheme = ""
	r.Proto = ""
	r.AuthLevel = 0
	r.Priority = 0
	r.Method = ""
	r.Path = ""
	if r.RequestHeaders == nil {
		r.RequestHeaders = make(map[string]string, 8)
	} else {
		clear(r.RequestHeaders)
	}
	if r.RequestData == nil {
		r.RequestData = make(map[string]any, 8)
	} else {
		clear(r.RequestData)
	}
	r.UsedTime = 0
	r.ResponseCode = 0
	if r.ResponseHeaders == nil {
		r.ResponseHeaders = make(map[string]string, 8)
	} else {
		clear(r.ResponseHeaders)
	}
	r.ResponseDataLength = 0
	r.ResponseData = ""
}

// Base exposes the embedded envelope for the LogEntry interface.
func (r *RequestLog) Base() *BaseLog {
	return &r.BaseLog
}

180
utility.go Normal file
View File

@ -0,0 +1,180 @@
package log
import (
"encoding/json"
"fmt"
"net"
"os"
"path"
"runtime"
"strings"
"time"
"apigo.cc/go/cast"
)
// Process identity captured once at startup and stamped onto every
// record's BaseLog by fillBase.
var (
	dockerImageName string
	dockerImageTag  string
	serverName      string
	serverIp        string
)

// init resolves image/host identity from the environment and picks the
// first global-unicast IPv4 address that is not on Docker's default
// bridge network (172.17.0.0/16).
func init() {
	dockerImageName = os.Getenv("DOCKER_IMAGE_NAME")
	dockerImageTag = os.Getenv("DOCKER_IMAGE_TAG")
	serverName, _ = os.Hostname()
	addrs, err := net.InterfaceAddrs()
	if err == nil {
		for _, a := range addrs {
			an, ok := a.(*net.IPNet)
			if !ok {
				continue
			}
			// To4 returns nil for pure IPv6 addresses; the previous
			// code called .String() on that nil and could record the
			// literal "<nil>" as the server IP. Skip non-IPv4 instead.
			ip4 := an.IP.To4()
			if ip4 == nil || !an.IP.IsGlobalUnicast() {
				continue
			}
			// Ignore Docker's private bridge range.
			if strings.HasPrefix(ip4.String(), "172.17.") {
				continue
			}
			serverIp = ip4.String()
			break
		}
	}
}
// MakeTime parses an RFC3339Nano timestamp; the zero time is returned
// when parsing fails.
func MakeTime(logTime string) time.Time {
	parsed, _ := time.Parse(time.RFC3339Nano, logTime)
	return parsed
}

// MakeLogTime renders a time in the RFC3339Nano wire format.
func MakeLogTime(tm time.Time) string {
	return tm.Format(time.RFC3339Nano)
}

// MakeUsedTime returns the elapsed span in fractional milliseconds.
func MakeUsedTime(startTime, endTime time.Time) float32 {
	return float32(endTime.Sub(startTime).Nanoseconds()) / 1e6
}
// ParseBaseLog parses one emitted log line back into a BaseLog. The
// line may carry a non-JSON prefix (e.g. a timestamp): parsing starts
// at the first '{'. Known envelope keys are matched case-insensitively;
// everything else lands in Extra under a lower-cased key. Lines
// without valid JSON fall back to ParseBadLog.
func ParseBaseLog(line string) *BaseLog {
	start := strings.IndexByte(line, '{')
	if start == -1 {
		return ParseBadLog(line)
	}
	fields := make(map[string]any)
	if err := json.Unmarshal([]byte(line[start:]), &fields); err != nil {
		return ParseBadLog(line)
	}
	out := BaseLog{Extra: make(map[string]any)}
	for key, value := range fields {
		switch lk := strings.ToLower(key); lk {
		case "logname":
			out.LogName = cast.String(value)
		case "logtype":
			out.LogType = cast.String(value)
		case "logtime":
			out.LogTime = cast.String(value)
		case "traceid":
			out.TraceId = cast.String(value)
		case "imagename":
			out.ImageName = cast.String(value)
		case "imagetag":
			out.ImageTag = cast.String(value)
		case "servername":
			out.ServerName = cast.String(value)
		case "serverip":
			out.ServerIp = cast.String(value)
		default:
			out.Extra[lk] = value
		}
	}
	return &out
}
// ParseBadLog salvages a non-JSON line written by the plain go logger:
// it expects a leading "2006/01/02 15:04:05" (19 chars) or
// "2006/01/02 15:04:05.000000" (26 chars) timestamp and stores the
// remainder under Extra["info"]. It returns nil when no valid
// timestamp prefix is found.
func ParseBadLog(line string) *BaseLog {
	out := BaseLog{Extra: make(map[string]any)}
	out.LogType = LogTypeUndefined
	var prefixLen int
	switch {
	case len(line) > 19 && line[19] == ' ':
		prefixLen = 19
	case len(line) > 26 && line[26] == ' ':
		prefixLen = 26
	default:
		return nil
	}
	layout := "2006/01/02 15:04:05"
	if prefixLen == 26 {
		layout = "2006/01/02 15:04:05.000000"
	}
	tm, err := time.Parse(layout, line[0:prefixLen])
	if err != nil {
		return nil
	}
	out.LogTime = MakeLogTime(tm)
	out.Extra["info"] = line[prefixLen+1:]
	return &out
}
// fixField normalizes a field name: lower-cased with '-' removed, so
// that e.g. "Access-Token" and "accesstoken" compare equal.
func fixField(s string) string {
	return strings.ReplaceAll(strings.ToLower(s), "-", "")
}
// getCallStacks walks up to 50 caller frames and renders each as
// "file:line". Frames under the Go toolchain source tree and this
// package's own frames at the top of the stack are skipped; any of the
// given truncation fragments found in a path cuts off everything up to
// and including the fragment.
func getCallStacks(truncations []string) []string {
	stacks := make([]string, 0)
	insideLogger := true
	for depth := 0; depth < 50; depth++ {
		_, file, lineNo, ok := runtime.Caller(depth)
		if !ok {
			break
		}
		if strings.Contains(file, "/go/src/") {
			continue
		}
		// Skip this package's own frames until the first outside frame
		// is seen (path check matches the post-migration /log/ layout).
		if strings.Contains(file, "/log/") {
			if insideLogger {
				continue
			}
		} else {
			insideLogger = false
		}
		for _, cut := range truncations {
			if pos := strings.Index(file, cut); pos != -1 {
				file = file[pos+len(cut):]
			}
		}
		stacks = append(stacks, fmt.Sprintf("%s:%d", file, lineNo))
	}
	return stacks
}
// GetDefaultName derives the application name: the DISCOVER_APP
// environment variable (either case) wins, then the Docker image name
// stripped of its registry path, tag and digest, and finally the
// executable's base name.
func GetDefaultName() string {
	if name := os.Getenv("DISCOVER_APP"); name != "" {
		return name
	}
	if name := os.Getenv("discover_app"); name != "" {
		return name
	}
	if image := os.Getenv("DOCKER_IMAGE_NAME"); image != "" {
		parts := strings.Split(image, "/")
		short := parts[len(parts)-1]
		short = strings.SplitN(short, ":", 2)[0]
		short = strings.SplitN(short, "#", 2)[0]
		if short != "" {
			return short
		}
	}
	return path.Base(os.Args[0])
}

193
viewer.go Normal file
View File

@ -0,0 +1,193 @@
package log
import (
"encoding/json"
"fmt"
"math"
"regexp"
"strings"
"time"
"apigo.cc/go/cast"
"apigo.cc/go/shell"
)
// errorLineMatcher finds "file.go:123" fragments for highlighting.
var errorLineMatcher = regexp.MustCompile(`(\w+\.go:\d+)`)

// codeFileMatcher matches source file names.
// NOTE(review): not referenced in this file's visible code — confirm
// whether it is still needed.
var codeFileMatcher = regexp.MustCompile(`(\w+?\.)(go|js)`)

// Viewable renders one serialized log line as a colorized, human
// readable console line: timestamp, trace id, a level/type banner, the
// remaining fields, and an indented call-stack tail when present.
// Unparseable lines are returned with code locations highlighted.
func Viewable(line string) string {
	b := ParseBaseLog(line)
	if b == nil {
		// Not a recognizable log line: just highlight code locations.
		if strings.Contains(line, ".go:") {
			if strings.Contains(line, "/ssgo/") || strings.Contains(line, "/ssdo/") || strings.Contains(line, "/gojs/") {
				line = errorLineMatcher.ReplaceAllString(line, shell.BYellow("$1"))
			} else if !strings.Contains(line, "/apigo.cc/") {
				line = errorLineMatcher.ReplaceAllString(line, shell.BMagenta("$1"))
			} else if !strings.Contains(line, "/go/src/") {
				line = errorLineMatcher.ReplaceAllString(line, shell.BRed("$1"))
			}
		}
		return line
	}
	// LogTime is either RFC3339-like (contains 'T') or a unix float.
	var logTime time.Time
	if strings.ContainsRune(b.LogTime, 'T') {
		logTime = MakeTime(b.LogTime)
	} else {
		ft := cast.Float64(b.LogTime)
		ts := int64(math.Floor(ft))
		tns := int64((ft - float64(ts)) * 1e9)
		logTime = time.Unix(ts, tns)
	}
	var outs []string
	// Split "MM-DD HH:MM:SS.mmm" into date, time and millisecond parts
	// so each can be styled differently.
	t1 := strings.Split(logTime.Format("01-02 15:04:05.000"), " ")
	d := t1[0]
	t := ""
	if len(t1) > 1 {
		t = t1[1]
	}
	t2 := strings.Split(t, ".")
	s := ""
	if len(t2) > 1 {
		s = t2[1]
	}
	t = t2[0]
	outs = append(outs, shell.White(shell.Bold, d+" "+t))
	if s != "" {
		outs = append(outs, shell.White("."+s))
	}
	outs = append(outs, " ", shell.Style(shell.TextWhite, shell.Dim, shell.Underline, b.TraceId))
	// Detect the message level by which message key is present. (The
	// mixed-case variants are kept for safety although ParseBaseLog
	// lower-cases Extra keys.)
	level := ""
	levelKey := ""
	for _, k := range []string{"debug", "warning", "error", "info", "Debug", "Warning", "Error", "Info"} {
		if b.Extra[k] != nil {
			level = strings.ToLower(k)
			levelKey = k
			break
		}
	}
	if b.LogType == LogTypeRequest {
		// Request lines get a "REQUEST method path code time" banner;
		// the status code is colored by its class.
		method := cast.String(b.Extra["method"])
		path := cast.String(b.Extra["path"])
		code := cast.Int(b.Extra["responsecode"])
		used := float32(cast.Float64(b.Extra["usedtime"]))
		outs = append(outs, " ", shell.Cyan(shell.Bold, "REQUEST"), " ", shell.Cyan(method), " ", path)
		if code >= 500 {
			outs = append(outs, " ", shell.BRed(cast.String(code)))
		} else if code >= 400 {
			outs = append(outs, " ", shell.BYellow(cast.String(code)))
		} else {
			outs = append(outs, " ", shell.BGreen(cast.String(code)))
		}
		outs = append(outs, " ", shell.Style(shell.Dim, fmt.Sprintf("%.2fms", used)))
		// Drop the well-known request and envelope fields so the loop
		// below prints only genuinely extra data.
		delete(b.Extra, "method")
		delete(b.Extra, "path")
		delete(b.Extra, "responsecode")
		delete(b.Extra, "usedtime")
		delete(b.Extra, "host")
		delete(b.Extra, "scheme")
		delete(b.Extra, "proto")
		delete(b.Extra, "clientip")
		delete(b.Extra, "serverid")
		delete(b.Extra, "app")
		delete(b.Extra, "node")
		delete(b.Extra, "fromapp")
		delete(b.Extra, "fromnode")
		delete(b.Extra, "userid")
		delete(b.Extra, "deviceid")
		delete(b.Extra, "clientappname")
		delete(b.Extra, "clientappversion")
		delete(b.Extra, "sessionid")
		delete(b.Extra, "requestid")
		delete(b.Extra, "authlevel")
		delete(b.Extra, "priority")
		delete(b.Extra, "requestheaders")
		delete(b.Extra, "requestdata")
		delete(b.Extra, "responseheaders")
		delete(b.Extra, "responsedatalength")
		delete(b.Extra, "responsedata")
		delete(b.Extra, "logname")
		delete(b.Extra, "logtype")
		delete(b.Extra, "logtime")
		delete(b.Extra, "traceid")
		delete(b.Extra, "imagename")
		delete(b.Extra, "imagetag")
		delete(b.Extra, "servername")
		delete(b.Extra, "serverip")
	} else if b.LogType == LogTypeStatistic {
		outs = append(outs, " ", shell.Cyan(shell.Bold, "STATISTIC"))
	} else if b.LogType == LogTypeTask {
		outs = append(outs, " ", shell.Cyan(shell.Bold, "TASK"))
	} else {
		if level != "" {
			msg := cast.String(b.Extra[levelKey])
			delete(b.Extra, levelKey)
			switch level {
			case "info":
				outs = append(outs, " ", shell.Cyan(msg))
			case "warning":
				outs = append(outs, " ", shell.Yellow(msg))
			case "error":
				outs = append(outs, " ", shell.Red(msg))
			case "debug":
				outs = append(outs, " ", msg)
			}
		} else if b.LogType == "undefined" {
			outs = append(outs, " ", shell.Style(shell.Dim, "-"))
		} else {
			outs = append(outs, " ", shell.Cyan(shell.Bold, b.LogType))
		}
	}
	// ParseBaseLog lower-cases every Extra key, so the call stack must
	// be looked up as "callstacks" — the previous mixed-case key
	// "callStacks" could never match and stacks were never rendered.
	callStacks := b.Extra["callstacks"]
	delete(b.Extra, "callstacks")
	if b.Extra != nil {
		for k, v := range b.Extra {
			vStr := cast.String(v)
			if k == "extra" && len(vStr) > 0 && vStr[0] == '{' {
				extra := make(map[string]any)
				_ = json.Unmarshal([]byte(vStr), &extra)
				for k2, v2 := range extra {
					outs = append(outs, " ", shell.Style(shell.TextWhite, shell.Dim, shell.Italic, k2+":"), cast.String(v2))
				}
			} else {
				outs = append(outs, " ", shell.Style(shell.TextWhite, shell.Dim, shell.Italic, k+":"), vStr)
			}
		}
	}
	if callStacks != nil {
		// The stack may arrive as a JSON array string or as []any.
		var callStacksList []any
		if csStr, ok := callStacks.(string); ok && len(csStr) > 2 && csStr[0] == '[' {
			_ = json.Unmarshal([]byte(csStr), &callStacksList)
		} else if csList, ok := callStacks.([]any); ok {
			callStacksList = csList
		}
		if len(callStacksList) > 0 {
			outs = append(outs, "\n")
			for _, vi := range callStacksList {
				v := cast.String(vi)
				// Dim the directory part; keep the file:line bright.
				postfix := ""
				if pos := strings.LastIndexByte(v, '/'); pos != -1 {
					postfix = v[pos+1:]
					v = v[:pos+1]
				} else {
					postfix = v
					v = ""
				}
				outs = append(outs, "  ", shell.Style(shell.Dim, v))
				outs = append(outs, shell.Style(shell.TextWhite, postfix), "\n")
			}
		}
	}
	return strings.Join(outs, "")
}

148
writer.go Normal file
View File

@ -0,0 +1,148 @@
package log
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
// Writer is the sink interface implemented by every log output target.
// Log receives one serialized log entry; Run is invoked periodically by
// the background writer goroutine so the target can flush buffered output.
type Writer interface {
	Log([]byte)
	Run()
}
var (
	writerRunning  atomic.Bool // true while the async writer goroutine accepts entries
	writerLock     sync.Mutex  // only used to serialize writer registration
	writerStopChan chan bool   // closed by writerRunner on exit; received by Wait
	writers        atomic.Value // holds []Writer; read lock-free on the hot path
	logChannel     chan []byte  // buffered queue between WriteAsync and writerRunner
)
// ConsoleWriter is the built-in Writer that prints each entry to stdout.
type ConsoleWriter struct {
}
// Log renders one raw entry into its human-readable form and prints it.
func (w *ConsoleWriter) Log(data []byte) {
	line := Viewable(string(data))
	fmt.Println(line)
}
// Run is a no-op: console output is written immediately in Log, so there
// is nothing to flush on the periodic tick.
func (w *ConsoleWriter) Run() {
}
// init sizes the async log queue, publishes an empty writer list so the
// atomic.Value always holds a []Writer, and registers the built-in
// "console" output target.
func init() {
	logChannel = make(chan []byte, 10000)
	writers.Store([]Writer{})
	RegisterWriterMaker("console", func(conf *Config) Writer {
		return &ConsoleWriter{}
	})
}
// WriteAsync queues one serialized log entry for the background writer.
// It never blocks the caller: when the queue is full the entry is dropped,
// and a send racing with Stop (which closes the channel) is absorbed by
// the deferred recover.
func WriteAsync(buf []byte) {
	defer func() {
		_ = recover() // sending on the just-closed channel panics; swallow it
	}()
	if writerRunning.Load() {
		select {
		case logChannel <- buf:
		default:
			// queue full: drop the entry rather than stall the hot path
		}
	}
}
// Start launches the background writer goroutine. It is idempotent: a call
// while already running is a no-op (only the caller that wins the CAS
// proceeds).
//
// Fix: Stop() closes logChannel, so a later Start used to spawn a runner
// that immediately drained a closed channel and exited while writerRunning
// stayed true — every subsequent log was silently dropped. The channel is
// now recreated before the new runner starts, making the Start/Stop cycle
// repeatable. Callers should Wait() for the previous runner to finish
// before restarting.
// NOTE(review): WriteAsync reads logChannel without synchronization, so the
// reassignment here is a benign-in-practice but detectable data race during
// a restart window — confirm against the intended lifecycle.
func Start() {
	if !writerRunning.CompareAndSwap(false, true) {
		return // already running
	}
	writerLock.Lock()
	// Recreate the queue in case a previous Stop() closed it; otherwise the
	// new runner would see a closed channel and exit at once.
	logChannel = make(chan []byte, 10000)
	writerStopChan = make(chan bool)
	writerLock.Unlock()
	go writerRunner()
}
// Stop flips the running flag off and closes the log queue so the
// background runner drains the remaining entries, flushes, and exits.
// Idempotent: only the call that wins the CAS performs the close.
func Stop() {
	wasRunning := writerRunning.CompareAndSwap(true, false)
	if !wasRunning {
		return
	}
	close(logChannel)
}
// Wait blocks until the background runner has fully drained and flushed
// (it closes writerStopChan on exit), then clears the channel reference so
// a second Wait returns immediately.
// NOTE(review): writerStopChan is read and written without synchronization
// across Start/Wait/writerRunner — confirm single-goroutine lifecycle use.
func Wait() {
	ch := writerStopChan
	if ch == nil {
		return
	}
	<-ch
	writerStopChan = nil
}
// writerRunner is the background goroutine that drains the log queue,
// fans each entry out to the registered writers/files, and flushes
// buffered output every 200ms. When Stop closes the channel it performs a
// final flush and exits, signalling completion by closing writerStopChan.
//
// Fix (idiom): the original inner batch loop exited via the sentinel
// assignment `batchCount = 100` in the default case; replaced with a
// labeled break, which states the intent directly. Behavior is unchanged:
// after the first entry, up to 100 more queued entries are drained in one
// pass so a burst costs one select per batch instead of one per entry.
func writerRunner() {
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()
	defer func() {
		// Signal Wait() that the runner has fully drained and flushed.
		if writerStopChan != nil {
			close(writerStopChan)
		}
	}()
	for {
		select {
		case buf, ok := <-logChannel:
			if !ok {
				flushWriters()
				return
			}
			processLog(buf)
		batch:
			for i := 0; i < 100; i++ {
				select {
				case nextBuf, nextOk := <-logChannel:
					if !nextOk {
						flushWriters()
						return
					}
					processLog(nextBuf)
				default:
					break batch // queue momentarily empty
				}
			}
		case <-ticker.C:
			flushWriters()
		}
	}
}
// processLog fans one serialized entry out to every registered writer and
// every open log file. The writer list is read via atomic.Value so the hot
// path takes no lock; the file map is guarded by filesLock.
//
// Fix (perf): time.Now() and the []byte→string copy were evaluated inside
// the per-file loop, i.e. once per file per entry; they are now computed
// once per entry.
func processLog(buf []byte) {
	curWriters, _ := writers.Load().([]Writer)
	for _, w := range curWriters {
		w.Log(buf)
	}
	// One timestamp and one string copy per entry, shared by all files.
	now := time.Now()
	line := string(buf)
	filesLock.RLock()
	for _, f := range files {
		f.Write(now, line)
	}
	filesLock.RUnlock()
}
// flushWriters asks every registered writer and every open log file to
// flush whatever output it has buffered.
func flushWriters() {
	ws, _ := writers.Load().([]Writer)
	for i := range ws {
		ws[i].Run()
	}
	filesLock.RLock()
	defer filesLock.RUnlock()
	for _, f := range files {
		f.Run()
	}
}