@@ -7,3 +7,4 @@ | |||
*.iml | |||
out | |||
gen | |||
runtime |
@@ -4,8 +4,10 @@ import ( | |||
"bytes" | |||
"encoding/json" | |||
"fmt" | |||
"github.com/astaxie/beego" | |||
"github.com/astaxie/beego/logs" | |||
"golib.gaore.com/GaoreGo/grlogs/logs" | |||
"os" | |||
"path" | |||
"sync" | |||
) | |||
const ( | |||
@@ -42,7 +44,7 @@ type ConnLogConfig struct { | |||
Level int `json:"level"` | |||
} | |||
var loggers = make(map[string]*Logger) | |||
var loggers = sync.Map{} | |||
type Logger struct { | |||
*logs.BeeLogger | |||
@@ -63,18 +65,19 @@ func (l *Logger) Stop() { | |||
func GetLogger(name string) *Logger { | |||
if l, ok := loggers[name]; ok { | |||
return l | |||
if l, ok := loggers.Load(name); ok { | |||
return l.(*Logger) | |||
} else { | |||
var level int = LEVEL_WARN | |||
if beego.BConfig.RunMode == "dev" { | |||
if s := os.Getenv("CENTER_RUNMODE"); s == "dev" { | |||
level = LEVEL_ALL | |||
} | |||
wd, _ := os.Getwd() | |||
conf1 := FileLogConfig{ | |||
Filename: GetCwd(fmt.Sprintf("runtime/logs/%s.log", name)), | |||
Filename: path.Join(wd, fmt.Sprintf("runtime/logs/%s.log", name)), | |||
Level: LEVEL_ALL, | |||
Maxlines: 0, | |||
Daily: true, | |||
@@ -89,13 +92,14 @@ func GetLogger(name string) *Logger { | |||
confString, _ := json.Marshal(&conf1) | |||
confString2, _ := json.Marshal(&conf2) | |||
loggers[name] = &Logger{} | |||
loggers[name].BeeLogger = logs.NewLogger() | |||
loggers[name].SetLogger(logs.AdapterFile, bytes.NewBuffer(confString).String()) | |||
loggers[name].SetLogger(logs.AdapterConsole, bytes.NewBuffer(confString2).String()) | |||
loggers[name].BeeLogger.SetPrefix("_" + name) | |||
loggers[name].BeeLogger.EnableFuncCallDepth(true) | |||
loggers[name].BeeLogger.SetLogFuncCallDepth(2) | |||
return loggers[name] | |||
l := new(Logger) | |||
l.BeeLogger = logs.NewLogger() | |||
l.SetLogger(logs.AdapterFile, bytes.NewBuffer(confString).String()) | |||
l.SetLogger(logs.AdapterConsole, bytes.NewBuffer(confString2).String()) | |||
l.BeeLogger.SetPrefix("_" + name) | |||
l.BeeLogger.EnableFuncCallDepth(true) | |||
l.BeeLogger.SetLogFuncCallDepth(2) | |||
loggers.Store(name, l) | |||
return l | |||
} | |||
} |
@@ -1,12 +1,9 @@ | |||
package grlogs | |||
import ( | |||
"github.com/astaxie/beego/logs" | |||
"testing" | |||
) | |||
func TestGetLogger(t *testing.T) { | |||
logs.NewLogger() | |||
logs.Info(LEVEL_ALL) | |||
GetLogger("nds").Debug("akldalskflasfa") | |||
} |
@@ -0,0 +1,72 @@ | |||
## logs | |||
logs is a Go logs manager. It can use many logs adapters. The repo is inspired by `database/sql` . | |||
## How to install? | |||
go get github.com/astaxie/beego/logs | |||
## What adapters are supported? | |||
As of now this logs package supports the console, file, smtp and conn adapters. | |||
## How to use it? | |||
First you must import it | |||
```golang | |||
import ( | |||
"github.com/astaxie/beego/logs" | |||
) | |||
``` | |||
Then init a Log (example with console adapter) | |||
```golang | |||
log := logs.NewLogger(10000) | |||
log.SetLogger("console", "") | |||
``` | |||
> the first parameter sets the buffer size of the logger's message channel | |||
Use it like this: | |||
```golang | |||
log.Trace("trace") | |||
log.Info("info") | |||
log.Warn("warning") | |||
log.Debug("debug") | |||
log.Critical("critical") | |||
``` | |||
## File adapter | |||
Configure file adapter like this: | |||
```golang | |||
log := NewLogger(10000) | |||
log.SetLogger("file", `{"filename":"test.log"}`) | |||
``` | |||
## Conn adapter | |||
Configure like this: | |||
```golang | |||
log := NewLogger(1000) | |||
log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`) | |||
log.Info("info") | |||
``` | |||
## Smtp adapter | |||
Configure like this: | |||
```golang | |||
log := NewLogger(10000) | |||
log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`) | |||
log.Critical("sendmail critical") | |||
time.Sleep(time.Second * 30) | |||
``` |
@@ -0,0 +1,83 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"bytes" | |||
"encoding/json" | |||
"fmt" | |||
"strings" | |||
"time" | |||
) | |||
const (
	// apacheFormatPattern is the printf layout used for Apache-style access lines.
	apacheFormatPattern = "%s - - [%s] \"%s %d %d\" %f %s %s"
	// apacheFormat selects Apache-style output in AccessLog.
	apacheFormat = "APACHE_FORMAT"
	// jsonFormat selects JSON output in AccessLog (also the default).
	jsonFormat = "JSON_FORMAT"
)
// AccessLogRecord struct for holding access log data.
type AccessLogRecord struct {
	RemoteAddr     string        `json:"remote_addr"`
	RequestTime    time.Time     `json:"request_time"`
	RequestMethod  string        `json:"request_method"`
	Request        string        `json:"request"`
	ServerProtocol string        `json:"server_protocol"`
	Host           string        `json:"host"`
	Status         int           `json:"status"`
	BodyBytesSent  int64         `json:"body_bytes_sent"`
	ElapsedTime    time.Duration `json:"elapsed_time"`
	HTTPReferrer   string        `json:"http_referrer"`
	HTTPUserAgent  string        `json:"http_user_agent"`
	RemoteUser     string        `json:"remote_user"`
}
func (r *AccessLogRecord) json() ([]byte, error) { | |||
buffer := &bytes.Buffer{} | |||
encoder := json.NewEncoder(buffer) | |||
disableEscapeHTML(encoder) | |||
err := encoder.Encode(r) | |||
return buffer.Bytes(), err | |||
} | |||
func disableEscapeHTML(i interface{}) { | |||
if e, ok := i.(interface { | |||
SetEscapeHTML(bool) | |||
}); ok { | |||
e.SetEscapeHTML(false) | |||
} | |||
} | |||
// AccessLog - Format and print access log. | |||
func AccessLog(r *AccessLogRecord, format string) { | |||
var msg string | |||
switch format { | |||
case apacheFormat: | |||
timeFormatted := r.RequestTime.Format("02/Jan/2006 03:04:05") | |||
msg = fmt.Sprintf(apacheFormatPattern, r.RemoteAddr, timeFormatted, r.Request, r.Status, r.BodyBytesSent, | |||
r.ElapsedTime.Seconds(), r.HTTPReferrer, r.HTTPUserAgent) | |||
case jsonFormat: | |||
fallthrough | |||
default: | |||
jsonData, err := r.json() | |||
if err != nil { | |||
msg = fmt.Sprintf(`{"Error": "%s"}`, err) | |||
} else { | |||
msg = string(jsonData) | |||
} | |||
} | |||
beeLogger.writeMsg(levelLoggerImpl, strings.TrimSpace(msg)) | |||
} |
@@ -0,0 +1,186 @@ | |||
package alils | |||
import ( | |||
"encoding/json" | |||
"strings" | |||
"sync" | |||
"time" | |||
"github.com/astaxie/beego/logs" | |||
"github.com/gogo/protobuf/proto" | |||
) | |||
const (
	// CacheSize set the flush size
	CacheSize int = 64
	// Delimiter define the topic delimiter
	Delimiter string = "##"
)
// Config is the Config for Ali Log
type Config struct {
	Project   string   `json:"project"`
	Endpoint  string   `json:"endpoint"`
	KeyID     string   `json:"key_id"`
	KeySecret string   `json:"key_secret"`
	LogStore  string   `json:"log_store"`
	Topics    []string `json:"topics"`
	Source    string   `json:"source"`
	Level     int      `json:"level"`
	// FlushWhen is the per-group buffer size that triggers a flush;
	// capped at CacheSize by Init.
	FlushWhen int `json:"flush_when"`
}
// aliLSWriter implements LoggerInterface.
// it writes messages in keep-live tcp connection.
type aliLSWriter struct {
	store *LogStore
	// group[0] is the default (empty-topic) group; the rest mirror Config.Topics.
	group    []*LogGroup
	withMap  bool
	groupMap map[string]*LogGroup
	// lock guards the Logs slices of every group.
	lock *sync.Mutex
	Config
}
// NewAliLS create a new Logger | |||
func NewAliLS() logs.Logger { | |||
alils := new(aliLSWriter) | |||
alils.Level = logs.LevelTrace | |||
return alils | |||
} | |||
// Init parse config and init struct | |||
func (c *aliLSWriter) Init(jsonConfig string) (err error) { | |||
json.Unmarshal([]byte(jsonConfig), c) | |||
if c.FlushWhen > CacheSize { | |||
c.FlushWhen = CacheSize | |||
} | |||
prj := &LogProject{ | |||
Name: c.Project, | |||
Endpoint: c.Endpoint, | |||
AccessKeyID: c.KeyID, | |||
AccessKeySecret: c.KeySecret, | |||
} | |||
c.store, err = prj.GetLogStore(c.LogStore) | |||
if err != nil { | |||
return err | |||
} | |||
// Create default Log Group | |||
c.group = append(c.group, &LogGroup{ | |||
Topic: proto.String(""), | |||
Source: proto.String(c.Source), | |||
Logs: make([]*Log, 0, c.FlushWhen), | |||
}) | |||
// Create other Log Group | |||
c.groupMap = make(map[string]*LogGroup) | |||
for _, topic := range c.Topics { | |||
lg := &LogGroup{ | |||
Topic: proto.String(topic), | |||
Source: proto.String(c.Source), | |||
Logs: make([]*Log, 0, c.FlushWhen), | |||
} | |||
c.group = append(c.group, lg) | |||
c.groupMap[topic] = lg | |||
} | |||
if len(c.group) == 1 { | |||
c.withMap = false | |||
} else { | |||
c.withMap = true | |||
} | |||
c.lock = &sync.Mutex{} | |||
return nil | |||
} | |||
// WriteMsg write message in connection. | |||
// if connection is down, try to re-connect. | |||
func (c *aliLSWriter) WriteMsg(when time.Time, msg string, level int) (err error) { | |||
if level > c.Level { | |||
return nil | |||
} | |||
var topic string | |||
var content string | |||
var lg *LogGroup | |||
if c.withMap { | |||
// Topic,LogGroup | |||
strs := strings.SplitN(msg, Delimiter, 2) | |||
if len(strs) == 2 { | |||
pos := strings.LastIndex(strs[0], " ") | |||
topic = strs[0][pos+1 : len(strs[0])] | |||
content = strs[0][0:pos] + strs[1] | |||
lg = c.groupMap[topic] | |||
} | |||
// send to empty Topic | |||
if lg == nil { | |||
content = msg | |||
lg = c.group[0] | |||
} | |||
} else { | |||
content = msg | |||
lg = c.group[0] | |||
} | |||
c1 := &LogContent{ | |||
Key: proto.String("msg"), | |||
Value: proto.String(content), | |||
} | |||
l := &Log{ | |||
Time: proto.Uint32(uint32(when.Unix())), | |||
Contents: []*LogContent{ | |||
c1, | |||
}, | |||
} | |||
c.lock.Lock() | |||
lg.Logs = append(lg.Logs, l) | |||
c.lock.Unlock() | |||
if len(lg.Logs) >= c.FlushWhen { | |||
c.flush(lg) | |||
} | |||
return nil | |||
} | |||
// Flush sends every buffered LogGroup to the log store.
// (The previous comment claimed this method was empty; it is not.)
func (c *aliLSWriter) Flush() {
	// flush all group
	for _, lg := range c.group {
		c.flush(lg)
	}
}
// Destroy implements the logger interface; the writer holds no connection
// state of its own, so there is nothing to release.
func (c *aliLSWriter) Destroy() {
}
// flush uploads one group's buffered logs and, on success, resets its buffer.
// NOTE(review): a PutLogs failure is silently dropped and the buffered logs
// are retained for the next attempt — confirm this best-effort behaviour
// (and the unbounded retention on repeated failure) is intended.
func (c *aliLSWriter) flush(lg *LogGroup) {
	c.lock.Lock()
	defer c.lock.Unlock()
	err := c.store.PutLogs(lg)
	if err != nil {
		return
	}
	lg.Logs = make([]*Log, 0, c.FlushWhen)
}
// init registers this adapter with beego's logs package under AdapterAliLS.
func init() {
	logs.Register(logs.AdapterAliLS, NewAliLS)
}
@@ -0,0 +1,13 @@ | |||
package alils | |||
const (
	version         = "0.5.0"     // SDK version
	signatureMethod = "hmac-sha1" // Signature method

	// OffsetNewest stands for the log head offset, i.e. the offset that will be
	// assigned to the next message that will be produced to the shard.
	OffsetNewest = "end"
	// OffsetOldest stands for the oldest offset available on the logstore for a
	// shard.
	OffsetOldest = "begin"
)
@@ -0,0 +1,42 @@ | |||
package alils | |||
// InputDetail define log detail
type InputDetail struct {
	LogType       string   `json:"logType"`
	LogPath       string   `json:"logPath"`
	FilePattern   string   `json:"filePattern"`
	LocalStorage  bool     `json:"localStorage"`
	TimeFormat    string   `json:"timeFormat"`
	LogBeginRegex string   `json:"logBeginRegex"`
	Regex         string   `json:"regex"`
	// NOTE(review): the singular JSON names "key"/"filterKey" for these
	// plural fields look odd — presumably they match the SLS API schema;
	// verify against the service documentation before changing.
	Keys        []string `json:"key"`
	FilterKeys  []string `json:"filterKey"`
	FilterRegex []string `json:"filterRegex"`
	TopicFormat string   `json:"topicFormat"`
}

// OutputDetail define the output detail
type OutputDetail struct {
	Endpoint     string `json:"endpoint"`
	LogStoreName string `json:"logstoreName"`
}

// LogConfig define Log Config
type LogConfig struct {
	Name         string       `json:"configName"`
	InputType    string       `json:"inputType"`
	InputDetail  InputDetail  `json:"inputDetail"`
	OutputType   string       `json:"outputType"`
	OutputDetail OutputDetail `json:"outputDetail"`

	CreateTime     uint32
	LastModifyTime uint32

	// project is the owning LogProject, set when the config is fetched.
	project *LogProject
}
// GetAppliedMachineGroup returns applied machine group of this config.
// NOTE(review): the confName parameter is ignored — the lookup always uses
// c.Name. Confirm no caller relies on passing a different config name here.
func (c *LogConfig) GetAppliedMachineGroup(confName string) (groupNames []string, err error) {
	groupNames, err = c.project.GetAppliedMachineGroups(c.Name)
	return
}
@@ -0,0 +1,819 @@ | |||
/* | |||
Package alils implements the SDK(v0.5.0) of Simple Log Service(abbr. SLS). | |||
For more description about SLS, please read this article: | |||
http://gitlab.alibaba-inc.com/sls/doc. | |||
*/ | |||
package alils | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"io/ioutil" | |||
"net/http" | |||
"net/http/httputil" | |||
) | |||
// errorMessage mirrors the error payload carried in an SLS HTTP response.
type errorMessage struct {
	Code    string `json:"errorCode"`
	Message string `json:"errorMessage"`
}

// LogProject Define the Ali Project detail
type LogProject struct {
	Name            string // Project name
	Endpoint        string // IP or hostname of SLS endpoint
	AccessKeyID     string
	AccessKeySecret string
}

// NewLogProject creates a new SLS project handle. No network I/O happens
// here, so the returned error is always nil.
func NewLogProject(name, endpoint, AccessKeyID, accessKeySecret string) (p *LogProject, err error) {
	project := LogProject{
		Name:            name,
		Endpoint:        endpoint,
		AccessKeyID:     AccessKeyID,
		AccessKeySecret: accessKeySecret,
	}
	return &project, nil
}
// ListLogStore returns all logstore names of project p. | |||
func (p *LogProject) ListLogStore() (storeNames []string, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/logstores") | |||
r, err := request(p, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to list logstore") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
type Body struct { | |||
Count int | |||
LogStores []string | |||
} | |||
body := &Body{} | |||
err = json.Unmarshal(buf, body) | |||
if err != nil { | |||
return | |||
} | |||
storeNames = body.LogStores | |||
return | |||
} | |||
// GetLogStore returns logstore according by logstore name. | |||
func (p *LogProject) GetLogStore(name string) (s *LogStore, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
r, err := request(p, "GET", "/logstores/"+name, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to get logstore") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
s = &LogStore{} | |||
err = json.Unmarshal(buf, s) | |||
if err != nil { | |||
return | |||
} | |||
s.project = p | |||
return | |||
} | |||
// CreateLogStore creates a new logstore in SLS, | |||
// where name is logstore name, | |||
// and ttl is time-to-live(in day) of logs, | |||
// and shardCnt is the number of shards. | |||
func (p *LogProject) CreateLogStore(name string, ttl, shardCnt int) (err error) { | |||
type Body struct { | |||
Name string `json:"logstoreName"` | |||
TTL int `json:"ttl"` | |||
ShardCount int `json:"shardCount"` | |||
} | |||
store := &Body{ | |||
Name: name, | |||
TTL: ttl, | |||
ShardCount: shardCnt, | |||
} | |||
body, err := json.Marshal(store) | |||
if err != nil { | |||
return | |||
} | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)), | |||
"Content-Type": "application/json", | |||
"Accept-Encoding": "deflate", // TODO: support lz4 | |||
} | |||
r, err := request(p, "POST", "/logstores", h, body) | |||
if err != nil { | |||
return | |||
} | |||
body, err = ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to create logstore") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// DeleteLogStore deletes a logstore according by logstore name. | |||
func (p *LogProject) DeleteLogStore(name string) (err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
r, err := request(p, "DELETE", "/logstores/"+name, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
body, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to delete logstore") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// UpdateLogStore updates a logstore according by logstore name, | |||
// obviously we can't modify the logstore name itself. | |||
func (p *LogProject) UpdateLogStore(name string, ttl, shardCnt int) (err error) { | |||
type Body struct { | |||
Name string `json:"logstoreName"` | |||
TTL int `json:"ttl"` | |||
ShardCount int `json:"shardCount"` | |||
} | |||
store := &Body{ | |||
Name: name, | |||
TTL: ttl, | |||
ShardCount: shardCnt, | |||
} | |||
body, err := json.Marshal(store) | |||
if err != nil { | |||
return | |||
} | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)), | |||
"Content-Type": "application/json", | |||
"Accept-Encoding": "deflate", // TODO: support lz4 | |||
} | |||
r, err := request(p, "PUT", "/logstores", h, body) | |||
if err != nil { | |||
return | |||
} | |||
body, err = ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to update logstore") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// ListMachineGroup returns machine group name list and the total number of machine groups. | |||
// The offset starts from 0 and the size is the max number of machine groups could be returned. | |||
func (p *LogProject) ListMachineGroup(offset, size int) (m []string, total int, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
if size <= 0 { | |||
size = 500 | |||
} | |||
uri := fmt.Sprintf("/machinegroups?offset=%v&size=%v", offset, size) | |||
r, err := request(p, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to list machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
type Body struct { | |||
MachineGroups []string | |||
Count int | |||
Total int | |||
} | |||
body := &Body{} | |||
err = json.Unmarshal(buf, body) | |||
if err != nil { | |||
return | |||
} | |||
m = body.MachineGroups | |||
total = body.Total | |||
return | |||
} | |||
// GetMachineGroup retruns machine group according by machine group name. | |||
func (p *LogProject) GetMachineGroup(name string) (m *MachineGroup, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
r, err := request(p, "GET", "/machinegroups/"+name, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to get machine group:%v", name) | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
m = &MachineGroup{} | |||
err = json.Unmarshal(buf, m) | |||
if err != nil { | |||
return | |||
} | |||
m.project = p | |||
return | |||
} | |||
// CreateMachineGroup creates a new machine group in SLS. | |||
func (p *LogProject) CreateMachineGroup(m *MachineGroup) (err error) { | |||
body, err := json.Marshal(m) | |||
if err != nil { | |||
return | |||
} | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)), | |||
"Content-Type": "application/json", | |||
"Accept-Encoding": "deflate", // TODO: support lz4 | |||
} | |||
r, err := request(p, "POST", "/machinegroups", h, body) | |||
if err != nil { | |||
return | |||
} | |||
body, err = ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to create machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// UpdateMachineGroup updates a machine group. | |||
func (p *LogProject) UpdateMachineGroup(m *MachineGroup) (err error) { | |||
body, err := json.Marshal(m) | |||
if err != nil { | |||
return | |||
} | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)), | |||
"Content-Type": "application/json", | |||
"Accept-Encoding": "deflate", // TODO: support lz4 | |||
} | |||
r, err := request(p, "PUT", "/machinegroups/"+m.Name, h, body) | |||
if err != nil { | |||
return | |||
} | |||
body, err = ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to update machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// DeleteMachineGroup deletes machine group according machine group name. | |||
func (p *LogProject) DeleteMachineGroup(name string) (err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
r, err := request(p, "DELETE", "/machinegroups/"+name, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
body, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to delete machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// ListConfig returns config names list and the total number of configs. | |||
// The offset starts from 0 and the size is the max number of configs could be returned. | |||
func (p *LogProject) ListConfig(offset, size int) (cfgNames []string, total int, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
if size <= 0 { | |||
size = 100 | |||
} | |||
uri := fmt.Sprintf("/configs?offset=%v&size=%v", offset, size) | |||
r, err := request(p, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to delete machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
type Body struct { | |||
Total int | |||
Configs []string | |||
} | |||
body := &Body{} | |||
err = json.Unmarshal(buf, body) | |||
if err != nil { | |||
return | |||
} | |||
cfgNames = body.Configs | |||
total = body.Total | |||
return | |||
} | |||
// GetConfig returns config according by config name. | |||
func (p *LogProject) GetConfig(name string) (c *LogConfig, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
r, err := request(p, "GET", "/configs/"+name, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to delete config") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
c = &LogConfig{} | |||
err = json.Unmarshal(buf, c) | |||
if err != nil { | |||
return | |||
} | |||
c.project = p | |||
return | |||
} | |||
// UpdateConfig updates a config. | |||
func (p *LogProject) UpdateConfig(c *LogConfig) (err error) { | |||
body, err := json.Marshal(c) | |||
if err != nil { | |||
return | |||
} | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)), | |||
"Content-Type": "application/json", | |||
"Accept-Encoding": "deflate", // TODO: support lz4 | |||
} | |||
r, err := request(p, "PUT", "/configs/"+c.Name, h, body) | |||
if err != nil { | |||
return | |||
} | |||
body, err = ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to update config") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// CreateConfig creates a new config in SLS. | |||
func (p *LogProject) CreateConfig(c *LogConfig) (err error) { | |||
body, err := json.Marshal(c) | |||
if err != nil { | |||
return | |||
} | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)), | |||
"Content-Type": "application/json", | |||
"Accept-Encoding": "deflate", // TODO: support lz4 | |||
} | |||
r, err := request(p, "POST", "/configs", h, body) | |||
if err != nil { | |||
return | |||
} | |||
body, err = ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to update config") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// DeleteConfig deletes a config according by config name. | |||
func (p *LogProject) DeleteConfig(name string) (err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
r, err := request(p, "DELETE", "/configs/"+name, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
body, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(body, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to delete config") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// GetAppliedMachineGroups returns applied machine group names list according config name. | |||
func (p *LogProject) GetAppliedMachineGroups(confName string) (groupNames []string, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/configs/%v/machinegroups", confName) | |||
r, err := request(p, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to get applied machine groups") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
type Body struct { | |||
Count int | |||
Machinegroups []string | |||
} | |||
body := &Body{} | |||
err = json.Unmarshal(buf, body) | |||
if err != nil { | |||
return | |||
} | |||
groupNames = body.Machinegroups | |||
return | |||
} | |||
// GetAppliedConfigs returns applied config names list according machine group name groupName. | |||
func (p *LogProject) GetAppliedConfigs(groupName string) (confNames []string, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/machinegroups/%v/configs", groupName) | |||
r, err := request(p, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to applied configs") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
type Cfg struct { | |||
Count int `json:"count"` | |||
Configs []string `json:"configs"` | |||
} | |||
body := &Cfg{} | |||
err = json.Unmarshal(buf, body) | |||
if err != nil { | |||
return | |||
} | |||
confNames = body.Configs | |||
return | |||
} | |||
// ApplyConfigToMachineGroup applies config to machine group. | |||
func (p *LogProject) ApplyConfigToMachineGroup(confName, groupName string) (err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/machinegroups/%v/configs/%v", groupName, confName) | |||
r, err := request(p, "PUT", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to apply config to machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// RemoveConfigFromMachineGroup removes config from machine group. | |||
func (p *LogProject) RemoveConfigFromMachineGroup(confName, groupName string) (err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/machinegroups/%v/configs/%v", groupName, confName) | |||
r, err := request(p, "DELETE", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to remove config from machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Printf("%s\n", dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} |
@@ -0,0 +1,271 @@ | |||
package alils | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"io/ioutil" | |||
"net/http" | |||
"net/http/httputil" | |||
"strconv" | |||
lz4 "github.com/cloudflare/golz4" | |||
"github.com/gogo/protobuf/proto" | |||
) | |||
// LogStore Store the logs
type LogStore struct {
	Name       string `json:"logstoreName"` // logstore name inside the project
	TTL        int    // data time-to-live as reported by the SLS API (presumably days — confirm against API docs)
	ShardCount int    // number of shards in this logstore

	CreateTime     uint32 // server-side creation timestamp
	LastModifyTime uint32 // server-side last-modification timestamp

	project *LogProject // owning project; used by methods to issue API requests
}

// Shard define the Log Shard
type Shard struct {
	ShardID int `json:"shardID"` // numeric shard identifier
}
// ListShards returns shard id list of this logstore. | |||
func (s *LogStore) ListShards() (shardIDs []int, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/logstores/%v/shards", s.Name) | |||
r, err := request(s.project, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to list logstore") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Println(dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
var shards []*Shard | |||
err = json.Unmarshal(buf, &shards) | |||
if err != nil { | |||
return | |||
} | |||
for _, v := range shards { | |||
shardIDs = append(shardIDs, v.ShardID) | |||
} | |||
return | |||
} | |||
// PutLogs put logs into logstore. | |||
// The callers should transform user logs into LogGroup. | |||
func (s *LogStore) PutLogs(lg *LogGroup) (err error) { | |||
body, err := proto.Marshal(lg) | |||
if err != nil { | |||
return | |||
} | |||
// Compresse body with lz4 | |||
out := make([]byte, lz4.CompressBound(body)) | |||
n, err := lz4.Compress(body, out) | |||
if err != nil { | |||
return | |||
} | |||
h := map[string]string{ | |||
"x-sls-compresstype": "lz4", | |||
"x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)), | |||
"Content-Type": "application/x-protobuf", | |||
} | |||
uri := fmt.Sprintf("/logstores/%v", s.Name) | |||
r, err := request(s.project, "POST", uri, h, out[:n]) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to put logs") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Println(dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
return | |||
} | |||
// GetCursor gets log cursor of one shard specified by shardID. | |||
// The from can be in three form: a) unix timestamp in seccond, b) "begin", c) "end". | |||
// For more detail please read: http://gitlab.alibaba-inc.com/sls/doc/blob/master/api/shard.md#logstore | |||
func (s *LogStore) GetCursor(shardID int, from string) (cursor string, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/logstores/%v/shards/%v?type=cursor&from=%v", | |||
s.Name, shardID, from) | |||
r, err := request(s.project, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to get cursor") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Println(dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
type Body struct { | |||
Cursor string | |||
} | |||
body := &Body{} | |||
err = json.Unmarshal(buf, body) | |||
if err != nil { | |||
return | |||
} | |||
cursor = body.Cursor | |||
return | |||
} | |||
// GetLogsBytes gets logs binary data from shard specified by shardID according cursor. | |||
// The logGroupMaxCount is the max number of logGroup could be returned. | |||
// The nextCursor is the next curosr can be used to read logs at next time. | |||
func (s *LogStore) GetLogsBytes(shardID int, cursor string, | |||
logGroupMaxCount int) (out []byte, nextCursor string, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
"Accept": "application/x-protobuf", | |||
"Accept-Encoding": "lz4", | |||
} | |||
uri := fmt.Sprintf("/logstores/%v/shards/%v?type=logs&cursor=%v&count=%v", | |||
s.Name, shardID, cursor, logGroupMaxCount) | |||
r, err := request(s.project, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to get cursor") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Println(dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
v, ok := r.Header["X-Sls-Compresstype"] | |||
if !ok || len(v) == 0 { | |||
err = fmt.Errorf("can't find 'x-sls-compresstype' header") | |||
return | |||
} | |||
if v[0] != "lz4" { | |||
err = fmt.Errorf("unexpected compress type:%v", v[0]) | |||
return | |||
} | |||
v, ok = r.Header["X-Sls-Cursor"] | |||
if !ok || len(v) == 0 { | |||
err = fmt.Errorf("can't find 'x-sls-cursor' header") | |||
return | |||
} | |||
nextCursor = v[0] | |||
v, ok = r.Header["X-Sls-Bodyrawsize"] | |||
if !ok || len(v) == 0 { | |||
err = fmt.Errorf("can't find 'x-sls-bodyrawsize' header") | |||
return | |||
} | |||
bodyRawSize, err := strconv.Atoi(v[0]) | |||
if err != nil { | |||
return | |||
} | |||
out = make([]byte, bodyRawSize) | |||
err = lz4.Uncompress(buf, out) | |||
if err != nil { | |||
return | |||
} | |||
return | |||
} | |||
// LogsBytesDecode decodes logs binary data retruned by GetLogsBytes API | |||
func LogsBytesDecode(data []byte) (gl *LogGroupList, err error) { | |||
gl = &LogGroupList{} | |||
err = proto.Unmarshal(data, gl) | |||
if err != nil { | |||
return | |||
} | |||
return | |||
} | |||
// GetLogs gets logs from shard specified by shardID according cursor. | |||
// The logGroupMaxCount is the max number of logGroup could be returned. | |||
// The nextCursor is the next curosr can be used to read logs at next time. | |||
func (s *LogStore) GetLogs(shardID int, cursor string, | |||
logGroupMaxCount int) (gl *LogGroupList, nextCursor string, err error) { | |||
out, nextCursor, err := s.GetLogsBytes(shardID, cursor, logGroupMaxCount) | |||
if err != nil { | |||
return | |||
} | |||
gl, err = LogsBytesDecode(out) | |||
if err != nil { | |||
return | |||
} | |||
return | |||
} |
@@ -0,0 +1,91 @@ | |||
package alils | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"io/ioutil" | |||
"net/http" | |||
"net/http/httputil" | |||
) | |||
// MachineGroupAttribute define the Attribute
type MachineGroupAttribute struct {
	ExternalName string `json:"externalName"` // external identifier of the group
	TopicName    string `json:"groupTopic"`   // log topic assigned to this group
}

// MachineGroup define the machine Group
type MachineGroup struct {
	Name          string                `json:"groupName"`
	Type          string                `json:"groupType"`
	MachineIDType string                `json:"machineIdentifyType"` // how members are identified (presumably "ip" or "userdefined" — confirm against SLS docs)
	MachineIDList []string              `json:"machineList"`         // member identifiers, interpreted per MachineIDType
	Attribute     MachineGroupAttribute `json:"groupAttribute"`

	CreateTime     uint32 // server-side creation timestamp
	LastModifyTime uint32 // server-side last-modification timestamp

	project *LogProject // owning project; used by methods to issue API requests
}

// Machine define the Machine
type Machine struct {
	IP            string
	UniqueID      string `json:"machine-uniqueid"`
	UserdefinedID string `json:"userdefined-id"`
}

// MachineList define the Machine List
type MachineList struct {
	Total    int        // total machines in the group (may exceed len(Machines) if paged)
	Machines []*Machine // machines returned by this call
}
// ListMachines returns machine list of this machine group. | |||
func (m *MachineGroup) ListMachines() (ms []*Machine, total int, err error) { | |||
h := map[string]string{ | |||
"x-sls-bodyrawsize": "0", | |||
} | |||
uri := fmt.Sprintf("/machinegroups/%v/machines", m.Name) | |||
r, err := request(m.project, "GET", uri, h, nil) | |||
if err != nil { | |||
return | |||
} | |||
buf, err := ioutil.ReadAll(r.Body) | |||
if err != nil { | |||
return | |||
} | |||
if r.StatusCode != http.StatusOK { | |||
errMsg := &errorMessage{} | |||
err = json.Unmarshal(buf, errMsg) | |||
if err != nil { | |||
err = fmt.Errorf("failed to remove config from machine group") | |||
dump, _ := httputil.DumpResponse(r, true) | |||
fmt.Println(dump) | |||
return | |||
} | |||
err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) | |||
return | |||
} | |||
body := &MachineList{} | |||
err = json.Unmarshal(buf, body) | |||
if err != nil { | |||
return | |||
} | |||
ms = body.Machines | |||
total = body.Total | |||
return | |||
} | |||
// GetAppliedConfigs returns applied configs of this machine group. | |||
func (m *MachineGroup) GetAppliedConfigs() (confNames []string, err error) { | |||
confNames, err = m.project.GetAppliedConfigs(m.Name) | |||
return | |||
} |
@@ -0,0 +1,62 @@ | |||
package alils | |||
import ( | |||
"bytes" | |||
"crypto/md5" | |||
"fmt" | |||
"net/http" | |||
) | |||
// request sends a request to SLS. | |||
func request(project *LogProject, method, uri string, headers map[string]string, | |||
body []byte) (resp *http.Response, err error) { | |||
// The caller should provide 'x-sls-bodyrawsize' header | |||
if _, ok := headers["x-sls-bodyrawsize"]; !ok { | |||
err = fmt.Errorf("Can't find 'x-sls-bodyrawsize' header") | |||
return | |||
} | |||
// SLS public request headers | |||
headers["Host"] = project.Name + "." + project.Endpoint | |||
headers["Date"] = nowRFC1123() | |||
headers["x-sls-apiversion"] = version | |||
headers["x-sls-signaturemethod"] = signatureMethod | |||
if body != nil { | |||
bodyMD5 := fmt.Sprintf("%X", md5.Sum(body)) | |||
headers["Content-MD5"] = bodyMD5 | |||
if _, ok := headers["Content-Type"]; !ok { | |||
err = fmt.Errorf("Can't find 'Content-Type' header") | |||
return | |||
} | |||
} | |||
// Calc Authorization | |||
// Authorization = "SLS <AccessKeyID>:<Signature>" | |||
digest, err := signature(project, method, uri, headers) | |||
if err != nil { | |||
return | |||
} | |||
auth := fmt.Sprintf("SLS %v:%v", project.AccessKeyID, digest) | |||
headers["Authorization"] = auth | |||
// Initialize http request | |||
reader := bytes.NewReader(body) | |||
urlStr := fmt.Sprintf("http://%v.%v%v", project.Name, project.Endpoint, uri) | |||
req, err := http.NewRequest(method, urlStr, reader) | |||
if err != nil { | |||
return | |||
} | |||
for k, v := range headers { | |||
req.Header.Add(k, v) | |||
} | |||
// Get ready to do request | |||
resp, err = http.DefaultClient.Do(req) | |||
if err != nil { | |||
return | |||
} | |||
return | |||
} |
@@ -0,0 +1,111 @@ | |||
package alils | |||
import ( | |||
"crypto/hmac" | |||
"crypto/sha1" | |||
"encoding/base64" | |||
"fmt" | |||
"net/url" | |||
"sort" | |||
"strings" | |||
"time" | |||
) | |||
// GMT location | |||
var gmtLoc = time.FixedZone("GMT", 0) | |||
// NowRFC1123 returns now time in RFC1123 format with GMT timezone, | |||
// eg. "Mon, 02 Jan 2006 15:04:05 GMT". | |||
func nowRFC1123() string { | |||
return time.Now().In(gmtLoc).Format(time.RFC1123) | |||
} | |||
// signature calculates a request's signature digest.
//
// The digest is base64(HMAC-SHA1(SignString, AccessKeySecret)), where
// SignString is assembled below from the method, optional body headers,
// date, canonicalized x-sls-* headers, and the canonicalized resource.
func signature(project *LogProject, method, uri string,
	headers map[string]string) (digest string, err error) {
	var contentMD5, contentType, date, canoHeaders, canoResource string
	var slsHeaderKeys sort.StringSlice

	// SignString = VERB + "\n"
	//             + CONTENT-MD5 + "\n"
	//             + CONTENT-TYPE + "\n"
	//             + DATE + "\n"
	//             + CanonicalizedSLSHeaders + "\n"
	//             + CanonicalizedResource

	// Content-MD5 and Content-Type are optional; a missing header
	// contributes an empty line to the sign string.
	if val, ok := headers["Content-MD5"]; ok {
		contentMD5 = val
	}

	if val, ok := headers["Content-Type"]; ok {
		contentType = val
	}

	// Date is mandatory for signing.
	date, ok := headers["Date"]
	if !ok {
		err = fmt.Errorf("Can't find 'Date' header")
		return
	}

	// Calc CanonicalizedSLSHeaders: collect x-sls-* headers with lowercased,
	// trimmed keys and trimmed values, then join them sorted by key as
	// "key:value" lines (no trailing newline).
	slsHeaders := make(map[string]string, len(headers))
	for k, v := range headers {
		l := strings.TrimSpace(strings.ToLower(k))
		if strings.HasPrefix(l, "x-sls-") {
			slsHeaders[l] = strings.TrimSpace(v)
			slsHeaderKeys = append(slsHeaderKeys, l)
		}
	}

	sort.Sort(slsHeaderKeys)
	for i, k := range slsHeaderKeys {
		canoHeaders += k + ":" + slsHeaders[k]
		if i+1 < len(slsHeaderKeys) {
			canoHeaders += "\n"
		}
	}

	// Calc CanonicalizedResource: the escaped path plus the query string
	// with keys sorted.
	u, err := url.Parse(uri)
	if err != nil {
		return
	}

	canoResource += url.QueryEscape(u.Path)
	if u.RawQuery != "" {
		var keys sort.StringSlice
		vals := u.Query()

		for k := range vals {
			keys = append(keys, k)
		}

		sort.Sort(keys)
		canoResource += "?"
		for i, k := range keys {
			if i > 0 {
				canoResource += "&"
			}

			// NOTE(review): multiple values for the same key are
			// concatenated with no separator between them — looks like a
			// latent bug for repeated query parameters; confirm against the
			// SLS signing spec before changing.
			for _, v := range vals[k] {
				canoResource += k + "=" + v
			}
		}
	}

	signStr := method + "\n" +
		contentMD5 + "\n" +
		contentType + "\n" +
		date + "\n" +
		canoHeaders + "\n" +
		canoResource

	// Signature = base64(hmac-sha1(UTF8-Encoding-Of(SignString),AccessKeySecret))
	mac := hmac.New(sha1.New, []byte(project.AccessKeySecret))
	_, err = mac.Write([]byte(signStr))
	if err != nil {
		return
	}
	digest = base64.StdEncoding.EncodeToString(mac.Sum(nil))
	return
}
@@ -0,0 +1,117 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"encoding/json" | |||
"io" | |||
"net" | |||
"time" | |||
) | |||
// connWriter implements LoggerInterface.
// it writes messages in keep-live tcp connection.
type connWriter struct {
	lg             *logWriter     // formatter/writer wrapping the live connection
	innerWriter    io.WriteCloser // the underlying network connection; nil until connect()
	ReconnectOnMsg bool           `json:"reconnectOnMsg"` // dial a fresh connection for every message and close it after
	Reconnect      bool           `json:"reconnect"`      // force one reconnect on the next message, then reset
	Net            string         `json:"net"`            // network passed to net.Dial, e.g. "tcp"
	Addr           string         `json:"addr"`           // address passed to net.Dial
	Level          int            `json:"level"`          // messages above this level are dropped
}
// NewConn create new ConnWrite returning as LoggerInterface. | |||
func NewConn() Logger { | |||
conn := new(connWriter) | |||
conn.Level = LevelTrace | |||
return conn | |||
} | |||
// Init init connection writer with json config. | |||
// json config only need key "level". | |||
func (c *connWriter) Init(jsonConfig string) error { | |||
return json.Unmarshal([]byte(jsonConfig), c) | |||
} | |||
// WriteMsg write message in connection. | |||
// if connection is down, try to re-connect. | |||
func (c *connWriter) WriteMsg(when time.Time, msg string, level int) error { | |||
if level > c.Level { | |||
return nil | |||
} | |||
if c.needToConnectOnMsg() { | |||
err := c.connect() | |||
if err != nil { | |||
return err | |||
} | |||
} | |||
if c.ReconnectOnMsg { | |||
defer c.innerWriter.Close() | |||
} | |||
c.lg.writeln(when, msg) | |||
return nil | |||
} | |||
// Flush implementing method. empty.
func (c *connWriter) Flush() {

}

// Destroy destroy connection writer and close tcp listener.
func (c *connWriter) Destroy() {
	// Close the connection only if one was ever established.
	if c.innerWriter != nil {
		c.innerWriter.Close()
	}
}
func (c *connWriter) connect() error { | |||
if c.innerWriter != nil { | |||
c.innerWriter.Close() | |||
c.innerWriter = nil | |||
} | |||
conn, err := net.Dial(c.Net, c.Addr) | |||
if err != nil { | |||
return err | |||
} | |||
if tcpConn, ok := conn.(*net.TCPConn); ok { | |||
tcpConn.SetKeepAlive(true) | |||
} | |||
c.innerWriter = conn | |||
c.lg = newLogWriter(conn) | |||
return nil | |||
} | |||
func (c *connWriter) needToConnectOnMsg() bool { | |||
if c.Reconnect { | |||
c.Reconnect = false | |||
return true | |||
} | |||
if c.innerWriter == nil { | |||
return true | |||
} | |||
return c.ReconnectOnMsg | |||
} | |||
// init registers the connection adapter so SetLogger(AdapterConn, ...) works.
func init() {
	Register(AdapterConn, NewConn)
}
@@ -0,0 +1,25 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"testing" | |||
) | |||
// TestConn exercises the conn adapter end to end. It asserts nothing:
// it only checks that configuring the adapter and logging a message do
// not panic. NOTE(review): the write presumably needs a listener on
// :7020 to actually connect — confirm whether failures are surfaced.
func TestConn(t *testing.T) {
	log := NewLogger(1000)
	log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
	log.Informational("informational")
}
@@ -0,0 +1,99 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"encoding/json" | |||
"os" | |||
"strings" | |||
"time" | |||
"github.com/shiena/ansicolor" | |||
) | |||
// brush is a color join function
type brush func(string) string

// newBrush return a fix color Brush
func newBrush(color string) brush {
	// Wrap text in the ANSI escape for `color`, resetting afterwards.
	return func(text string) string {
		return "\033[" + color + "m" + text + "\033[0m"
	}
}
// colors maps a log level (used as the index in consoleWriter.WriteMsg)
// to the brush that colorizes its level prefix.
var colors = []brush{
	newBrush("1;37"), // Emergency          white
	newBrush("1;36"), // Alert              cyan
	newBrush("1;35"), // Critical           magenta
	newBrush("1;31"), // Error              red
	newBrush("1;33"), // Warning            yellow
	newBrush("1;32"), // Notice             green
	newBrush("1;34"), // Informational      blue
	newBrush("1;44"), // Debug              Background blue
}
// consoleWriter implements LoggerInterface and writes messages to terminal.
type consoleWriter struct {
	lg       *logWriter // writer around stdout (ANSI-color aware)
	Level    int        `json:"level"` // messages above this level are dropped
	Colorful bool       `json:"color"` //this filed is useful only when system's terminal supports color
}
// NewConsole create ConsoleWriter returning as LoggerInterface. | |||
func NewConsole() Logger { | |||
cw := &consoleWriter{ | |||
lg: newLogWriter(ansicolor.NewAnsiColorWriter(os.Stdout)), | |||
Level: LevelDebug, | |||
Colorful: true, | |||
} | |||
return cw | |||
} | |||
// Init init console logger. | |||
// jsonConfig like '{"level":LevelTrace}'. | |||
func (c *consoleWriter) Init(jsonConfig string) error { | |||
if len(jsonConfig) == 0 { | |||
return nil | |||
} | |||
return json.Unmarshal([]byte(jsonConfig), c) | |||
} | |||
// WriteMsg write message in console. | |||
func (c *consoleWriter) WriteMsg(when time.Time, msg string, level int) error { | |||
if level > c.Level { | |||
return nil | |||
} | |||
if c.Colorful { | |||
msg = strings.Replace(msg, levelPrefix[level], colors[level](levelPrefix[level]), 1) | |||
} | |||
c.lg.writeln(when, msg) | |||
return nil | |||
} | |||
// Destroy implementing method. empty.
func (c *consoleWriter) Destroy() {

}

// Flush implementing method. empty.
func (c *consoleWriter) Flush() {

}

// init registers the console adapter so SetLogger(AdapterConsole, ...) works.
func init() {
	Register(AdapterConsole, NewConsole)
}
@@ -0,0 +1,51 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"testing" | |||
) | |||
// Try each log level in decreasing order of priority.
// Helper for the console tests below; output is compared visually,
// so the call order here is significant.
func testConsoleCalls(bl *BeeLogger) {
	bl.Emergency("emergency")
	bl.Alert("alert")
	bl.Critical("critical")
	bl.Error("error")
	bl.Warning("warning")
	bl.Notice("notice")
	bl.Informational("informational")
	bl.Debug("debug")
}
// Test console logging by visually comparing the lines being output with and
// without a log level specification. No assertions: a human inspects the
// output; log2 at level 3 should print only Emergency..Error.
func TestConsole(t *testing.T) {
	log1 := NewLogger(10000)
	log1.EnableFuncCallDepth(true)
	log1.SetLogger("console", "")
	testConsoleCalls(log1)

	log2 := NewLogger(100)
	log2.SetLogger("console", `{"level":3}`)
	testConsoleCalls(log2)
}
// Test console without color
// Visual check only: the output should carry no ANSI escape sequences.
func TestConsoleNoColor(t *testing.T) {
	log := NewLogger(100)
	log.SetLogger("console", `{"color":false}`)
	testConsoleCalls(log)
}
@@ -0,0 +1,80 @@ | |||
package es | |||
import ( | |||
"encoding/json" | |||
"errors" | |||
"fmt" | |||
"net" | |||
"net/url" | |||
"time" | |||
"github.com/OwnLocal/goes" | |||
"github.com/astaxie/beego/logs" | |||
) | |||
// NewES return a LoggerInterface | |||
func NewES() logs.Logger { | |||
cw := &esLogger{ | |||
Level: logs.LevelDebug, | |||
} | |||
return cw | |||
} | |||
// esLogger ships log messages to Elasticsearch via the goes client.
type esLogger struct {
	*goes.Client        // set by Init from the DSN's host:port
	DSN          string `json:"dsn"`   // e.g. "http://localhost:9200/"; must include a path prefix and host:port
	Level        int    `json:"level"` // messages above this level are dropped
}
// {"dsn":"http://localhost:9200/","level":1} | |||
func (el *esLogger) Init(jsonconfig string) error { | |||
err := json.Unmarshal([]byte(jsonconfig), el) | |||
if err != nil { | |||
return err | |||
} | |||
if el.DSN == "" { | |||
return errors.New("empty dsn") | |||
} else if u, err := url.Parse(el.DSN); err != nil { | |||
return err | |||
} else if u.Path == "" { | |||
return errors.New("missing prefix") | |||
} else if host, port, err := net.SplitHostPort(u.Host); err != nil { | |||
return err | |||
} else { | |||
conn := goes.NewClient(host, port) | |||
el.Client = conn | |||
} | |||
return nil | |||
} | |||
// WriteMsg will write the msg and level into es | |||
func (el *esLogger) WriteMsg(when time.Time, msg string, level int) error { | |||
if level > el.Level { | |||
return nil | |||
} | |||
vals := make(map[string]interface{}) | |||
vals["@timestamp"] = when.Format(time.RFC3339) | |||
vals["@msg"] = msg | |||
d := goes.Document{ | |||
Index: fmt.Sprintf("%04d.%02d.%02d", when.Year(), when.Month(), when.Day()), | |||
Type: "logs", | |||
Fields: vals, | |||
} | |||
_, err := el.Index(d, nil) | |||
return err | |||
} | |||
// Destroy is a empty method
func (el *esLogger) Destroy() {

}

// Flush is a empty method
func (el *esLogger) Flush() {

}

// init registers the es adapter so SetLogger(logs.AdapterEs, ...) works.
func init() {
	logs.Register(logs.AdapterEs, NewES)
}
@@ -0,0 +1,405 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"bytes" | |||
"encoding/json" | |||
"errors" | |||
"fmt" | |||
"io" | |||
"os" | |||
"path" | |||
"path/filepath" | |||
"strconv" | |||
"strings" | |||
"sync" | |||
"time" | |||
) | |||
// fileLogWriter implements LoggerInterface.
// It writes messages by lines limit, file size limit, or time frequency.
type fileLogWriter struct {
	sync.RWMutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize
	// The opened file
	Filename   string `json:"filename"`
	fileWriter *os.File

	// Rotate at line
	MaxLines         int `json:"maxlines"`
	maxLinesCurLines int // lines written to the current file

	MaxFiles         int `json:"maxfiles"` // cap on numbered rotation files
	MaxFilesCurFiles int

	// Rotate at size
	MaxSize        int `json:"maxsize"`
	maxSizeCurSize int // bytes written to the current file

	// Rotate daily
	Daily         bool  `json:"daily"`
	MaxDays       int64 `json:"maxdays"` // retention for rotated files
	dailyOpenDate int   // day-of-month when the current file was opened
	dailyOpenTime time.Time

	// Rotate hourly
	Hourly         bool  `json:"hourly"`
	MaxHours       int64 `json:"maxhours"` // retention for rotated files
	hourlyOpenDate int   // NOTE: despite the name, holds the HOUR the current file was opened
	hourlyOpenTime time.Time

	Rotate bool `json:"rotate"` // master switch for all rotation

	Level int `json:"level"` // messages above this level are dropped

	Perm string `json:"perm"` // octal string, e.g. "0660", for the active log file

	RotatePerm string `json:"rotateperm"` // octal string applied to rotated files

	fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix
}
// newFileWriter create a FileLogWriter returning as LoggerInterface. | |||
func newFileWriter() Logger { | |||
w := &fileLogWriter{ | |||
Daily: true, | |||
MaxDays: 7, | |||
Hourly: false, | |||
MaxHours: 168, | |||
Rotate: true, | |||
RotatePerm: "0440", | |||
Level: LevelTrace, | |||
Perm: "0660", | |||
MaxLines: 10000000, | |||
MaxFiles: 999, | |||
MaxSize: 1 << 28, | |||
} | |||
return w | |||
} | |||
// Init file logger with json config. | |||
// jsonConfig like: | |||
// { | |||
// "filename":"logs/beego.log", | |||
// "maxLines":10000, | |||
// "maxsize":1024, | |||
// "daily":true, | |||
// "maxDays":15, | |||
// "rotate":true, | |||
// "perm":"0600" | |||
// } | |||
func (w *fileLogWriter) Init(jsonConfig string) error { | |||
err := json.Unmarshal([]byte(jsonConfig), w) | |||
if err != nil { | |||
return err | |||
} | |||
if len(w.Filename) == 0 { | |||
return errors.New("jsonconfig must have filename") | |||
} | |||
w.suffix = filepath.Ext(w.Filename) | |||
w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix) | |||
if w.suffix == "" { | |||
w.suffix = ".log" | |||
} | |||
err = w.startLogger() | |||
return err | |||
} | |||
// start file logger. create log file and set to locker-inside file writer. | |||
func (w *fileLogWriter) startLogger() error { | |||
file, err := w.createLogFile() | |||
if err != nil { | |||
return err | |||
} | |||
if w.fileWriter != nil { | |||
w.fileWriter.Close() | |||
} | |||
w.fileWriter = file | |||
return w.initFd() | |||
} | |||
// needRotateDaily reports whether a rotation is due: line cap hit, size cap
// hit, or the calendar day changed since the file was opened.
// The size parameter is currently unused.
func (w *fileLogWriter) needRotateDaily(size int, day int) bool {
	return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
		(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
		(w.Daily && day != w.dailyOpenDate)
}

// needRotateHourly is the hourly analogue of needRotateDaily; hourlyOpenDate
// stores the hour the file was opened. The size parameter is currently unused.
func (w *fileLogWriter) needRotateHourly(size int, hour int) bool {
	return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
		(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
		(w.Hourly && hour != w.hourlyOpenDate)

}
// WriteMsg write logger message into file.
//
// Rotation uses a double-checked locking pattern: the cheap check runs
// under the read lock, and is re-checked under the write lock before
// rotating, since another writer may have rotated in between.
func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
	if level > w.Level {
		return nil
	}
	// hd is the formatted time header; d/h are the current day and hour
	// used for the rotation checks.
	hd, d, h := formatTimeHeader(when)
	msg = string(hd) + msg + "\n"
	if w.Rotate {
		w.RLock()
		if w.needRotateHourly(len(msg), h) {
			w.RUnlock()
			w.Lock()
			// Re-check: another goroutine may already have rotated.
			if w.needRotateHourly(len(msg), h) {
				if err := w.doRotate(when); err != nil {
					fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
				}
			}
			w.Unlock()
		} else if w.needRotateDaily(len(msg), d) {
			w.RUnlock()
			w.Lock()
			if w.needRotateDaily(len(msg), d) {
				if err := w.doRotate(when); err != nil {
					fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
				}
			}
			w.Unlock()
		} else {
			w.RUnlock()
		}
	}

	// Write and update the rotation counters under the write lock.
	w.Lock()
	_, err := w.fileWriter.Write([]byte(msg))
	if err == nil {
		w.maxLinesCurLines++
		w.maxSizeCurSize += len(msg)
	}
	w.Unlock()
	return err
}
func (w *fileLogWriter) createLogFile() (*os.File, error) { | |||
// Open the log file | |||
perm, err := strconv.ParseInt(w.Perm, 8, 64) | |||
if err != nil { | |||
return nil, err | |||
} | |||
filepath := path.Dir(w.Filename) | |||
os.MkdirAll(filepath, os.FileMode(perm)) | |||
fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm)) | |||
if err == nil { | |||
// Make sure file perm is user set perm cause of `os.OpenFile` will obey umask | |||
os.Chmod(w.Filename, os.FileMode(perm)) | |||
} | |||
return fd, err | |||
} | |||
// initFd seeds the rotation counters from the freshly opened file and, when
// time-based rotation is enabled, spawns a background goroutine that fires
// at the next hour/day boundary.
func (w *fileLogWriter) initFd() error {
	fd := w.fileWriter
	fInfo, err := fd.Stat()
	if err != nil {
		return fmt.Errorf("get stat err: %s", err)
	}
	// Existing file contents count toward the size cap.
	w.maxSizeCurSize = int(fInfo.Size())
	w.dailyOpenTime = time.Now()
	w.dailyOpenDate = w.dailyOpenTime.Day()
	w.hourlyOpenTime = time.Now()
	w.hourlyOpenDate = w.hourlyOpenTime.Hour()
	w.maxLinesCurLines = 0
	// Hourly takes precedence over daily; only one rotation timer runs.
	if w.Hourly {
		go w.hourlyRotate(w.hourlyOpenTime)
	} else if w.Daily {
		go w.dailyRotate(w.dailyOpenTime)
	}
	// Count existing lines so the line cap applies across restarts.
	if fInfo.Size() > 0 && w.MaxLines > 0 {
		count, err := w.lines()
		if err != nil {
			return err
		}
		w.maxLinesCurLines = count
	}
	return nil
}
func (w *fileLogWriter) dailyRotate(openTime time.Time) { | |||
y, m, d := openTime.Add(24 * time.Hour).Date() | |||
nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location()) | |||
tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100)) | |||
<-tm.C | |||
w.Lock() | |||
if w.needRotateDaily(0, time.Now().Day()) { | |||
if err := w.doRotate(time.Now()); err != nil { | |||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) | |||
} | |||
} | |||
w.Unlock() | |||
} | |||
func (w *fileLogWriter) hourlyRotate(openTime time.Time) { | |||
y, m, d := openTime.Add(1 * time.Hour).Date() | |||
h, _, _ := openTime.Add(1 * time.Hour).Clock() | |||
nextHour := time.Date(y, m, d, h, 0, 0, 0, openTime.Location()) | |||
tm := time.NewTimer(time.Duration(nextHour.UnixNano() - openTime.UnixNano() + 100)) | |||
<-tm.C | |||
w.Lock() | |||
if w.needRotateHourly(0, time.Now().Hour()) { | |||
if err := w.doRotate(time.Now()); err != nil { | |||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) | |||
} | |||
} | |||
w.Unlock() | |||
} | |||
func (w *fileLogWriter) lines() (int, error) { | |||
fd, err := os.Open(w.Filename) | |||
if err != nil { | |||
return 0, err | |||
} | |||
defer fd.Close() | |||
buf := make([]byte, 32768) // 32k | |||
count := 0 | |||
lineSep := []byte{'\n'} | |||
for { | |||
c, err := fd.Read(buf) | |||
if err != nil && err != io.EOF { | |||
return count, err | |||
} | |||
count += bytes.Count(buf[:c], lineSep) | |||
if err == io.EOF { | |||
break | |||
} | |||
} | |||
return count, nil | |||
} | |||
// DoRotate means it need to write file in new file.
// new file name like xx.2013-01-01.log (daily) or xx.001.log (by line or size)
// Callers hold the writer lock; on any failure the logger is restarted so
// logging can continue on a fresh file.
func (w *fileLogWriter) doRotate(logTime time.Time) error {
	// file exists
	// Find the next available number
	num := w.MaxFilesCurFiles + 1
	fName := ""
	format := ""
	var openTime time.Time
	rotatePerm, err := strconv.ParseInt(w.RotatePerm, 8, 64)
	if err != nil {
		return err
	}
	_, err = os.Lstat(w.Filename)
	if err != nil {
		//even if the file is not exist or other ,we should RESTART the logger
		goto RESTART_LOGGER
	}
	// Choose the timestamp layout matching the rotation mode.
	if w.Hourly {
		format = "2006010215"
		openTime = w.hourlyOpenTime
	} else if w.Daily {
		format = "2006-01-02"
		openTime = w.dailyOpenTime
	}
	// only when one of them be setted, then the file would be splited
	if w.MaxLines > 0 || w.MaxSize > 0 {
		// Probe archive names until Lstat fails (i.e. the name is free).
		for ; err == nil && num <= w.MaxFiles; num++ {
			fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format(format), num, w.suffix)
			_, err = os.Lstat(fName)
		}
	} else {
		fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", openTime.Format(format), num, w.suffix)
		_, err = os.Lstat(fName)
		w.MaxFilesCurFiles = num
	}
	// return error if the last file checked still existed
	if err == nil {
		return fmt.Errorf("Rotate: Cannot find free log number to rename %s", w.Filename)
	}
	// close fileWriter before rename
	w.fileWriter.Close()
	// Rename the file to its new found name
	// even if occurs error,we MUST guarantee to restart new logger
	err = os.Rename(w.Filename, fName)
	if err != nil {
		goto RESTART_LOGGER
	}
	err = os.Chmod(fName, os.FileMode(rotatePerm))
// RESTART_LOGGER reopens a fresh log file and prunes old archives; it runs
// on both the success and failure paths above.
RESTART_LOGGER:
	startLoggerErr := w.startLogger()
	go w.deleteOldLog()
	if startLoggerErr != nil {
		return fmt.Errorf("Rotate StartLogger: %s", startLoggerErr)
	}
	if err != nil {
		return fmt.Errorf("Rotate: %s", err)
	}
	return nil
}
func (w *fileLogWriter) deleteOldLog() { | |||
dir := filepath.Dir(w.Filename) | |||
filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { | |||
defer func() { | |||
if r := recover(); r != nil { | |||
fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r) | |||
} | |||
}() | |||
if info == nil { | |||
return | |||
} | |||
if w.Hourly { | |||
if !info.IsDir() && info.ModTime().Add(1*time.Hour*time.Duration(w.MaxHours)).Before(time.Now()) { | |||
if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) && | |||
strings.HasSuffix(filepath.Base(path), w.suffix) { | |||
os.Remove(path) | |||
} | |||
} | |||
} else if w.Daily { | |||
if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) { | |||
if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) && | |||
strings.HasSuffix(filepath.Base(path), w.suffix) { | |||
os.Remove(path) | |||
} | |||
} | |||
} | |||
return | |||
}) | |||
} | |||
// Destroy close the file description, close file writer.
// The Close error is intentionally discarded: the writer is being torn down.
func (w *fileLogWriter) Destroy() {
	w.fileWriter.Close()
}
// Flush flush file logger.
// there are no buffering messages in file logger in memory.
// flush file means sync file from disk.
func (w *fileLogWriter) Flush() {
	// fsync the descriptor so buffered OS pages reach stable storage.
	w.fileWriter.Sync()
}
// init registers the file writer factory under the "file" adapter name
// at package load time.
func init() {
	Register(AdapterFile, newFileWriter)
}
@@ -0,0 +1,420 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"bufio" | |||
"fmt" | |||
"io/ioutil" | |||
"os" | |||
"strconv" | |||
"testing" | |||
"time" | |||
) | |||
func TestFilePerm(t *testing.T) { | |||
log := NewLogger(10000) | |||
// use 0666 as test perm cause the default umask is 022 | |||
log.SetLogger("file", `{"filename":"test.log", "perm": "0666"}`) | |||
log.Debug("debug") | |||
log.Informational("info") | |||
log.Notice("notice") | |||
log.Warning("warning") | |||
log.Error("error") | |||
log.Alert("alert") | |||
log.Critical("critical") | |||
log.Emergency("emergency") | |||
file, err := os.Stat("test.log") | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
if file.Mode() != 0666 { | |||
t.Fatal("unexpected log file permission") | |||
} | |||
os.Remove("test.log") | |||
} | |||
func TestFile1(t *testing.T) { | |||
log := NewLogger(10000) | |||
log.SetLogger("file", `{"filename":"test.log"}`) | |||
log.Debug("debug") | |||
log.Informational("info") | |||
log.Notice("notice") | |||
log.Warning("warning") | |||
log.Error("error") | |||
log.Alert("alert") | |||
log.Critical("critical") | |||
log.Emergency("emergency") | |||
f, err := os.Open("test.log") | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
b := bufio.NewReader(f) | |||
lineNum := 0 | |||
for { | |||
line, _, err := b.ReadLine() | |||
if err != nil { | |||
break | |||
} | |||
if len(line) > 0 { | |||
lineNum++ | |||
} | |||
} | |||
var expected = LevelDebug + 1 | |||
if lineNum != expected { | |||
t.Fatal(lineNum, "not "+strconv.Itoa(expected)+" lines") | |||
} | |||
os.Remove("test.log") | |||
} | |||
func TestFile2(t *testing.T) { | |||
log := NewLogger(10000) | |||
log.SetLogger("file", fmt.Sprintf(`{"filename":"test2.log","level":%d}`, LevelError)) | |||
log.Debug("debug") | |||
log.Info("info") | |||
log.Notice("notice") | |||
log.Warning("warning") | |||
log.Error("error") | |||
log.Alert("alert") | |||
log.Critical("critical") | |||
log.Emergency("emergency") | |||
f, err := os.Open("test2.log") | |||
if err != nil { | |||
t.Fatal(err) | |||
} | |||
b := bufio.NewReader(f) | |||
lineNum := 0 | |||
for { | |||
line, _, err := b.ReadLine() | |||
if err != nil { | |||
break | |||
} | |||
if len(line) > 0 { | |||
lineNum++ | |||
} | |||
} | |||
var expected = LevelError + 1 | |||
if lineNum != expected { | |||
t.Fatal(lineNum, "not "+strconv.Itoa(expected)+" lines") | |||
} | |||
os.Remove("test2.log") | |||
} | |||
func TestFileDailyRotate_01(t *testing.T) { | |||
log := NewLogger(10000) | |||
log.SetLogger("file", `{"filename":"test3.log","maxlines":4}`) | |||
log.Debug("debug") | |||
log.Info("info") | |||
log.Notice("notice") | |||
log.Warning("warning") | |||
log.Error("error") | |||
log.Alert("alert") | |||
log.Critical("critical") | |||
log.Emergency("emergency") | |||
rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1) + ".log" | |||
b, err := exists(rotateName) | |||
if !b || err != nil { | |||
os.Remove("test3.log") | |||
t.Fatal("rotate not generated") | |||
} | |||
os.Remove(rotateName) | |||
os.Remove("test3.log") | |||
} | |||
func TestFileDailyRotate_02(t *testing.T) { | |||
fn1 := "rotate_day.log" | |||
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log" | |||
testFileRotate(t, fn1, fn2, true, false) | |||
} | |||
func TestFileDailyRotate_03(t *testing.T) { | |||
fn1 := "rotate_day.log" | |||
fn := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log" | |||
os.Create(fn) | |||
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log" | |||
testFileRotate(t, fn1, fn2, true, false) | |||
os.Remove(fn) | |||
} | |||
func TestFileDailyRotate_04(t *testing.T) { | |||
fn1 := "rotate_day.log" | |||
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log" | |||
testFileDailyRotate(t, fn1, fn2) | |||
} | |||
func TestFileDailyRotate_05(t *testing.T) { | |||
fn1 := "rotate_day.log" | |||
fn := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log" | |||
os.Create(fn) | |||
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log" | |||
testFileDailyRotate(t, fn1, fn2) | |||
os.Remove(fn) | |||
} | |||
func TestFileDailyRotate_06(t *testing.T) { //test file mode | |||
log := NewLogger(10000) | |||
log.SetLogger("file", `{"filename":"test3.log","maxlines":4}`) | |||
log.Debug("debug") | |||
log.Info("info") | |||
log.Notice("notice") | |||
log.Warning("warning") | |||
log.Error("error") | |||
log.Alert("alert") | |||
log.Critical("critical") | |||
log.Emergency("emergency") | |||
rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1) + ".log" | |||
s, _ := os.Lstat(rotateName) | |||
if s.Mode() != 0440 { | |||
os.Remove(rotateName) | |||
os.Remove("test3.log") | |||
t.Fatal("rotate file mode error") | |||
} | |||
os.Remove(rotateName) | |||
os.Remove("test3.log") | |||
} | |||
func TestFileHourlyRotate_01(t *testing.T) { | |||
log := NewLogger(10000) | |||
log.SetLogger("file", `{"filename":"test3.log","hourly":true,"maxlines":4}`) | |||
log.Debug("debug") | |||
log.Info("info") | |||
log.Notice("notice") | |||
log.Warning("warning") | |||
log.Error("error") | |||
log.Alert("alert") | |||
log.Critical("critical") | |||
log.Emergency("emergency") | |||
rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006010215"), 1) + ".log" | |||
b, err := exists(rotateName) | |||
if !b || err != nil { | |||
os.Remove("test3.log") | |||
t.Fatal("rotate not generated") | |||
} | |||
os.Remove(rotateName) | |||
os.Remove("test3.log") | |||
} | |||
func TestFileHourlyRotate_02(t *testing.T) { | |||
fn1 := "rotate_hour.log" | |||
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log" | |||
testFileRotate(t, fn1, fn2, false, true) | |||
} | |||
func TestFileHourlyRotate_03(t *testing.T) { | |||
fn1 := "rotate_hour.log" | |||
fn := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".log" | |||
os.Create(fn) | |||
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log" | |||
testFileRotate(t, fn1, fn2, false, true) | |||
os.Remove(fn) | |||
} | |||
func TestFileHourlyRotate_04(t *testing.T) { | |||
fn1 := "rotate_hour.log" | |||
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log" | |||
testFileHourlyRotate(t, fn1, fn2) | |||
} | |||
func TestFileHourlyRotate_05(t *testing.T) { | |||
fn1 := "rotate_hour.log" | |||
fn := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".log" | |||
os.Create(fn) | |||
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log" | |||
testFileHourlyRotate(t, fn1, fn2) | |||
os.Remove(fn) | |||
} | |||
func TestFileHourlyRotate_06(t *testing.T) { //test file mode | |||
log := NewLogger(10000) | |||
log.SetLogger("file", `{"filename":"test3.log", "hourly":true, "maxlines":4}`) | |||
log.Debug("debug") | |||
log.Info("info") | |||
log.Notice("notice") | |||
log.Warning("warning") | |||
log.Error("error") | |||
log.Alert("alert") | |||
log.Critical("critical") | |||
log.Emergency("emergency") | |||
rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006010215"), 1) + ".log" | |||
s, _ := os.Lstat(rotateName) | |||
if s.Mode() != 0440 { | |||
os.Remove(rotateName) | |||
os.Remove("test3.log") | |||
t.Fatal("rotate file mode error") | |||
} | |||
os.Remove(rotateName) | |||
os.Remove("test3.log") | |||
} | |||
func testFileRotate(t *testing.T, fn1, fn2 string, daily, hourly bool) { | |||
fw := &fileLogWriter{ | |||
Daily: daily, | |||
MaxDays: 7, | |||
Hourly: hourly, | |||
MaxHours: 168, | |||
Rotate: true, | |||
Level: LevelTrace, | |||
Perm: "0660", | |||
RotatePerm: "0440", | |||
} | |||
if daily { | |||
fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1)) | |||
fw.dailyOpenTime = time.Now().Add(-24 * time.Hour) | |||
fw.dailyOpenDate = fw.dailyOpenTime.Day() | |||
} | |||
if hourly { | |||
fw.Init(fmt.Sprintf(`{"filename":"%v","maxhours":1}`, fn1)) | |||
fw.hourlyOpenTime = time.Now().Add(-1 * time.Hour) | |||
fw.hourlyOpenDate = fw.hourlyOpenTime.Day() | |||
} | |||
fw.WriteMsg(time.Now(), "this is a msg for test", LevelDebug) | |||
for _, file := range []string{fn1, fn2} { | |||
_, err := os.Stat(file) | |||
if err != nil { | |||
t.Log(err) | |||
t.FailNow() | |||
} | |||
os.Remove(file) | |||
} | |||
fw.Destroy() | |||
} | |||
func testFileDailyRotate(t *testing.T, fn1, fn2 string) { | |||
fw := &fileLogWriter{ | |||
Daily: true, | |||
MaxDays: 7, | |||
Rotate: true, | |||
Level: LevelTrace, | |||
Perm: "0660", | |||
RotatePerm: "0440", | |||
} | |||
fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1)) | |||
fw.dailyOpenTime = time.Now().Add(-24 * time.Hour) | |||
fw.dailyOpenDate = fw.dailyOpenTime.Day() | |||
today, _ := time.ParseInLocation("2006-01-02", time.Now().Format("2006-01-02"), fw.dailyOpenTime.Location()) | |||
today = today.Add(-1 * time.Second) | |||
fw.dailyRotate(today) | |||
for _, file := range []string{fn1, fn2} { | |||
_, err := os.Stat(file) | |||
if err != nil { | |||
t.FailNow() | |||
} | |||
content, err := ioutil.ReadFile(file) | |||
if err != nil { | |||
t.FailNow() | |||
} | |||
if len(content) > 0 { | |||
t.FailNow() | |||
} | |||
os.Remove(file) | |||
} | |||
fw.Destroy() | |||
} | |||
func testFileHourlyRotate(t *testing.T, fn1, fn2 string) { | |||
fw := &fileLogWriter{ | |||
Hourly: true, | |||
MaxHours: 168, | |||
Rotate: true, | |||
Level: LevelTrace, | |||
Perm: "0660", | |||
RotatePerm: "0440", | |||
} | |||
fw.Init(fmt.Sprintf(`{"filename":"%v","maxhours":1}`, fn1)) | |||
fw.hourlyOpenTime = time.Now().Add(-1 * time.Hour) | |||
fw.hourlyOpenDate = fw.hourlyOpenTime.Hour() | |||
hour, _ := time.ParseInLocation("2006010215", time.Now().Format("2006010215"), fw.hourlyOpenTime.Location()) | |||
hour = hour.Add(-1 * time.Second) | |||
fw.hourlyRotate(hour) | |||
for _, file := range []string{fn1, fn2} { | |||
_, err := os.Stat(file) | |||
if err != nil { | |||
t.FailNow() | |||
} | |||
content, err := ioutil.ReadFile(file) | |||
if err != nil { | |||
t.FailNow() | |||
} | |||
if len(content) > 0 { | |||
t.FailNow() | |||
} | |||
os.Remove(file) | |||
} | |||
fw.Destroy() | |||
} | |||
// exists reports whether the named path is present, distinguishing
// "not there" (false, nil) from a genuine stat failure (false, err).
func exists(path string) (bool, error) {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
// BenchmarkFile measures synchronous file-adapter logging throughput.
func BenchmarkFile(b *testing.B) {
	log := NewLogger(100000)
	log.SetLogger("file", `{"filename":"test4.log"}`)
	for i := 0; i < b.N; i++ {
		log.Debug("debug")
	}
	// NOTE(review): the logger is never closed before removing the file —
	// acceptable in a benchmark, but worth confirming on Windows.
	os.Remove("test4.log")
}
// BenchmarkFileAsynchronous measures file logging throughput with the
// asynchronous channel-based delivery enabled.
func BenchmarkFileAsynchronous(b *testing.B) {
	log := NewLogger(100000)
	log.SetLogger("file", `{"filename":"test4.log"}`)
	log.Async()
	for i := 0; i < b.N; i++ {
		log.Debug("debug")
	}
	// NOTE(review): pending async messages may still be in flight when the
	// file is removed; fine for a throughput benchmark.
	os.Remove("test4.log")
}
// BenchmarkFileCallDepth measures the extra cost of runtime.Caller-based
// file:line decoration on every message.
func BenchmarkFileCallDepth(b *testing.B) {
	log := NewLogger(100000)
	log.SetLogger("file", `{"filename":"test4.log"}`)
	log.EnableFuncCallDepth(true)
	log.SetLogFuncCallDepth(2)
	for i := 0; i < b.N; i++ {
		log.Debug("debug")
	}
	os.Remove("test4.log")
}
// BenchmarkFileAsynchronousCallDepth combines async delivery with
// caller-location decoration.
func BenchmarkFileAsynchronousCallDepth(b *testing.B) {
	log := NewLogger(100000)
	log.SetLogger("file", `{"filename":"test4.log"}`)
	log.EnableFuncCallDepth(true)
	log.SetLogFuncCallDepth(2)
	log.Async()
	for i := 0; i < b.N; i++ {
		log.Debug("debug")
	}
	os.Remove("test4.log")
}
// BenchmarkFileOnGoroutine measures logging contention with one goroutine
// spawned per message.
// NOTE(review): goroutines are not awaited, so some may still run after
// the benchmark body returns — intentional stress, not a correctness test.
func BenchmarkFileOnGoroutine(b *testing.B) {
	log := NewLogger(100000)
	log.SetLogger("file", `{"filename":"test4.log"}`)
	for i := 0; i < b.N; i++ {
		go log.Debug("debug")
	}
	os.Remove("test4.log")
}
@@ -0,0 +1,72 @@ | |||
package logs | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"net/http" | |||
"net/url" | |||
"time" | |||
) | |||
// JLWriter implements beego LoggerInterface and is used to send jiaoliao webhook
type JLWriter struct {
	AuthorName  string `json:"authorname"`  // display name attached to the posted message
	Title       string `json:"title"`       // message title sent in the form payload
	WebhookURL  string `json:"webhookurl"`  // jiaoliao webhook endpoint to POST to
	RedirectURL string `json:"redirecturl,omitempty"` // optional click-through URL
	ImageURL    string `json:"imageurl,omitempty"`    // optional image URL
	Level       int    `json:"level"`       // messages with level greater than this are dropped
}
// newJLWriter create jiaoliao writer.
// LevelTrace is the most permissive threshold, so every message is
// forwarded until Init overrides Level.
func newJLWriter() Logger {
	return &JLWriter{Level: LevelTrace}
}
// Init JLWriter with json config string
// Unknown JSON keys are ignored; missing keys keep their current values.
func (s *JLWriter) Init(jsonconfig string) error {
	return json.Unmarshal([]byte(jsonconfig), s)
}
// WriteMsg write message in smtp writer. | |||
// it will send an email with subject and only this message. | |||
func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error { | |||
if level > s.Level { | |||
return nil | |||
} | |||
text := fmt.Sprintf("%s %s", when.Format("2006-01-02 15:04:05"), msg) | |||
form := url.Values{} | |||
form.Add("authorName", s.AuthorName) | |||
form.Add("title", s.Title) | |||
form.Add("text", text) | |||
if s.RedirectURL != "" { | |||
form.Add("redirectUrl", s.RedirectURL) | |||
} | |||
if s.ImageURL != "" { | |||
form.Add("imageUrl", s.ImageURL) | |||
} | |||
resp, err := http.PostForm(s.WebhookURL, form) | |||
if err != nil { | |||
return err | |||
} | |||
defer resp.Body.Close() | |||
if resp.StatusCode != http.StatusOK { | |||
return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode) | |||
} | |||
return nil | |||
} | |||
// Flush implementing method. empty.
// WriteMsg posts synchronously, so there is never anything buffered.
func (s *JLWriter) Flush() {
}
// Destroy implementing method. empty.
// The writer holds no persistent resources to release.
func (s *JLWriter) Destroy() {
}
// init registers the jiaoliao writer factory at package load time.
func init() {
	Register(AdapterJianLiao, newJLWriter)
}
@@ -0,0 +1,665 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
// Package logs provide a general log interface | |||
// Usage: | |||
// | |||
// import "github.com/astaxie/beego/logs" | |||
// | |||
// log := NewLogger(10000) | |||
// log.SetLogger("console", "") | |||
// | |||
// > the first params stand for how many channel | |||
// | |||
// Use it like this: | |||
// | |||
// log.Trace("trace") | |||
// log.Info("info") | |||
// log.Warn("warning") | |||
// log.Debug("debug") | |||
// log.Critical("critical") | |||
// | |||
// more docs http://beego.me/docs/module/logs.md | |||
package logs | |||
import ( | |||
"fmt" | |||
"log" | |||
"os" | |||
"path" | |||
"runtime" | |||
"strconv" | |||
"strings" | |||
"sync" | |||
"time" | |||
) | |||
// RFC5424 log message levels.
// Lower values are more severe; BeeLogger drops messages whose level is
// numerically greater than the configured threshold.
const (
	LevelEmergency = iota // system is unusable
	LevelAlert            // action must be taken immediately
	LevelCritical         // critical conditions
	LevelError            // error conditions
	LevelWarning          // warning conditions
	LevelNotice           // normal but significant condition
	LevelInformational    // informational messages
	LevelDebug            // debug-level messages
)
// levelLogLogger is defined to implement log.Logger
// the real log level will be LevelEmergency
const levelLoggerImpl = -1
// Name for adapter with beego official support
// These strings are the names passed to SetLogger/DelLogger.
const (
	AdapterConsole   = "console"
	AdapterFile      = "file"
	AdapterMultiFile = "multifile"
	AdapterMail      = "smtp"
	AdapterConn      = "conn"
	AdapterEs        = "es"
	AdapterJianLiao  = "jianliao"
	AdapterSlack     = "slack"
	AdapterAliLS     = "alils"
)
// Legacy log level constants to ensure backwards compatibility.
const (
	LevelInfo  = LevelInformational
	LevelTrace = LevelDebug
	LevelWarn  = LevelWarning
)
// newLoggerFunc is the factory signature registered through Register.
type newLoggerFunc func() Logger
// Logger defines the behavior of a log provider.
type Logger interface {
	Init(config string) error                             // configure from a JSON config string
	WriteMsg(when time.Time, msg string, level int) error // emit one formatted message
	Destroy()                                             // release any held resources
	Flush()                                               // flush buffered output
}
// adapters maps adapter names to their registered factory functions.
var adapters = make(map[string]newLoggerFunc)
// levelPrefix holds the "[X]" tag prepended per RFC5424 level, indexed by level.
var levelPrefix = [LevelDebug + 1]string{"[M]", "[A]", "[C]", "[E]", "[W]", "[N]", "[I]", "[D]"}
// Register makes a log provide available by the provided name. | |||
// If Register is called twice with the same name or if driver is nil, | |||
// it panics. | |||
func Register(name string, log newLoggerFunc) { | |||
if log == nil { | |||
panic("logs: Register provide is nil") | |||
} | |||
if _, dup := adapters[name]; dup { | |||
panic("logs: Register called twice for provider " + name) | |||
} | |||
adapters[name] = log | |||
} | |||
// BeeLogger is default logger in beego application.
// it can contain several providers and log message into all providers.
type BeeLogger struct {
	lock                sync.Mutex // guards init handling and outputs mutation
	level               int // messages numerically above this level are dropped
	init                bool // set once SetLogger has configured outputs
	enableFuncCallDepth bool // prepend "[file:line]" caller info when true
	loggerFuncCallDepth int // runtime.Caller skip count used by writeMsg
	asynchronous        bool // deliver through msgChan when true (see Async)
	prefix              string // user-set prefix prepended to every message
	msgChanLen          int64 // capacity used when creating msgChan
	msgChan             chan *logMsg // async delivery queue
	signalChan          chan string // carries "flush"/"close" control signals
	wg                  sync.WaitGroup // tracks the async consumer goroutine
	outputs             []*nameLogger // registered adapter instances
}
// defaultAsyncMsgLen is the async queue capacity when none is supplied.
const defaultAsyncMsgLen = 1e3
// nameLogger pairs an adapter instance with the name it was registered under.
type nameLogger struct {
	Logger
	name string
}
// logMsg is one queued message; instances are recycled via logMsgPool.
type logMsg struct {
	level int
	msg   string
	when  time.Time
}
// logMsgPool recycles logMsg values; it is created lazily by Async.
var logMsgPool *sync.Pool
// NewLogger returns a new BeeLogger. | |||
// channelLen means the number of messages in chan(used where asynchronous is true). | |||
// if the buffering chan is full, logger adapters write to file or other way. | |||
func NewLogger(channelLens ...int64) *BeeLogger { | |||
bl := new(BeeLogger) | |||
bl.level = LevelDebug | |||
bl.loggerFuncCallDepth = 2 | |||
bl.msgChanLen = append(channelLens, 0)[0] | |||
if bl.msgChanLen <= 0 { | |||
bl.msgChanLen = defaultAsyncMsgLen | |||
} | |||
bl.signalChan = make(chan string, 1) | |||
bl.setLogger(AdapterConsole) | |||
return bl | |||
} | |||
// Async set the log to asynchronous and start the goroutine
func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger {
	bl.lock.Lock()
	defer bl.lock.Unlock()
	// Already asynchronous: nothing more to set up.
	if bl.asynchronous {
		return bl
	}
	bl.asynchronous = true
	// Optional positive override of the queue capacity.
	if len(msgLen) > 0 && msgLen[0] > 0 {
		bl.msgChanLen = msgLen[0]
	}
	bl.msgChan = make(chan *logMsg, bl.msgChanLen)
	logMsgPool = &sync.Pool{
		New: func() interface{} {
			return &logMsg{}
		},
	}
	// Single consumer goroutine drains msgChan until Close signals it.
	bl.wg.Add(1)
	go bl.startLogger()
	return bl
}
// SetLogger provides a given logger adapter into BeeLogger with config string. | |||
// config need to be correct JSON as string: {"interval":360}. | |||
func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error { | |||
config := append(configs, "{}")[0] | |||
for _, l := range bl.outputs { | |||
if l.name == adapterName { | |||
return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName) | |||
} | |||
} | |||
logAdapter, ok := adapters[adapterName] | |||
if !ok { | |||
return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName) | |||
} | |||
lg := logAdapter() | |||
err := lg.Init(config) | |||
if err != nil { | |||
fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error()) | |||
return err | |||
} | |||
bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg}) | |||
return nil | |||
} | |||
// SetLogger provides a given logger adapter into BeeLogger with config string.
// config need to be correct JSON as string: {"interval":360}.
func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error {
	bl.lock.Lock()
	defer bl.lock.Unlock()
	// The first explicit SetLogger call discards the implicit console
	// adapter installed by NewLogger.
	if !bl.init {
		bl.outputs = []*nameLogger{}
		bl.init = true
	}
	return bl.setLogger(adapterName, configs...)
}
// DelLogger remove a logger adapter in BeeLogger. | |||
func (bl *BeeLogger) DelLogger(adapterName string) error { | |||
bl.lock.Lock() | |||
defer bl.lock.Unlock() | |||
outputs := []*nameLogger{} | |||
for _, lg := range bl.outputs { | |||
if lg.name == adapterName { | |||
lg.Destroy() | |||
} else { | |||
outputs = append(outputs, lg) | |||
} | |||
} | |||
if len(outputs) == len(bl.outputs) { | |||
return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName) | |||
} | |||
bl.outputs = outputs | |||
return nil | |||
} | |||
// writeToLoggers fans one message out to every registered adapter.
// A failing adapter is reported on stderr and does not stop the fan-out.
func (bl *BeeLogger) writeToLoggers(when time.Time, msg string, level int) {
	for _, l := range bl.outputs {
		err := l.WriteMsg(when, msg, level)
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err)
		}
	}
}
func (bl *BeeLogger) Write(p []byte) (n int, err error) { | |||
if len(p) == 0 { | |||
return 0, nil | |||
} | |||
// writeMsg will always add a '\n' character | |||
if p[len(p)-1] == '\n' { | |||
p = p[0 : len(p)-1] | |||
} | |||
// set levelLoggerImpl to ensure all log message will be write out | |||
err = bl.writeMsg(levelLoggerImpl, string(p)) | |||
if err == nil { | |||
return len(p), err | |||
} | |||
return 0, err | |||
} | |||
// writeMsg formats msg (with optional fmt args), decorates it with the
// prefix, optional caller location and level tag, then hands it to the
// async queue or directly to the adapters.
func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error {
	// NOTE(review): bl.init is read here without holding bl.lock, so two
	// goroutines could race into setLogger — confirm intended.
	if !bl.init {
		bl.lock.Lock()
		bl.setLogger(AdapterConsole)
		bl.lock.Unlock()
	}
	if len(v) > 0 {
		msg = fmt.Sprintf(msg, v...)
	}
	msg = bl.prefix + " " + msg
	when := time.Now()
	// Optionally prepend the caller's "[file:line]".
	if bl.enableFuncCallDepth {
		_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)
		if !ok {
			file = "???"
			line = 0
		}
		_, filename := path.Split(file)
		msg = "[" + filename + ":" + strconv.Itoa(line) + "] " + msg
	}
	//set level info in front of filename info
	if logLevel == levelLoggerImpl {
		// set to emergency to ensure all log will be print out correctly
		logLevel = LevelEmergency
	} else {
		msg = levelPrefix[logLevel] + " " + msg
	}
	if bl.asynchronous {
		// Recycle a pooled logMsg and enqueue; blocks when msgChan is full.
		lm := logMsgPool.Get().(*logMsg)
		lm.level = logLevel
		lm.msg = msg
		lm.when = when
		bl.msgChan <- lm
	} else {
		bl.writeToLoggers(when, msg, logLevel)
	}
	return nil
}
// SetLevel Set log message level.
// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),
// log providers will not even be sent the message.
// NOTE(review): not synchronized; presumably set once at startup — confirm callers.
func (bl *BeeLogger) SetLevel(l int) {
	bl.level = l
}
// GetLevel Get Current log message level.
func (bl *BeeLogger) GetLevel() int {
	return bl.level
}
// SetLogFuncCallDepth set log funcCallDepth
// d is the runtime.Caller skip count used when caller info is enabled.
func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
	bl.loggerFuncCallDepth = d
}
// GetLogFuncCallDepth return log funcCallDepth for wrapper
func (bl *BeeLogger) GetLogFuncCallDepth() int {
	return bl.loggerFuncCallDepth
}
// EnableFuncCallDepth enable log funcCallDepth
// When enabled, writeMsg prepends "[file:line]" of the caller to each message.
func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
	bl.enableFuncCallDepth = b
}
// SetPrefix sets the string prepended to every logged message
// (before the level tag is applied).
func (bl *BeeLogger) SetPrefix(s string) {
	bl.prefix = s
}
// start logger chan reading.
// when chan is not empty, write logs.
// This is the single async consumer goroutine started by Async; it exits
// only after receiving "close" on signalChan.
func (bl *BeeLogger) startLogger() {
	gameOver := false
	for {
		select {
		case bm := <-bl.msgChan:
			bl.writeToLoggers(bm.when, bm.msg, bm.level)
			logMsgPool.Put(bm)
		case sg := <-bl.signalChan:
			// Now should only send "flush" or "close" to bl.signalChan
			bl.flush()
			if sg == "close" {
				for _, l := range bl.outputs {
					l.Destroy()
				}
				bl.outputs = nil
				gameOver = true
			}
			// Done releases Flush/Close, which wait on the WaitGroup.
			bl.wg.Done()
		}
		if gameOver {
			break
		}
	}
}
// Emergency Log EMERGENCY level message. | |||
func (bl *BeeLogger) Emergency(format string, v ...interface{}) { | |||
if LevelEmergency > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelEmergency, format, v...) | |||
} | |||
// Alert Log ALERT level message. | |||
func (bl *BeeLogger) Alert(format string, v ...interface{}) { | |||
if LevelAlert > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelAlert, format, v...) | |||
} | |||
// Critical Log CRITICAL level message. | |||
func (bl *BeeLogger) Critical(format string, v ...interface{}) { | |||
if LevelCritical > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelCritical, format, v...) | |||
} | |||
// Error Log ERROR level message. | |||
func (bl *BeeLogger) Error(format string, v ...interface{}) { | |||
if LevelError > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelError, format, v...) | |||
} | |||
// Warning Log WARNING level message. | |||
func (bl *BeeLogger) Warning(format string, v ...interface{}) { | |||
if LevelWarn > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelWarn, format, v...) | |||
} | |||
// Notice Log NOTICE level message. | |||
func (bl *BeeLogger) Notice(format string, v ...interface{}) { | |||
if LevelNotice > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelNotice, format, v...) | |||
} | |||
// Informational Log INFORMATIONAL level message. | |||
func (bl *BeeLogger) Informational(format string, v ...interface{}) { | |||
if LevelInfo > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelInfo, format, v...) | |||
} | |||
// Debug Log DEBUG level message. | |||
func (bl *BeeLogger) Debug(format string, v ...interface{}) { | |||
if LevelDebug > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelDebug, format, v...) | |||
} | |||
// Warn Log WARN level message. | |||
// compatibility alias for Warning() | |||
func (bl *BeeLogger) Warn(format string, v ...interface{}) { | |||
if LevelWarn > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelWarn, format, v...) | |||
} | |||
// Info Log INFO level message. | |||
// compatibility alias for Informational() | |||
func (bl *BeeLogger) Info(format string, v ...interface{}) { | |||
if LevelInfo > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelInfo, format, v...) | |||
} | |||
// Trace Log TRACE level message. | |||
// compatibility alias for Debug() | |||
func (bl *BeeLogger) Trace(format string, v ...interface{}) { | |||
if LevelDebug > bl.level { | |||
return | |||
} | |||
bl.writeMsg(LevelDebug, format, v...) | |||
} | |||
// Flush flushes all buffered log data to every registered output.
//
// In asynchronous mode the request is handed to the worker goroutine via
// signalChan; wg.Wait blocks until the worker has drained msgChan and
// flushed the outputs, and the following wg.Add(1) re-arms the counter
// so the next "flush"/"close" signal can be waited on again. In
// synchronous mode the outputs are flushed inline.
func (bl *BeeLogger) Flush() {
	if bl.asynchronous {
		bl.signalChan <- "flush"
		bl.wg.Wait()
		bl.wg.Add(1)
		return
	}
	bl.flush()
}
// Close flushes all pending log data, destroys every adapter and closes
// the logger's channels. The logger must not be used after Close.
//
// In asynchronous mode the draining and adapter teardown is delegated to
// the worker goroutine (triggered by the "close" signal) and waited for
// before msgChan is closed; in synchronous mode the flush and teardown
// happen inline on the caller's goroutine.
func (bl *BeeLogger) Close() {
	if bl.asynchronous {
		bl.signalChan <- "close"
		bl.wg.Wait()
		close(bl.msgChan)
	} else {
		bl.flush()
		for _, l := range bl.outputs {
			l.Destroy()
		}
		bl.outputs = nil
	}
	close(bl.signalChan)
}
// Reset flushes pending data, destroys all registered adapters and sets
// bl.outputs to nil; SetLogger may be called again afterwards to attach
// new adapters. Unlike Close, the channels remain open and the logger
// stays usable.
func (bl *BeeLogger) Reset() {
	bl.Flush()
	for _, l := range bl.outputs {
		l.Destroy()
	}
	bl.outputs = nil
}
func (bl *BeeLogger) flush() { | |||
if bl.asynchronous { | |||
for { | |||
if len(bl.msgChan) > 0 { | |||
bm := <-bl.msgChan | |||
bl.writeToLoggers(bm.when, bm.msg, bm.level) | |||
logMsgPool.Put(bm) | |||
continue | |||
} | |||
break | |||
} | |||
} | |||
for _, l := range bl.outputs { | |||
l.Flush() | |||
} | |||
} | |||
// beeLogger is the package-level default BeeLogger used by all the
// module-level helper functions (Info, Error, SetLevel, SetLogger, ...).
var beeLogger = NewLogger()
// GetBeeLogger returns the package-level default BeeLogger instance.
func GetBeeLogger() *BeeLogger {
	return beeLogger
}
// beeLoggerMap caches the per-prefix *log.Logger instances handed out by
// GetLogger, guarded by an RWMutex for concurrent access.
var beeLoggerMap = struct {
	sync.RWMutex
	logs map[string]*log.Logger
}{
	logs: map[string]*log.Logger{},
}
// GetLogger returns a standard-library *log.Logger that writes through the
// default BeeLogger. An optional prefix (upper-cased and rendered as
// "[PREFIX] ") selects a cached per-prefix logger; instances are created
// lazily with a read-lock fast path followed by a double-checked write lock.
func GetLogger(prefixes ...string) *log.Logger {
	// append(prefixes, "")[0] yields the first prefix, or "" when none given.
	prefix := append(prefixes, "")[0]
	if prefix != "" {
		prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix))
	}
	// Fast path: the logger for this prefix already exists.
	beeLoggerMap.RLock()
	l, ok := beeLoggerMap.logs[prefix]
	if ok {
		beeLoggerMap.RUnlock()
		return l
	}
	beeLoggerMap.RUnlock()
	// Slow path: re-check under the write lock before creating, since
	// another goroutine may have created it between the two locks.
	beeLoggerMap.Lock()
	defer beeLoggerMap.Unlock()
	l, ok = beeLoggerMap.logs[prefix]
	if !ok {
		l = log.New(beeLogger, prefix, 0)
		beeLoggerMap.logs[prefix] = l
	}
	return l
}
// Reset removes all adapters from the default logger after flushing them.
func Reset() {
	beeLogger.Reset()
}
// Async switches the default logger to asynchronous mode, buffering up to
// msgLen messages (the first value is used when given).
func Async(msgLen ...int64) *BeeLogger {
	return beeLogger.Async(msgLen...)
}
// SetLevel sets the log level of the default logger; messages above the
// level are dropped.
func SetLevel(l int) {
	beeLogger.SetLevel(l)
}
// SetPrefix sets the message prefix of the default logger.
func SetPrefix(s string) {
	beeLogger.SetPrefix(s)
}
// EnableFuncCallDepth enables/disables recording of the log call site on
// the default logger.
// NOTE(review): this writes the field directly instead of going through
// the EnableFuncCallDepth method — confirm no locking is required here.
func EnableFuncCallDepth(b bool) {
	beeLogger.enableFuncCallDepth = b
}
// SetLogFuncCall enables/disables call-site logging on the default logger
// and resets the call depth to 4 (suitable for these package-level wrappers).
func SetLogFuncCall(b bool) {
	beeLogger.EnableFuncCallDepth(b)
	beeLogger.SetLogFuncCallDepth(4)
}
// SetLogFuncCallDepth sets the stack depth used to resolve the call site
// on the default logger.
func SetLogFuncCallDepth(d int) {
	beeLogger.loggerFuncCallDepth = d
}
// SetLogger attaches a new output adapter (with optional JSON config) to
// the default logger.
func SetLogger(adapter string, config ...string) error {
	return beeLogger.SetLogger(adapter, config...)
}
// Emergency logs a formatted message at emergency level on the default logger.
func Emergency(f interface{}, v ...interface{}) {
	beeLogger.Emergency(formatLog(f, v...))
}
// Alert logs a formatted message at alert level on the default logger.
func Alert(f interface{}, v ...interface{}) {
	beeLogger.Alert(formatLog(f, v...))
}
// Critical logs a formatted message at critical level on the default logger.
func Critical(f interface{}, v ...interface{}) {
	beeLogger.Critical(formatLog(f, v...))
}
// Error logs a formatted message at error level on the default logger.
func Error(f interface{}, v ...interface{}) {
	beeLogger.Error(formatLog(f, v...))
}
// Warning logs a formatted message at warning level on the default logger
// (delegates to Warn, which uses the same level).
func Warning(f interface{}, v ...interface{}) {
	beeLogger.Warn(formatLog(f, v...))
}
// Warn logs a formatted message at warning level on the default logger.
// compatibility alias for Warning()
func Warn(f interface{}, v ...interface{}) {
	beeLogger.Warn(formatLog(f, v...))
}
// Notice logs a formatted message at notice level on the default logger.
func Notice(f interface{}, v ...interface{}) {
	beeLogger.Notice(formatLog(f, v...))
}
// Informational logs a formatted message at info level on the default
// logger (delegates to Info, which uses the same level).
func Informational(f interface{}, v ...interface{}) {
	beeLogger.Info(formatLog(f, v...))
}
// Info logs a formatted message at info level on the default logger.
// compatibility alias for Informational()
func Info(f interface{}, v ...interface{}) {
	beeLogger.Info(formatLog(f, v...))
}
// Debug logs a formatted message at debug level on the default logger.
func Debug(f interface{}, v ...interface{}) {
	beeLogger.Debug(formatLog(f, v...))
}
// Trace logs a formatted message at trace (debug) level on the default logger.
// compatibility alias for Debug()
func Trace(f interface{}, v ...interface{}) {
	beeLogger.Trace(formatLog(f, v...))
}
// formatLog renders f and the optional arguments v into a single string.
//
// A string f is treated as a Sprintf format when it appears to contain
// format verbs (a "%" that is not part of an escaped "%%"); otherwise,
// and for any non-string f, one " %v" placeholder is appended per extra
// argument before formatting.
func formatLog(f interface{}, v ...interface{}) string {
	var msg string
	switch arg := f.(type) {
	case string:
		msg = arg
		if len(v) == 0 {
			return msg
		}
		// Append placeholders unless msg already looks like a format string.
		if !strings.Contains(msg, "%") || strings.Contains(msg, "%%") {
			msg += strings.Repeat(" %v", len(v))
		}
	default:
		msg = fmt.Sprint(arg)
		if len(v) == 0 {
			return msg
		}
		msg += strings.Repeat(" %v", len(v))
	}
	return fmt.Sprintf(msg, v...)
}
@@ -0,0 +1,175 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"io" | |||
"runtime" | |||
"sync" | |||
"time" | |||
) | |||
// logWriter serializes writes to an underlying io.Writer with a mutex,
// prefixing every line with a formatted timestamp (see writeln).
type logWriter struct {
	sync.Mutex
	writer io.Writer // destination stream; guarded by the embedded Mutex
}
func newLogWriter(wr io.Writer) *logWriter { | |||
return &logWriter{writer: wr} | |||
} | |||
func (lg *logWriter) writeln(when time.Time, msg string) { | |||
lg.Lock() | |||
h, _, _ := formatTimeHeader(when) | |||
lg.writer.Write(append(append(h, msg...), '\n')) | |||
lg.Unlock() | |||
} | |||
const (
	// Digit lookup tables used by formatTimeHeader to render the
	// "2006/01/02 15:04:05.123 " header without fmt. For each time
	// component the value indexes a pair of strings holding the tens
	// digit (…1) and the units digit (…2); y1–y4 cover the four year
	// digits and ns1 the millisecond digits.
	y1 = `0123456789`
	y2 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
	y3 = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999`
	y4 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
	mo1 = `000000000111`
	mo2 = `123456789012`
	d1 = `0000000001111111111222222222233`
	d2 = `1234567890123456789012345678901`
	h1 = `000000000011111111112222`
	h2 = `012345678901234567890123`
	mi1 = `000000000011111111112222222222333333333344444444445555555555`
	mi2 = `012345678901234567890123456789012345678901234567890123456789`
	s1 = `000000000011111111112222222222333333333344444444445555555555`
	s2 = `012345678901234567890123456789012345678901234567890123456789`
	ns1 = `0123456789`
)
func formatTimeHeader(when time.Time) ([]byte, int, int) { | |||
y, mo, d := when.Date() | |||
h, mi, s := when.Clock() | |||
ns := when.Nanosecond() / 1000000 | |||
//len("2006/01/02 15:04:05.123 ")==24 | |||
var buf [24]byte | |||
buf[0] = y1[y/1000%10] | |||
buf[1] = y2[y/100] | |||
buf[2] = y3[y-y/100*100] | |||
buf[3] = y4[y-y/100*100] | |||
buf[4] = '/' | |||
buf[5] = mo1[mo-1] | |||
buf[6] = mo2[mo-1] | |||
buf[7] = '/' | |||
buf[8] = d1[d-1] | |||
buf[9] = d2[d-1] | |||
buf[10] = ' ' | |||
buf[11] = h1[h] | |||
buf[12] = h2[h] | |||
buf[13] = ':' | |||
buf[14] = mi1[mi] | |||
buf[15] = mi2[mi] | |||
buf[16] = ':' | |||
buf[17] = s1[s] | |||
buf[18] = s2[s] | |||
buf[19] = '.' | |||
buf[20] = ns1[ns/100] | |||
buf[21] = ns1[ns%100/10] | |||
buf[22] = ns1[ns%10] | |||
buf[23] = ' ' | |||
return buf[0:], d, h | |||
} | |||
// ANSI SGR escape sequences used to colorize console output.
// The first group sets a bright foreground plus a colored background
// (e.g. green decodes to "\x1b[97;42m"); the w32* variants set the
// background color only (e.g. w32Green is "\x1b[42m") for the more
// limited Windows console, and reset ("\x1b[0m") restores defaults.
var (
	green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
	white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
	yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
	red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
	blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
	magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
	cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
	w32Green = string([]byte{27, 91, 52, 50, 109})
	w32White = string([]byte{27, 91, 52, 55, 109})
	w32Yellow = string([]byte{27, 91, 52, 51, 109})
	w32Red = string([]byte{27, 91, 52, 49, 109})
	w32Blue = string([]byte{27, 91, 52, 52, 109})
	w32Magenta = string([]byte{27, 91, 52, 53, 109})
	w32Cyan = string([]byte{27, 91, 52, 54, 109})
	reset = string([]byte{27, 91, 48, 109})
)
// once guards the lazy one-time initialization of colorMap (see initColor).
var once sync.Once

// colorMap maps color names and HTTP method names to escape sequences;
// it is nil until initColor has run.
var colorMap map[string]string
// initColor populates colorMap, first swapping in the Windows-console
// variants of each color when running on Windows. It must be invoked via
// once.Do (see ColorByStatus / ColorByMethod).
func initColor() {
	if runtime.GOOS == "windows" {
		green = w32Green
		white = w32White
		yellow = w32Yellow
		red = w32Red
		blue = w32Blue
		magenta = w32Magenta
		cyan = w32Cyan
	}
	colorMap = map[string]string{
		//by color
		"green": green,
		"white": white,
		"yellow": yellow,
		"red": red,
		//by method
		"GET": blue,
		"POST": cyan,
		"PUT": yellow,
		"DELETE": red,
		"PATCH": green,
		"HEAD": magenta,
		"OPTIONS": white,
	}
}
// ColorByStatus return color by http code | |||
// 2xx return Green | |||
// 3xx return White | |||
// 4xx return Yellow | |||
// 5xx return Red | |||
func ColorByStatus(code int) string { | |||
once.Do(initColor) | |||
switch { | |||
case code >= 200 && code < 300: | |||
return colorMap["green"] | |||
case code >= 300 && code < 400: | |||
return colorMap["white"] | |||
case code >= 400 && code < 500: | |||
return colorMap["yellow"] | |||
default: | |||
return colorMap["red"] | |||
} | |||
} | |||
// ColorByMethod return color by http code | |||
func ColorByMethod(method string) string { | |||
once.Do(initColor) | |||
if c := colorMap[method]; c != "" { | |||
return c | |||
} | |||
return reset | |||
} | |||
// ResetColor returns the escape sequence that restores default console colors.
func ResetColor() string {
	return reset
}
@@ -0,0 +1,57 @@ | |||
// Copyright 2016 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"testing" | |||
"time" | |||
) | |||
// TestFormatHeader_0 cross-checks formatTimeHeader against time.Format
// over exponentially spaced instants (the step doubles each iteration)
// from now until the year 2100.
func TestFormatHeader_0(t *testing.T) {
	tm := time.Now()
	// Guard: the table-driven formatter is only exercised below 2100 here.
	if tm.Year() >= 2100 {
		t.FailNow()
	}
	dur := time.Second
	for {
		if tm.Year() >= 2100 {
			break
		}
		h, _, _ := formatTimeHeader(tm)
		if tm.Format("2006/01/02 15:04:05.000 ") != string(h) {
			t.Log(tm)
			t.FailNow()
		}
		tm = tm.Add(dur)
		dur *= 2
	}
}
// TestFormatHeader_1 cross-checks formatTimeHeader against time.Format
// for every second of one full year starting from now.
func TestFormatHeader_1(t *testing.T) {
	tm := time.Now()
	year := tm.Year()
	dur := time.Second
	for {
		if tm.Year() >= year+1 {
			break
		}
		h, _, _ := formatTimeHeader(tm)
		if tm.Format("2006/01/02 15:04:05.000 ") != string(h) {
			t.Log(tm)
			t.FailNow()
		}
		tm = tm.Add(dur)
	}
}
@@ -0,0 +1,119 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"encoding/json" | |||
"time" | |||
) | |||
// A filesLogWriter manages several fileLogWriter
// filesLogWriter will write logs to the file in json configuration and write the same level log to correspond file
// means if the file name in configuration is project.log filesLogWriter will create project.error.log/project.debug.log
// and write the error-level logs to project.error.log and write the debug-level logs to project.debug.log
// the rotate attribute also acts like fileLogWriter
type multiFileLogWriter struct {
	// writers holds one per-level writer per separated level (indexed by
	// level constant); the extra last slot holds the full-log writer.
	writers [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter
	fullLogWriter *fileLogWriter
	Separate []string `json:"separate"` // level names to split into their own files
}

// levelNames maps each level constant (by index) to the name used both in
// the "separate" config field and in the generated file names.
var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"}
// Init file logger with json config.
// jsonConfig like:
// {
// "filename":"logs/beego.log",
// "maxLines":0,
// "maxsize":0,
// "daily":true,
// "maxDays":15,
// "rotate":true,
// "perm":0600,
// "separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
// }
//
// It creates the full-log writer from the config verbatim, then one extra
// fileLogWriter per level named in "separate", each writing to
// "<name>.<level><suffix>" and filtering on exactly that level.
func (f *multiFileLogWriter) Init(config string) error {
	// The writer for the full (unfiltered) log, configured verbatim.
	writer := newFileWriter().(*fileLogWriter)
	err := writer.Init(config)
	if err != nil {
		return err
	}
	f.fullLogWriter = writer
	f.writers[LevelDebug+1] = writer
	//unmarshal "separate" field to f.Separate
	json.Unmarshal([]byte(config), f)
	// Re-parse the config generically so the per-level configs can be
	// re-marshaled with an overridden filename and level.
	jsonMap := map[string]interface{}{}
	json.Unmarshal([]byte(config), &jsonMap)
	for i := LevelEmergency; i < LevelDebug+1; i++ {
		for _, v := range f.Separate {
			if v == levelNames[i] {
				jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." + levelNames[i] + f.fullLogWriter.suffix
				jsonMap["level"] = i
				bs, _ := json.Marshal(jsonMap)
				writer = newFileWriter().(*fileLogWriter)
				err := writer.Init(string(bs))
				if err != nil {
					return err
				}
				f.writers[i] = writer
			}
		}
	}
	return nil
}
func (f *multiFileLogWriter) Destroy() { | |||
for i := 0; i < len(f.writers); i++ { | |||
if f.writers[i] != nil { | |||
f.writers[i].Destroy() | |||
} | |||
} | |||
} | |||
func (f *multiFileLogWriter) WriteMsg(when time.Time, msg string, level int) error { | |||
if f.fullLogWriter != nil { | |||
f.fullLogWriter.WriteMsg(when, msg, level) | |||
} | |||
for i := 0; i < len(f.writers)-1; i++ { | |||
if f.writers[i] != nil { | |||
if level == f.writers[i].Level { | |||
f.writers[i].WriteMsg(when, msg, level) | |||
} | |||
} | |||
} | |||
return nil | |||
} | |||
func (f *multiFileLogWriter) Flush() { | |||
for i := 0; i < len(f.writers); i++ { | |||
if f.writers[i] != nil { | |||
f.writers[i].Flush() | |||
} | |||
} | |||
} | |||
// newFilesWriter create a FileLogWriter returning as LoggerInterface. | |||
func newFilesWriter() Logger { | |||
return &multiFileLogWriter{} | |||
} | |||
// init registers the multifile adapter under AdapterMultiFile.
func init() {
	Register(AdapterMultiFile, newFilesWriter)
}
@@ -0,0 +1,78 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"bufio" | |||
"os" | |||
"strconv" | |||
"strings" | |||
"testing" | |||
) | |||
// TestFiles_1 writes one message per level through the multifile adapter
// with every level separated, then verifies that test.log contains all
// LevelDebug+1 lines while each test.<level>.log contains exactly one
// line mentioning its level name. All files are removed afterwards.
func TestFiles_1(t *testing.T) {
	log := NewLogger(10000)
	log.SetLogger("multifile", `{"filename":"test.log","separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"]}`)
	log.Debug("debug")
	log.Informational("info")
	log.Notice("notice")
	log.Warning("warning")
	log.Error("error")
	log.Alert("alert")
	log.Critical("critical")
	log.Emergency("emergency")
	// "" stands for the combined test.log; the rest are per-level files.
	fns := []string{""}
	fns = append(fns, levelNames[0:]...)
	name := "test"
	suffix := ".log"
	for _, fn := range fns {
		file := name + suffix
		if fn != "" {
			file = name + "." + fn + suffix
		}
		f, err := os.Open(file)
		if err != nil {
			t.Fatal(err)
		}
		b := bufio.NewReader(f)
		lineNum := 0
		lastLine := ""
		for {
			line, _, err := b.ReadLine()
			if err != nil {
				break
			}
			if len(line) > 0 {
				lastLine = string(line)
				lineNum++
			}
		}
		var expected = 1
		if fn == "" {
			expected = LevelDebug + 1
		}
		if lineNum != expected {
			t.Fatal(file, "has", lineNum, "lines not "+strconv.Itoa(expected)+" lines")
		}
		if lineNum == 1 {
			if !strings.Contains(lastLine, fn) {
				t.Fatal(file + " " + lastLine + " not contains the log msg " + fn)
			}
		}
		os.Remove(file)
	}
}
@@ -0,0 +1,60 @@ | |||
package logs | |||
import ( | |||
"encoding/json" | |||
"fmt" | |||
"net/http" | |||
"net/url" | |||
"time" | |||
) | |||
// SLACKWriter implements the beego Logger interface and posts log
// messages to a Slack incoming-webhook URL.
type SLACKWriter struct {
	WebhookURL string `json:"webhookurl"` // Slack incoming-webhook endpoint
	Level int `json:"level"` // messages above this level are dropped
}
// newSLACKWriter create jiaoliao writer. | |||
func newSLACKWriter() Logger { | |||
return &SLACKWriter{Level: LevelTrace} | |||
} | |||
// Init configures the SLACKWriter from a JSON config string
// (fields: "webhookurl", "level").
func (s *SLACKWriter) Init(jsonconfig string) error {
	return json.Unmarshal([]byte(jsonconfig), s)
}
// WriteMsg write message in smtp writer. | |||
// it will send an email with subject and only this message. | |||
func (s *SLACKWriter) WriteMsg(when time.Time, msg string, level int) error { | |||
if level > s.Level { | |||
return nil | |||
} | |||
text := fmt.Sprintf("{\"text\": \"%s %s\"}", when.Format("2006-01-02 15:04:05"), msg) | |||
form := url.Values{} | |||
form.Add("payload", text) | |||
resp, err := http.PostForm(s.WebhookURL, form) | |||
if err != nil { | |||
return err | |||
} | |||
defer resp.Body.Close() | |||
if resp.StatusCode != http.StatusOK { | |||
return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode) | |||
} | |||
return nil | |||
} | |||
// Flush satisfies the Logger interface; slack posts are unbuffered, so
// there is nothing to flush.
func (s *SLACKWriter) Flush() {
}
// Destroy satisfies the Logger interface; the writer holds no resources
// to release.
func (s *SLACKWriter) Destroy() {
}
// init registers the slack adapter under AdapterSlack.
func init() {
	Register(AdapterSlack, newSLACKWriter)
}
@@ -0,0 +1,149 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"crypto/tls" | |||
"encoding/json" | |||
"fmt" | |||
"net" | |||
"net/smtp" | |||
"strings" | |||
"time" | |||
) | |||
// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
type SMTPWriter struct {
	Username string `json:"username"` // SMTP auth user; blank (with blank password) means no auth
	Password string `json:"password"` // SMTP auth password
	Host string `json:"host"` // server address as "host:port"
	Subject string `json:"subject"` // email subject line
	FromAddress string `json:"fromAddress"` // envelope/header From address
	RecipientAddresses []string `json:"sendTos"` // destination addresses
	Level int `json:"level"` // messages above this level are dropped
}
// NewSMTPWriter create smtp writer. | |||
func newSMTPWriter() Logger { | |||
return &SMTPWriter{Level: LevelTrace} | |||
} | |||
// Init smtp writer with json config.
// config like:
// {
// "username":"example@gmail.com",
// "password":"password",
// "host":"smtp.gmail.com:465",
// "subject":"email title",
// "fromAddress":"from@example.com",
// "sendTos":["email1","email2"],
// "level":LevelError
// }
func (s *SMTPWriter) Init(jsonconfig string) error {
	return json.Unmarshal([]byte(jsonconfig), s)
}
func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth { | |||
if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 { | |||
return nil | |||
} | |||
return smtp.PlainAuth( | |||
"", | |||
s.Username, | |||
s.Password, | |||
host, | |||
) | |||
} | |||
// sendMail performs a full SMTP transaction by hand: dial, STARTTLS,
// optional auth, MAIL FROM / RCPT TO, DATA, and QUIT. Any failure along
// the way is returned immediately.
//
// NOTE(review): InsecureSkipVerify is true, so the server's TLS
// certificate is NOT validated — traffic is encrypted but vulnerable to
// man-in-the-middle; confirm whether this is intentional.
func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error {
	client, err := smtp.Dial(hostAddressWithPort)
	if err != nil {
		return err
	}
	// Strip the port to get the name used for TLS SNI/verification.
	host, _, _ := net.SplitHostPort(hostAddressWithPort)
	tlsConn := &tls.Config{
		InsecureSkipVerify: true,
		ServerName: host,
	}
	if err = client.StartTLS(tlsConn); err != nil {
		return err
	}
	if auth != nil {
		if err = client.Auth(auth); err != nil {
			return err
		}
	}
	if err = client.Mail(fromAddress); err != nil {
		return err
	}
	for _, rec := range recipients {
		if err = client.Rcpt(rec); err != nil {
			return err
		}
	}
	w, err := client.Data()
	if err != nil {
		return err
	}
	_, err = w.Write(msgContent)
	if err != nil {
		return err
	}
	err = w.Close()
	if err != nil {
		return err
	}
	return client.Quit()
}
// WriteMsg sends the log message as a plain-text email with the configured
// subject. Messages above the configured level are silently dropped.
func (s *SMTPWriter) WriteMsg(when time.Time, msg string, level int) error {
	if level > s.Level {
		return nil
	}
	// s.Host is "host:port"; auth only needs the host part.
	hp := strings.Split(s.Host, ":")
	// Set up authentication information.
	auth := s.getSMTPAuth(hp[0])
	// Connect to the server, authenticate, set the sender and recipient,
	// and send the email all in one step.
	contentType := "Content-Type: text/plain" + "; charset=UTF-8"
	mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress +
		">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", when.Format("2006-01-02 15:04:05")) + msg)
	return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg)
}
// Flush satisfies the Logger interface; emails are sent immediately, so
// there is nothing to flush.
func (s *SMTPWriter) Flush() {
}
// Destroy satisfies the Logger interface; the writer holds no resources
// to release.
func (s *SMTPWriter) Destroy() {
}
// init registers the smtp adapter under AdapterMail.
func init() {
	Register(AdapterMail, newSMTPWriter)
}
@@ -0,0 +1,27 @@ | |||
// Copyright 2014 beego Author. All Rights Reserved. | |||
// | |||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||
// you may not use this file except in compliance with the License. | |||
// You may obtain a copy of the License at | |||
// | |||
// http://www.apache.org/licenses/LICENSE-2.0 | |||
// | |||
// Unless required by applicable law or agreed to in writing, software | |||
// distributed under the License is distributed on an "AS IS" BASIS, | |||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
// See the License for the specific language governing permissions and | |||
// limitations under the License. | |||
package logs | |||
import ( | |||
"testing" | |||
"time" | |||
) | |||
// TestSmtp exercises the smtp adapter end-to-end.
// NOTE(review): this test needs network access and real credentials (the
// ones in the config are placeholders) and sleeps 30s waiting for
// delivery — it cannot pass in a sandboxed CI environment as-is.
func TestSmtp(t *testing.T) {
	log := NewLogger(10000)
	log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
	log.Critical("sendmail critical")
	time.Sleep(time.Second * 30)
}