parent 97156c3ad3
commit b643a816cc
|
@@ -168,9 +168,15 @@
|
|||
]
|
||||
revision = "dd2ff4accc098aceecb86b36eaa7829b2a17b1c9"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/natefinch/lumberjack.v2"
|
||||
packages = ["."]
|
||||
revision = "a96e63847dc3c67d17befa69c303767e2f84e54f"
|
||||
version = "v2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "e5ab398cf5b03e4f180453c2386951fa2fa56652e64628eb38d763adf7ec596a"
|
||||
inputs-digest = "32f9b1bb4dd9f1ca13e9daedf85fc6cc9f3a97a023171a32ac7a2144ba9c1956"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
|
@@ -22,6 +22,10 @@
|
|||
branch = "master"
|
||||
name = "github.com/dchest/safefile"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/facebookgo/pidfile"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-immutable-radix"
|
||||
|
@@ -59,5 +63,5 @@
|
|||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/facebookgo/pidfile"
|
||||
name = "gopkg.in/natefinch/lumberjack.v2"
|
||||
version = "2.1.0"
|
||||
|
|
|
@@ -52,6 +52,9 @@ type Config struct {
|
|||
FallbackResolver string `toml:"fallback_resolver"`
|
||||
IgnoreSystemDNS bool `toml:"ignore_system_dns"`
|
||||
AllWeeklyRanges map[string]WeeklyRangesStr `toml:"schedules"`
|
||||
LogMaxSize int `toml:"log_files_max_size"`
|
||||
LogMaxAge int `toml:"log_files_max_age"`
|
||||
LogMaxBackups int `toml:"log_files_max_backups"`
|
||||
}
|
||||
|
||||
func newConfig() Config {
|
||||
|
@@ -75,6 +78,9 @@ func newConfig() Config {
|
|||
MaxClients: 250,
|
||||
FallbackResolver: DefaultFallbackResolver,
|
||||
IgnoreSystemDNS: false,
|
||||
LogMaxSize: 10,
|
||||
LogMaxAge: 7,
|
||||
LogMaxBackups: 1,
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -169,6 +175,9 @@ func ConfigLoad(proxy *Proxy, svcFlag *string) error {
|
|||
} else if config.LogFile != nil {
|
||||
dlog.UseLogFile(*config.LogFile)
|
||||
}
|
||||
proxy.logMaxSize = config.LogMaxSize
|
||||
proxy.logMaxAge = config.LogMaxAge
|
||||
proxy.logMaxBackups = config.LogMaxBackups
|
||||
proxy.xTransport.fallbackResolver = config.FallbackResolver
|
||||
if len(config.FallbackResolver) > 0 {
|
||||
proxy.xTransport.ignoreSystemDNS = config.IgnoreSystemDNS
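The three new `log_files_max_*` settings map directly onto the `LogMaxSize`, `LogMaxAge` and `LogMaxBackups` fields added to `Config` above, with defaults of 10 MB, 7 days and 1 backup set in `newConfig()`. A minimal sketch of how such a TOML fragment decodes, assuming the BurntSushi/toml decoder (the struct tags above follow its conventions; the exact decoder wiring is not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Only the new log-rotation fields from the Config struct above.
type logRotationConfig struct {
	LogMaxSize    int `toml:"log_files_max_size"`
	LogMaxAge     int `toml:"log_files_max_age"`
	LogMaxBackups int `toml:"log_files_max_backups"`
}

func main() {
	// Hypothetical configuration fragment using the defaults from newConfig().
	fragment := `
log_files_max_size = 10
log_files_max_age = 7
log_files_max_backups = 1
`
	var cfg logRotationConfig
	if _, err := toml.Decode(fragment, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("rotate at %d MB, keep %d backups for %d days\n",
		cfg.LogMaxSize, cfg.LogMaxBackups, cfg.LogMaxAge)
}
```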
|
||||
|
|
|
@@ -5,22 +5,20 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/hashicorp/go-immutable-radix"
|
||||
"github.com/jedisct1/dlog"
|
||||
"github.com/miekg/dns"
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
type PluginBlockIP struct {
|
||||
sync.Mutex
|
||||
blockedPrefixes *iradix.Tree
|
||||
blockedIPs map[string]interface{}
|
||||
outFd *os.File
|
||||
logger *lumberjack.Logger
|
||||
format string
|
||||
}
|
||||
|
||||
|
@@ -75,13 +73,7 @@ func (plugin *PluginBlockIP) Init(proxy *Proxy) error {
|
|||
if len(proxy.blockIPLogFile) == 0 {
|
||||
return nil
|
||||
}
|
||||
plugin.Lock()
|
||||
defer plugin.Unlock()
|
||||
outFd, err := os.OpenFile(proxy.blockIPLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
plugin.outFd = outFd
|
||||
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.blockIPLogFile, Compress: true}
|
||||
plugin.format = proxy.blockIPFormat
|
||||
|
||||
return nil
|
||||
|
@@ -126,7 +118,7 @@ func (plugin *PluginBlockIP) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
|
|||
}
|
||||
if reject {
|
||||
pluginsState.action = PluginsActionReject
|
||||
if plugin.outFd != nil {
|
||||
if plugin.logger != nil {
|
||||
questions := msg.Question
|
||||
if len(questions) != 1 {
|
||||
return nil
|
||||
|
@@ -153,12 +145,10 @@ func (plugin *PluginBlockIP) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
|
|||
} else {
|
||||
dlog.Fatalf("Unexpected log format: [%s]", plugin.format)
|
||||
}
|
||||
plugin.Lock()
|
||||
if plugin.outFd == nil {
|
||||
if plugin.logger == nil {
|
||||
return errors.New("Log file not initialized")
|
||||
}
|
||||
plugin.outFd.WriteString(line)
|
||||
defer plugin.Unlock()
|
||||
plugin.logger.Write([]byte(line))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@@ -5,17 +5,16 @@ import (
|
|||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/hashicorp/go-immutable-radix"
|
||||
"github.com/jedisct1/dlog"
|
||||
"github.com/miekg/dns"
|
||||
lumberjack "gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
type PluginBlockType int
|
||||
|
@@ -29,14 +28,13 @@ const (
|
|||
)
|
||||
|
||||
type PluginBlockName struct {
|
||||
sync.Mutex
|
||||
blockedPrefixes *iradix.Tree
|
||||
blockedSuffixes *iradix.Tree
|
||||
allWeeklyRanges *map[string]WeeklyRanges
|
||||
weeklyRangesIndirect map[string]*WeeklyRanges
|
||||
blockedSubstrings []string
|
||||
blockedPatterns []string
|
||||
outFd *os.File
|
||||
logger *lumberjack.Logger
|
||||
format string
|
||||
}
|
||||
|
||||
|
@@ -157,13 +155,7 @@ func (plugin *PluginBlockName) Init(proxy *Proxy) error {
|
|||
if len(proxy.blockNameLogFile) == 0 {
|
||||
return nil
|
||||
}
|
||||
plugin.Lock()
|
||||
defer plugin.Unlock()
|
||||
outFd, err := os.OpenFile(proxy.blockNameLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
plugin.outFd = outFd
|
||||
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.blockNameLogFile, Compress: true}
|
||||
plugin.format = proxy.blockNameFormat
|
||||
|
||||
return nil
|
||||
|
@@ -236,7 +228,7 @@ func (plugin *PluginBlockName) Eval(pluginsState *PluginsState, msg *dns.Msg) er
|
|||
}
|
||||
if reject {
|
||||
pluginsState.action = PluginsActionReject
|
||||
if plugin.outFd != nil {
|
||||
if plugin.logger != nil {
|
||||
var clientIPStr string
|
||||
if pluginsState.clientProto == "udp" {
|
||||
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
||||
|
@@ -255,12 +247,10 @@ func (plugin *PluginBlockName) Eval(pluginsState *PluginsState, msg *dns.Msg) er
|
|||
} else {
|
||||
dlog.Fatalf("Unexpected log format: [%s]", plugin.format)
|
||||
}
|
||||
plugin.Lock()
|
||||
if plugin.outFd == nil {
|
||||
if plugin.logger == nil {
|
||||
return errors.New("Log file not initialized")
|
||||
}
|
||||
plugin.outFd.WriteString(line)
|
||||
defer plugin.Unlock()
|
||||
plugin.logger.Write([]byte(line))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@@ -4,18 +4,16 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jedisct1/dlog"
|
||||
"github.com/miekg/dns"
|
||||
lumberjack "gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
type PluginNxLog struct {
|
||||
sync.Mutex
|
||||
outFd *os.File
|
||||
format string
|
||||
logger *lumberjack.Logger
|
||||
format string
|
||||
}
|
||||
|
||||
func (plugin *PluginNxLog) Name() string {
|
||||
|
@@ -27,13 +25,7 @@ func (plugin *PluginNxLog) Description() string {
|
|||
}
|
||||
|
||||
func (plugin *PluginNxLog) Init(proxy *Proxy) error {
|
||||
plugin.Lock()
|
||||
defer plugin.Unlock()
|
||||
outFd, err := os.OpenFile(proxy.nxLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
plugin.outFd = outFd
|
||||
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.nxLogFile, Compress: true}
|
||||
plugin.format = proxy.nxLogFormat
|
||||
|
||||
return nil
|
||||
|
@@ -81,12 +73,10 @@ func (plugin *PluginNxLog) Eval(pluginsState *PluginsState, msg *dns.Msg) error
|
|||
} else {
|
||||
dlog.Fatalf("Unexpected log format: [%s]", plugin.format)
|
||||
}
|
||||
plugin.Lock()
|
||||
if plugin.outFd == nil {
|
||||
if plugin.logger == nil {
|
||||
return errors.New("Log file not initialized")
|
||||
}
|
||||
plugin.outFd.WriteString(line)
|
||||
defer plugin.Unlock()
|
||||
plugin.logger.Write([]byte(line))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@@ -4,18 +4,16 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jedisct1/dlog"
|
||||
"github.com/miekg/dns"
|
||||
lumberjack "gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
type PluginQueryLog struct {
|
||||
sync.Mutex
|
||||
outFd *os.File
|
||||
logger *lumberjack.Logger
|
||||
format string
|
||||
ignoredQtypes []string
|
||||
}
|
||||
|
@@ -29,13 +27,7 @@ func (plugin *PluginQueryLog) Description() string {
|
|||
}
|
||||
|
||||
func (plugin *PluginQueryLog) Init(proxy *Proxy) error {
|
||||
plugin.Lock()
|
||||
defer plugin.Unlock()
|
||||
outFd, err := os.OpenFile(proxy.queryLogFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
plugin.outFd = outFd
|
||||
plugin.logger = &lumberjack.Logger{LocalTime: true, MaxSize: proxy.logMaxSize, MaxAge: proxy.logMaxAge, MaxBackups: proxy.logMaxBackups, Filename: proxy.queryLogFile, Compress: true}
|
||||
plugin.format = proxy.queryLogFormat
|
||||
plugin.ignoredQtypes = proxy.queryLogIgnoredQtypes
|
||||
|
||||
|
@@ -88,11 +80,9 @@ func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) err
|
|||
} else {
|
||||
dlog.Fatalf("Unexpected log format: [%s]", plugin.format)
|
||||
}
|
||||
plugin.Lock()
|
||||
if plugin.outFd == nil {
|
||||
if plugin.logger == nil {
|
||||
return errors.New("Log file not initialized")
|
||||
}
|
||||
plugin.outFd.WriteString(line)
|
||||
defer plugin.Unlock()
|
||||
plugin.logger.Write([]byte(line))
|
||||
return nil
|
||||
}
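Across all four plugins touched above (PluginBlockIP, PluginBlockName, PluginNxLog, PluginQueryLog) the change is the same: the raw `*os.File` plus `WriteString` is replaced by a `*lumberjack.Logger`, which is an `io.WriteCloser`, so `Eval` just calls `Write` and rotation happens transparently. A minimal sketch of the shared pattern, with a hypothetical filename and log line (the real values come from the `proxy` fields shown in each `Init`):

```go
package main

import (
	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// Built once in Init(): sizes come from proxy.logMaxSize / logMaxAge / logMaxBackups,
	// which default to 10 MB, 7 days and 1 backup per the config changes above.
	logger := &lumberjack.Logger{
		Filename:   "query.log", // hypothetical; each plugin uses its own proxy.*LogFile
		MaxSize:    10,          // megabytes
		MaxAge:     7,           // days
		MaxBackups: 1,
		LocalTime:  true,
		Compress:   true,
	}
	defer logger.Close()

	// In Eval(): the formatted line is written through the rolling logger.
	line := "example.com\tA\tPASS\n" // hypothetical log line
	if _, err := logger.Write([]byte(line)); err != nil {
		// the plugins return the error to the caller
		panic(err)
	}
}
```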
|
||||
|
|
|
@@ -52,6 +52,9 @@ type Proxy struct {
|
|||
maxClients uint32
|
||||
xTransport *XTransport
|
||||
allWeeklyRanges *map[string]WeeklyRanges
|
||||
logMaxSize int
|
||||
logMaxAge int
|
||||
logMaxBackups int
|
||||
}
|
||||
|
||||
func (proxy *Proxy) StartProxy() {
|
||||
|
|
|
@@ -0,0 +1,23 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
|
@@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Nate Finch
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@@ -0,0 +1,174 @@
|
|||
# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://drone.io/github.com/natefinch/lumberjack/status.png)](https://drone.io/github.com/natefinch/lumberjack/latest) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
|
||||
|
||||
### Lumberjack is a Go package for writing logs to rolling files.
|
||||
|
||||
Package lumberjack provides a rolling logger.
|
||||
|
||||
Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
|
||||
thusly:
|
||||
|
||||
import "gopkg.in/natefinch/lumberjack.v2"
|
||||
|
||||
The package name remains simply lumberjack, and the code resides at
|
||||
https://github.com/natefinch/lumberjack under the v2.0 branch.
|
||||
|
||||
Lumberjack is intended to be one part of a logging infrastructure.
|
||||
It is not an all-in-one solution, but instead is a pluggable
|
||||
component at the bottom of the logging stack that simply controls the files
|
||||
to which logs are written.
|
||||
|
||||
Lumberjack plays well with any logging package that can write to an
|
||||
io.Writer, including the standard library's log package.
|
||||
|
||||
Lumberjack assumes that only one process is writing to the output files.
|
||||
Using the same lumberjack configuration from multiple processes on the same
|
||||
machine will result in improper behavior.
|
||||
|
||||
|
||||
**Example**
|
||||
|
||||
To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
|
||||
|
||||
Code:
|
||||
|
||||
```go
|
||||
log.SetOutput(&lumberjack.Logger{
|
||||
Filename: "/var/log/myapp/foo.log",
|
||||
MaxSize: 500, // megabytes
|
||||
MaxBackups: 3,
|
||||
MaxAge: 28, //days
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
|
||||
## type Logger
|
||||
``` go
|
||||
type Logger struct {
|
||||
// Filename is the file to write logs to. Backup log files will be retained
|
||||
// in the same directory. It uses <processname>-lumberjack.log in
|
||||
// os.TempDir() if empty.
|
||||
Filename string `json:"filename" yaml:"filename"`
|
||||
|
||||
// MaxSize is the maximum size in megabytes of the log file before it gets
|
||||
// rotated. It defaults to 100 megabytes.
|
||||
MaxSize int `json:"maxsize" yaml:"maxsize"`
|
||||
|
||||
// MaxAge is the maximum number of days to retain old log files based on the
|
||||
// timestamp encoded in their filename. Note that a day is defined as 24
|
||||
// hours and may not exactly correspond to calendar days due to daylight
|
||||
// savings, leap seconds, etc. The default is not to remove old log files
|
||||
// based on age.
|
||||
MaxAge int `json:"maxage" yaml:"maxage"`
|
||||
|
||||
// MaxBackups is the maximum number of old log files to retain. The default
|
||||
// is to retain all old log files (though MaxAge may still cause them to get
|
||||
// deleted.)
|
||||
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
|
||||
|
||||
// LocalTime determines if the time used for formatting the timestamps in
|
||||
// backup files is the computer's local time. The default is to use UTC
|
||||
// time.
|
||||
LocalTime bool `json:"localtime" yaml:"localtime"`
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
Logger is an io.WriteCloser that writes to the specified filename.
|
||||
|
||||
Logger opens or creates the logfile on first Write. If the file exists and
|
||||
is less than MaxSize megabytes, lumberjack will open and append to that file.
|
||||
If the file exists and its size is >= MaxSize megabytes, the file is renamed
|
||||
by putting the current time in a timestamp in the name immediately before the
|
||||
file's extension (or the end of the filename if there's no extension). A new
|
||||
log file is then created using original filename.
|
||||
|
||||
Whenever a write would cause the current log file exceed MaxSize megabytes,
|
||||
the current file is closed, renamed, and a new log file created with the
|
||||
original name. Thus, the filename you give Logger is always the "current" log
|
||||
file.
|
||||
|
||||
Backups use the log file name given to Logger, in the form `name-timestamp.ext`
|
||||
where name is the filename without the extension, timestamp is the time at which
|
||||
the log was rotated formatted with the time.Time format of
|
||||
`2006-01-02T15-04-05.000` and the extension is the original extension. For
|
||||
example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created
|
||||
at 6:30pm on Nov 11 2016 would use the filename
|
||||
`/var/log/foo/server-2016-11-04T18-30-00.000.log`
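A quick illustration of the naming scheme described above, using the `2006-01-02T15-04-05.000` reference layout; the actual implementation is `backupName()` in the lumberjack source included later in this change:

```go
package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func main() {
	name := "/var/log/foo/server.log"
	ext := filepath.Ext(name)           // ".log"
	prefix := name[:len(name)-len(ext)] // "/var/log/foo/server"

	// Timestamp matching the example backup filename above.
	t := time.Date(2016, time.November, 4, 18, 30, 0, 0, time.UTC)
	backup := fmt.Sprintf("%s-%s%s", prefix, t.Format("2006-01-02T15-04-05.000"), ext)
	fmt.Println(backup) // /var/log/foo/server-2016-11-04T18-30-00.000.log
}
```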
|
||||
|
||||
### Cleaning Up Old Log Files
|
||||
Whenever a new logfile gets created, old log files may be deleted. The most
|
||||
recent files according to the encoded timestamp will be retained, up to a
|
||||
number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
|
||||
with an encoded timestamp older than MaxAge days are deleted, regardless of
|
||||
MaxBackups. Note that the time encoded in the timestamp is the rotation
|
||||
time, which may differ from the last time that file was written to.
|
||||
|
||||
If MaxBackups and MaxAge are both 0, no old log files will be deleted.
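The age cutoff described here can be sketched in a few lines; the real pruning lives in `millRunOnce()` in the lumberjack source below, which combines the MaxBackups count, the MaxAge cutoff and optional compression:

```go
package main

import (
	"fmt"
	"time"
)

// isExpired reports whether a backup whose encoded timestamp is ts should be
// removed under a MaxAge of maxAgeDays (a "day" is a flat 24 hours, as noted above).
func isExpired(ts time.Time, maxAgeDays int, now time.Time) bool {
	if maxAgeDays <= 0 {
		return false // MaxAge == 0 means never delete by age
	}
	cutoff := now.Add(-time.Duration(maxAgeDays) * 24 * time.Hour)
	return ts.Before(cutoff)
}

func main() {
	now := time.Now()
	old := now.Add(-8 * 24 * time.Hour)
	fmt.Println(isExpired(old, 7, now)) // true: older than 7 days
}
```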
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### func (\*Logger) Close
|
||||
``` go
|
||||
func (l *Logger) Close() error
|
||||
```
|
||||
Close implements io.Closer, and closes the current logfile.
|
||||
|
||||
|
||||
|
||||
### func (\*Logger) Rotate
|
||||
``` go
|
||||
func (l *Logger) Rotate() error
|
||||
```
|
||||
Rotate causes Logger to close the existing log file and immediately create a
|
||||
new one. This is a helper function for applications that want to initiate
|
||||
rotations outside of the normal rotation rules, such as in response to
|
||||
SIGHUP. After rotating, this initiates a cleanup of old log files according
|
||||
to the normal rules.
|
||||
|
||||
**Example**
|
||||
|
||||
Example of how to rotate in response to SIGHUP.
|
||||
|
||||
Code:
|
||||
|
||||
```go
|
||||
l := &lumberjack.Logger{}
|
||||
log.SetOutput(l)
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, syscall.SIGHUP)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
<-c
|
||||
l.Rotate()
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
### func (\*Logger) Write
|
||||
``` go
|
||||
func (l *Logger) Write(p []byte) (n int, err error)
|
||||
```
|
||||
Write implements io.Writer. If a write would cause the log file to be larger
|
||||
than MaxSize, the file is closed, renamed to include a timestamp of the
|
||||
current time, and a new log file is created using the original log file name.
|
||||
If the length of the write is greater than MaxSize, an error is returned.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- - -
|
||||
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
|
|
@@ -0,0 +1,11 @@
|
|||
// +build !linux
|
||||
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func chown(_ string, _ os.FileInfo) error {
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,19 @@
|
|||
package lumberjack
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// os_Chown is a var so we can mock it out during tests.
|
||||
var os_Chown = os.Chown
|
||||
|
||||
func chown(name string, info os.FileInfo) error {
|
||||
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Close()
|
||||
stat := info.Sys().(*syscall.Stat_t)
|
||||
return os_Chown(name, int(stat.Uid), int(stat.Gid))
|
||||
}
|
|
@@ -0,0 +1,18 @@
|
|||
package lumberjack_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
// To use lumberjack with the standard library's log package, just pass it into
|
||||
// the SetOutput function when your application starts.
|
||||
func Example() {
|
||||
log.SetOutput(&lumberjack.Logger{
|
||||
Filename: "/var/log/myapp/foo.log",
|
||||
MaxSize: 500, // megabytes
|
||||
MaxBackups: 3,
|
||||
MaxAge: 28, // days
|
||||
})
|
||||
}
|
|
@@ -0,0 +1,205 @@
|
|||
// +build linux
|
||||
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestMaintainMode(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
dir := makeTempDir("TestMaintainMode", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
|
||||
mode := os.FileMode(0600)
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
|
||||
isNil(err, t)
|
||||
f.Close()
|
||||
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxBackups: 1,
|
||||
MaxSize: 100, // megabytes
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
err = l.Rotate()
|
||||
isNil(err, t)
|
||||
|
||||
filename2 := backupFile(dir)
|
||||
info, err := os.Stat(filename)
|
||||
isNil(err, t)
|
||||
info2, err := os.Stat(filename2)
|
||||
isNil(err, t)
|
||||
equals(mode, info.Mode(), t)
|
||||
equals(mode, info2.Mode(), t)
|
||||
}
|
||||
|
||||
func TestMaintainOwner(t *testing.T) {
|
||||
fakeFS := newFakeFS()
|
||||
os_Chown = fakeFS.Chown
|
||||
os_Stat = fakeFS.Stat
|
||||
defer func() {
|
||||
os_Chown = os.Chown
|
||||
os_Stat = os.Stat
|
||||
}()
|
||||
currentTime = fakeTime
|
||||
dir := makeTempDir("TestMaintainOwner", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644)
|
||||
isNil(err, t)
|
||||
f.Close()
|
||||
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxBackups: 1,
|
||||
MaxSize: 100, // megabytes
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
err = l.Rotate()
|
||||
isNil(err, t)
|
||||
|
||||
equals(555, fakeFS.files[filename].uid, t)
|
||||
equals(666, fakeFS.files[filename].gid, t)
|
||||
}
|
||||
|
||||
func TestCompressMaintainMode(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
|
||||
dir := makeTempDir("TestCompressMaintainMode", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
|
||||
mode := os.FileMode(0600)
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
|
||||
isNil(err, t)
|
||||
f.Close()
|
||||
|
||||
l := &Logger{
|
||||
Compress: true,
|
||||
Filename: filename,
|
||||
MaxBackups: 1,
|
||||
MaxSize: 100, // megabytes
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
err = l.Rotate()
|
||||
isNil(err, t)
|
||||
|
||||
// we need to wait a little bit since the files get compressed on a different
|
||||
// goroutine.
|
||||
<-time.After(10 * time.Millisecond)
|
||||
|
||||
// a compressed version of the log file should now exist with the correct
|
||||
// mode.
|
||||
filename2 := backupFile(dir)
|
||||
info, err := os.Stat(filename)
|
||||
isNil(err, t)
|
||||
info2, err := os.Stat(filename2+compressSuffix)
|
||||
isNil(err, t)
|
||||
equals(mode, info.Mode(), t)
|
||||
equals(mode, info2.Mode(), t)
|
||||
}
|
||||
|
||||
func TestCompressMaintainOwner(t *testing.T) {
|
||||
fakeFS := newFakeFS()
|
||||
os_Chown = fakeFS.Chown
|
||||
os_Stat = fakeFS.Stat
|
||||
defer func() {
|
||||
os_Chown = os.Chown
|
||||
os_Stat = os.Stat
|
||||
}()
|
||||
currentTime = fakeTime
|
||||
dir := makeTempDir("TestCompressMaintainOwner", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0644)
|
||||
isNil(err, t)
|
||||
f.Close()
|
||||
|
||||
l := &Logger{
|
||||
Compress: true,
|
||||
Filename: filename,
|
||||
MaxBackups: 1,
|
||||
MaxSize: 100, // megabytes
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
err = l.Rotate()
|
||||
isNil(err, t)
|
||||
|
||||
// we need to wait a little bit since the files get compressed on a different
|
||||
// goroutine.
|
||||
<-time.After(10 * time.Millisecond)
|
||||
|
||||
// a compressed version of the log file should now exist with the correct
|
||||
// owner.
|
||||
filename2 := backupFile(dir)
|
||||
equals(555, fakeFS.files[filename2+compressSuffix].uid, t)
|
||||
equals(666, fakeFS.files[filename2+compressSuffix].gid, t)
|
||||
}
|
||||
|
||||
type fakeFile struct {
|
||||
uid int
|
||||
gid int
|
||||
}
|
||||
|
||||
type fakeFS struct {
|
||||
files map[string]fakeFile
|
||||
}
|
||||
|
||||
func newFakeFS() *fakeFS {
|
||||
return &fakeFS{files: make(map[string]fakeFile)}
|
||||
}
|
||||
|
||||
func (fs *fakeFS) Chown(name string, uid, gid int) error {
|
||||
fs.files[name] = fakeFile{uid: uid, gid: gid}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *fakeFS) Stat(name string) (os.FileInfo, error) {
|
||||
info, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stat := info.Sys().(*syscall.Stat_t)
|
||||
stat.Uid = 555
|
||||
stat.Gid = 666
|
||||
return info, nil
|
||||
}
|
|
@@ -0,0 +1,541 @@
|
|||
// Package lumberjack provides a rolling logger.
|
||||
//
|
||||
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
|
||||
// thusly:
|
||||
//
|
||||
// import "gopkg.in/natefinch/lumberjack.v2"
|
||||
//
|
||||
// The package name remains simply lumberjack, and the code resides at
|
||||
// https://github.com/natefinch/lumberjack under the v2.0 branch.
|
||||
//
|
||||
// Lumberjack is intended to be one part of a logging infrastructure.
|
||||
// It is not an all-in-one solution, but instead is a pluggable
|
||||
// component at the bottom of the logging stack that simply controls the files
|
||||
// to which logs are written.
|
||||
//
|
||||
// Lumberjack plays well with any logging package that can write to an
|
||||
// io.Writer, including the standard library's log package.
|
||||
//
|
||||
// Lumberjack assumes that only one process is writing to the output files.
|
||||
// Using the same lumberjack configuration from multiple processes on the same
|
||||
// machine will result in improper behavior.
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
backupTimeFormat = "2006-01-02T15-04-05.000"
|
||||
compressSuffix = ".gz"
|
||||
defaultMaxSize = 100
|
||||
)
|
||||
|
||||
// ensure we always implement io.WriteCloser
|
||||
var _ io.WriteCloser = (*Logger)(nil)
|
||||
|
||||
// Logger is an io.WriteCloser that writes to the specified filename.
|
||||
//
|
||||
// Logger opens or creates the logfile on first Write. If the file exists and
|
||||
// is less than MaxSize megabytes, lumberjack will open and append to that file.
|
||||
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
|
||||
// by putting the current time in a timestamp in the name immediately before the
|
||||
// file's extension (or the end of the filename if there's no extension). A new
|
||||
// log file is then created using original filename.
|
||||
//
|
||||
// Whenever a write would cause the current log file exceed MaxSize megabytes,
|
||||
// the current file is closed, renamed, and a new log file created with the
|
||||
// original name. Thus, the filename you give Logger is always the "current" log
|
||||
// file.
|
||||
//
|
||||
// Backups use the log file name given to Logger, in the form
|
||||
// `name-timestamp.ext` where name is the filename without the extension,
|
||||
// timestamp is the time at which the log was rotated formatted with the
|
||||
// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
|
||||
// original extension. For example, if your Logger.Filename is
|
||||
// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would
|
||||
// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
|
||||
//
|
||||
// Cleaning Up Old Log Files
|
||||
//
|
||||
// Whenever a new logfile gets created, old log files may be deleted. The most
|
||||
// recent files according to the encoded timestamp will be retained, up to a
|
||||
// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
|
||||
// with an encoded timestamp older than MaxAge days are deleted, regardless of
|
||||
// MaxBackups. Note that the time encoded in the timestamp is the rotation
|
||||
// time, which may differ from the last time that file was written to.
|
||||
//
|
||||
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
|
||||
type Logger struct {
|
||||
// Filename is the file to write logs to. Backup log files will be retained
|
||||
// in the same directory. It uses <processname>-lumberjack.log in
|
||||
// os.TempDir() if empty.
|
||||
Filename string `json:"filename" yaml:"filename"`
|
||||
|
||||
// MaxSize is the maximum size in megabytes of the log file before it gets
|
||||
// rotated. It defaults to 100 megabytes.
|
||||
MaxSize int `json:"maxsize" yaml:"maxsize"`
|
||||
|
||||
// MaxAge is the maximum number of days to retain old log files based on the
|
||||
// timestamp encoded in their filename. Note that a day is defined as 24
|
||||
// hours and may not exactly correspond to calendar days due to daylight
|
||||
// savings, leap seconds, etc. The default is not to remove old log files
|
||||
// based on age.
|
||||
MaxAge int `json:"maxage" yaml:"maxage"`
|
||||
|
||||
// MaxBackups is the maximum number of old log files to retain. The default
|
||||
// is to retain all old log files (though MaxAge may still cause them to get
|
||||
// deleted.)
|
||||
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
|
||||
|
||||
// LocalTime determines if the time used for formatting the timestamps in
|
||||
// backup files is the computer's local time. The default is to use UTC
|
||||
// time.
|
||||
LocalTime bool `json:"localtime" yaml:"localtime"`
|
||||
|
||||
// Compress determines if the rotated log files should be compressed
|
||||
// using gzip.
|
||||
Compress bool `json:"compress" yaml:"compress"`
|
||||
|
||||
size int64
|
||||
file *os.File
|
||||
mu sync.Mutex
|
||||
|
||||
millCh chan bool
|
||||
startMill sync.Once
|
||||
}
|
||||
|
||||
var (
|
||||
// currentTime exists so it can be mocked out by tests.
|
||||
currentTime = time.Now
|
||||
|
||||
// os_Stat exists so it can be mocked out by tests.
|
||||
os_Stat = os.Stat
|
||||
|
||||
// megabyte is the conversion factor between MaxSize and bytes. It is a
|
||||
// variable so tests can mock it out and not need to write megabytes of data
|
||||
// to disk.
|
||||
megabyte = 1024 * 1024
|
||||
)
|
||||
|
||||
// Write implements io.Writer. If a write would cause the log file to be larger
|
||||
// than MaxSize, the file is closed, renamed to include a timestamp of the
|
||||
// current time, and a new log file is created using the original log file name.
|
||||
// If the length of the write is greater than MaxSize, an error is returned.
|
||||
func (l *Logger) Write(p []byte) (n int, err error) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
writeLen := int64(len(p))
|
||||
if writeLen > l.max() {
|
||||
return 0, fmt.Errorf(
|
||||
"write length %d exceeds maximum file size %d", writeLen, l.max(),
|
||||
)
|
||||
}
|
||||
|
||||
if l.file == nil {
|
||||
if err = l.openExistingOrNew(len(p)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if l.size+writeLen > l.max() {
|
||||
if err := l.rotate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
n, err = l.file.Write(p)
|
||||
l.size += int64(n)
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close implements io.Closer, and closes the current logfile.
|
||||
func (l *Logger) Close() error {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
return l.close()
|
||||
}
|
||||
|
||||
// close closes the file if it is open.
|
||||
func (l *Logger) close() error {
|
||||
if l.file == nil {
|
||||
return nil
|
||||
}
|
||||
err := l.file.Close()
|
||||
l.file = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Rotate causes Logger to close the existing log file and immediately create a
|
||||
// new one. This is a helper function for applications that want to initiate
|
||||
// rotations outside of the normal rotation rules, such as in response to
|
||||
// SIGHUP. After rotating, this initiates compression and removal of old log
|
||||
// files according to the configuration.
|
||||
func (l *Logger) Rotate() error {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
return l.rotate()
|
||||
}
|
||||
|
||||
// rotate closes the current file, moves it aside with a timestamp in the name,
|
||||
// (if it exists), opens a new file with the original filename, and then runs
|
||||
// post-rotation processing and removal.
|
||||
func (l *Logger) rotate() error {
|
||||
if err := l.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.openNew(); err != nil {
|
||||
return err
|
||||
}
|
||||
l.mill()
|
||||
return nil
|
||||
}
|
||||
|
||||
// openNew opens a new log file for writing, moving any old log file out of the
|
||||
// way. This methods assumes the file has already been closed.
|
||||
func (l *Logger) openNew() error {
|
||||
err := os.MkdirAll(l.dir(), 0744)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't make directories for new logfile: %s", err)
|
||||
}
|
||||
|
||||
name := l.filename()
|
||||
mode := os.FileMode(0644)
|
||||
info, err := os_Stat(name)
|
||||
if err == nil {
|
||||
// Copy the mode off the old logfile.
|
||||
mode = info.Mode()
|
||||
// move the existing file
|
||||
newname := backupName(name, l.LocalTime)
|
||||
if err := os.Rename(name, newname); err != nil {
|
||||
return fmt.Errorf("can't rename log file: %s", err)
|
||||
}
|
||||
|
||||
// this is a no-op anywhere but linux
|
||||
if err := chown(name, info); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// we use truncate here because this should only get called when we've moved
|
||||
// the file ourselves. if someone else creates the file in the meantime,
|
||||
// just wipe out the contents.
|
||||
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open new logfile: %s", err)
|
||||
}
|
||||
l.file = f
|
||||
l.size = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// backupName creates a new filename from the given name, inserting a timestamp
|
||||
// between the filename and the extension, using the local time if requested
|
||||
// (otherwise UTC).
|
||||
func backupName(name string, local bool) string {
|
||||
dir := filepath.Dir(name)
|
||||
filename := filepath.Base(name)
|
||||
ext := filepath.Ext(filename)
|
||||
prefix := filename[:len(filename)-len(ext)]
|
||||
t := currentTime()
|
||||
if !local {
|
||||
t = t.UTC()
|
||||
}
|
||||
|
||||
timestamp := t.Format(backupTimeFormat)
|
||||
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
|
||||
}
|
||||
|
||||
// openExistingOrNew opens the logfile if it exists and if the current write
|
||||
// would not put it over MaxSize. If there is no such file or the write would
|
||||
// put it over the MaxSize, a new file is created.
|
||||
func (l *Logger) openExistingOrNew(writeLen int) error {
|
||||
l.mill()
|
||||
|
||||
filename := l.filename()
|
||||
info, err := os_Stat(filename)
|
||||
if os.IsNotExist(err) {
|
||||
return l.openNew()
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting log file info: %s", err)
|
||||
}
|
||||
|
||||
if info.Size()+int64(writeLen) >= l.max() {
|
||||
return l.rotate()
|
||||
}
|
||||
|
||||
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
// if we fail to open the old log file for some reason, just ignore
|
||||
// it and open a new log file.
|
||||
return l.openNew()
|
||||
}
|
||||
l.file = file
|
||||
l.size = info.Size()
|
||||
return nil
|
||||
}
|
||||
|
||||
// genFilename generates the name of the logfile from the current time.
|
||||
func (l *Logger) filename() string {
|
||||
if l.Filename != "" {
|
||||
return l.Filename
|
||||
}
|
||||
name := filepath.Base(os.Args[0]) + "-lumberjack.log"
|
||||
return filepath.Join(os.TempDir(), name)
|
||||
}
|
||||
|
||||
// millRunOnce performs compression and removal of stale log files.
|
||||
// Log files are compressed if enabled via configuration and old log
|
||||
// files are removed, keeping at most l.MaxBackups files, as long as
|
||||
// none of them are older than MaxAge.
|
||||
func (l *Logger) millRunOnce() error {
|
||||
if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
|
||||
return nil
|
||||
}
|
||||
|
||||
files, err := l.oldLogFiles()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var compress, remove []logInfo
|
||||
|
||||
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
|
||||
preserved := make(map[string]bool)
|
||||
var remaining []logInfo
|
||||
for _, f := range files {
|
||||
// Only count the uncompressed log file or the
|
||||
// compressed log file, not both.
|
||||
fn := f.Name()
|
||||
if strings.HasSuffix(fn, compressSuffix) {
|
||||
fn = fn[:len(fn)-len(compressSuffix)]
|
||||
}
|
||||
preserved[fn] = true
|
||||
|
||||
if len(preserved) > l.MaxBackups {
|
||||
remove = append(remove, f)
|
||||
} else {
|
||||
remaining = append(remaining, f)
|
||||
}
|
||||
}
|
||||
files = remaining
|
||||
}
|
||||
if l.MaxAge > 0 {
|
||||
diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
|
||||
cutoff := currentTime().Add(-1 * diff)
|
||||
|
||||
var remaining []logInfo
|
||||
for _, f := range files {
|
||||
if f.timestamp.Before(cutoff) {
|
||||
remove = append(remove, f)
|
||||
} else {
|
||||
remaining = append(remaining, f)
|
||||
}
|
||||
}
|
||||
files = remaining
|
||||
}
|
||||
|
||||
if l.Compress {
|
||||
for _, f := range files {
|
||||
if !strings.HasSuffix(f.Name(), compressSuffix) {
|
||||
compress = append(compress, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, f := range remove {
|
||||
errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
|
||||
if err == nil && errRemove != nil {
|
||||
err = errRemove
|
||||
}
|
||||
}
|
||||
for _, f := range compress {
|
||||
fn := filepath.Join(l.dir(), f.Name())
|
||||
errCompress := compressLogFile(fn, fn+compressSuffix)
|
||||
if err == nil && errCompress != nil {
|
||||
err = errCompress
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// millRun runs in a goroutine to manage post-rotation compression and removal
|
||||
// of old log files.
|
||||
func (l *Logger) millRun() {
|
||||
for _ = range l.millCh {
|
||||
// what am I going to do, log this?
|
||||
_ = l.millRunOnce()
|
||||
}
|
||||
}
|
||||
|
||||
// mill performs post-rotation compression and removal of stale log files,
|
||||
// starting the mill goroutine if necessary.
|
||||
func (l *Logger) mill() {
|
||||
l.startMill.Do(func() {
|
||||
l.millCh = make(chan bool, 1)
|
||||
go l.millRun()
|
||||
})
|
||||
select {
|
||||
case l.millCh <- true:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// oldLogFiles returns the list of backup log files stored in the same
|
||||
// directory as the current log file, sorted by ModTime
|
||||
func (l *Logger) oldLogFiles() ([]logInfo, error) {
|
||||
files, err := ioutil.ReadDir(l.dir())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read log file directory: %s", err)
|
||||
}
|
||||
logFiles := []logInfo{}
|
||||
|
||||
prefix, ext := l.prefixAndExt()
|
||||
|
||||
for _, f := range files {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
|
||||
logFiles = append(logFiles, logInfo{t, f})
|
||||
continue
|
||||
}
|
||||
if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
|
||||
logFiles = append(logFiles, logInfo{t, f})
|
||||
continue
|
||||
}
|
||||
// error parsing means that the suffix at the end was not generated
|
||||
// by lumberjack, and therefore it's not a backup file.
|
||||
}
|
||||
|
||||
sort.Sort(byFormatTime(logFiles))
|
||||
|
||||
return logFiles, nil
|
||||
}
|
||||
|
||||
// timeFromName extracts the formatted time from the filename by stripping off
|
||||
// the filename's prefix and extension. This prevents someone's filename from
|
||||
// confusing time.parse.
|
||||
func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
|
||||
if !strings.HasPrefix(filename, prefix) {
|
||||
return time.Time{}, errors.New("mismatched prefix")
|
||||
}
|
||||
if !strings.HasSuffix(filename, ext) {
|
||||
return time.Time{}, errors.New("mismatched extension")
|
||||
}
|
||||
ts := filename[len(prefix) : len(filename)-len(ext)]
|
||||
return time.Parse(backupTimeFormat, ts)
|
||||
}
|
||||
|
||||
// max returns the maximum size in bytes of log files before rolling.
|
||||
func (l *Logger) max() int64 {
|
||||
if l.MaxSize == 0 {
|
||||
return int64(defaultMaxSize * megabyte)
|
||||
}
|
||||
return int64(l.MaxSize) * int64(megabyte)
|
||||
}
|
||||
|
||||
// dir returns the directory for the current filename.
|
||||
func (l *Logger) dir() string {
|
||||
return filepath.Dir(l.filename())
|
||||
}
|
||||
|
||||
// prefixAndExt returns the filename part and extension part from the Logger's
|
||||
// filename.
|
||||
func (l *Logger) prefixAndExt() (prefix, ext string) {
|
||||
filename := filepath.Base(l.filename())
|
||||
ext = filepath.Ext(filename)
|
||||
prefix = filename[:len(filename)-len(ext)] + "-"
|
||||
return prefix, ext
|
||||
}
|
||||
|
||||
// compressLogFile compresses the given log file, removing the
|
||||
// uncompressed log file if successful.
|
||||
func compressLogFile(src, dst string) (err error) {
|
||||
f, err := os.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open log file: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
fi, err := os_Stat(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stat log file: %v", err)
|
||||
}
|
||||
|
||||
if err := chown(dst, fi); err != nil {
|
||||
return fmt.Errorf("failed to chown compressed log file: %v", err)
|
||||
}
|
||||
|
||||
// If this file already exists, we presume it was created by
|
||||
// a previous attempt to compress the log file.
|
||||
gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open compressed log file: %v", err)
|
||||
}
|
||||
defer gzf.Close()
|
||||
|
||||
gz := gzip.NewWriter(gzf)
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
os.Remove(dst)
|
||||
err = fmt.Errorf("failed to compress log file: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err := io.Copy(gz, f); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := gz.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := gzf.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Remove(src); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// logInfo is a convenience struct to return the filename and its embedded
|
||||
// timestamp.
|
||||
type logInfo struct {
|
||||
timestamp time.Time
|
||||
os.FileInfo
|
||||
}
|
||||
|
||||
// byFormatTime sorts by newest time formatted in the name.
|
||||
type byFormatTime []logInfo
|
||||
|
||||
func (b byFormatTime) Less(i, j int) bool {
|
||||
return b[i].timestamp.After(b[j].timestamp)
|
||||
}
|
||||
|
||||
func (b byFormatTime) Swap(i, j int) {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
|
||||
func (b byFormatTime) Len() int {
|
||||
return len(b)
|
||||
}
|
|
@@ -0,0 +1,816 @@
|
|||
package lumberjack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// !!!NOTE!!!
|
||||
//
|
||||
// Running these tests in parallel will almost certainly cause sporadic (or even
|
||||
// regular) failures, because they're all messing with the same global variable
|
||||
// that controls the logic's mocked time.Now. So... don't do that.
|
||||
|
||||
// Since all the tests uses the time to determine filenames etc, we need to
|
||||
// control the wall clock as much as possible, which means having a wall clock
|
||||
// that doesn't change unless we want it to.
|
||||
var fakeCurrentTime = time.Now()
|
||||
|
||||
func fakeTime() time.Time {
|
||||
return fakeCurrentTime
|
||||
}
|
||||
|
||||
func TestNewFile(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
|
||||
dir := makeTempDir("TestNewFile", t)
|
||||
defer os.RemoveAll(dir)
|
||||
l := &Logger{
|
||||
Filename: logFile(dir),
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
existsWithContent(logFile(dir), b, t)
|
||||
fileCount(dir, 1, t)
|
||||
}
|
||||
|
||||
func TestOpenExisting(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
dir := makeTempDir("TestOpenExisting", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
data := []byte("foo!")
|
||||
err := ioutil.WriteFile(filename, data, 0644)
|
||||
isNil(err, t)
|
||||
existsWithContent(filename, data, t)
|
||||
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
// make sure the file got appended
|
||||
existsWithContent(filename, append(data, b...), t)
|
||||
|
||||
// make sure no other files were created
|
||||
fileCount(dir, 1, t)
|
||||
}
|
||||
|
||||
func TestWriteTooLong(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
megabyte = 1
|
||||
dir := makeTempDir("TestWriteTooLong", t)
|
||||
defer os.RemoveAll(dir)
|
||||
l := &Logger{
|
||||
Filename: logFile(dir),
|
||||
MaxSize: 5,
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("booooooooooooooo!")
|
||||
n, err := l.Write(b)
|
||||
notNil(err, t)
|
||||
equals(0, n, t)
|
||||
equals(err.Error(),
|
||||
fmt.Sprintf("write length %d exceeds maximum file size %d", len(b), l.MaxSize), t)
|
||||
_, err = os.Stat(logFile(dir))
|
||||
assert(os.IsNotExist(err), t, "File exists, but should not have been created")
|
||||
}
|
||||
|
||||
func TestMakeLogDir(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
dir := time.Now().Format("TestMakeLogDir" + backupTimeFormat)
|
||||
dir = filepath.Join(os.TempDir(), dir)
|
||||
defer os.RemoveAll(dir)
|
||||
filename := logFile(dir)
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
existsWithContent(logFile(dir), b, t)
|
||||
fileCount(dir, 1, t)
|
||||
}
|
||||
|
||||
func TestDefaultFilename(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
dir := os.TempDir()
|
||||
filename := filepath.Join(dir, filepath.Base(os.Args[0])+"-lumberjack.log")
|
||||
defer os.Remove(filename)
|
||||
l := &Logger{}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
existsWithContent(filename, b, t)
|
||||
}
|
||||
|
||||
func TestAutoRotate(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
megabyte = 1
|
||||
|
||||
dir := makeTempDir("TestAutoRotate", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxSize: 10,
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
existsWithContent(filename, b, t)
|
||||
fileCount(dir, 1, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
b2 := []byte("foooooo!")
|
||||
n, err = l.Write(b2)
|
||||
isNil(err, t)
|
||||
equals(len(b2), n, t)
|
||||
|
||||
// the old logfile should be moved aside and the main logfile should have
|
||||
// only the last write in it.
|
||||
existsWithContent(filename, b2, t)
|
||||
|
||||
// the backup file will use the current fake time and have the old contents.
|
||||
existsWithContent(backupFile(dir), b, t)
|
||||
|
||||
fileCount(dir, 2, t)
|
||||
}
|
||||
|
||||
func TestFirstWriteRotate(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
megabyte = 1
|
||||
dir := makeTempDir("TestFirstWriteRotate", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxSize: 10,
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
start := []byte("boooooo!")
|
||||
err := ioutil.WriteFile(filename, start, 0600)
|
||||
isNil(err, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
// this would make us rotate
|
||||
b := []byte("fooo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
existsWithContent(filename, b, t)
|
||||
existsWithContent(backupFile(dir), start, t)
|
||||
|
||||
fileCount(dir, 2, t)
|
||||
}
|
||||
|
||||
func TestMaxBackups(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
megabyte = 1
|
||||
dir := makeTempDir("TestMaxBackups", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxSize: 10,
|
||||
MaxBackups: 1,
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
existsWithContent(filename, b, t)
|
||||
fileCount(dir, 1, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
// this will put us over the max
|
||||
b2 := []byte("foooooo!")
|
||||
n, err = l.Write(b2)
|
||||
isNil(err, t)
|
||||
equals(len(b2), n, t)
|
||||
|
||||
// this will use the new fake time
|
||||
secondFilename := backupFile(dir)
|
||||
existsWithContent(secondFilename, b, t)
|
||||
|
||||
// make sure the old file still exists with the same content.
|
||||
existsWithContent(filename, b2, t)
|
||||
|
||||
fileCount(dir, 2, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
// this will make us rotate again
|
||||
b3 := []byte("baaaaaar!")
|
||||
n, err = l.Write(b3)
|
||||
isNil(err, t)
|
||||
equals(len(b3), n, t)
|
||||
|
||||
// this will use the new fake time
|
||||
thirdFilename := backupFile(dir)
|
||||
existsWithContent(thirdFilename, b2, t)
|
||||
|
||||
existsWithContent(filename, b3, t)
|
||||
|
||||
// we need to wait a little bit since the files get deleted on a different
|
||||
// goroutine.
|
||||
<-time.After(time.Millisecond * 10)
|
||||
|
||||
// should only have two files in the dir still
|
||||
fileCount(dir, 2, t)
|
||||
|
||||
// second file name should still exist
|
||||
existsWithContent(thirdFilename, b2, t)
|
||||
|
||||
// should have deleted the first backup
|
||||
notExist(secondFilename, t)
|
||||
|
||||
// now test that we don't delete directories or non-logfile files
|
||||
|
||||
newFakeTime()
|
||||
|
||||
// create a file that is close to but different from the logfile name.
|
||||
// It shouldn't get caught by our deletion filters.
|
||||
notlogfile := logFile(dir) + ".foo"
|
||||
err = ioutil.WriteFile(notlogfile, []byte("data"), 0644)
|
||||
isNil(err, t)
|
||||
|
||||
// Make a directory that exactly matches our log file filters... it still
|
||||
// shouldn't get caught by the deletion filter since it's a directory.
|
||||
notlogfiledir := backupFile(dir)
|
||||
err = os.Mkdir(notlogfiledir, 0700)
|
||||
isNil(err, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
// this will use the new fake time
|
||||
fourthFilename := backupFile(dir)
|
||||
|
||||
// Create a log file that is/was being compressed - this should
|
||||
// not be counted since both the compressed and the uncompressed
|
||||
// log files still exist.
|
||||
compLogFile := fourthFilename+compressSuffix
|
||||
err = ioutil.WriteFile(compLogFile, []byte("compress"), 0644)
|
||||
isNil(err, t)
|
||||
|
||||
// this will make us rotate again
|
||||
b4 := []byte("baaaaaaz!")
|
||||
n, err = l.Write(b4)
|
||||
isNil(err, t)
|
||||
equals(len(b4), n, t)
|
||||
|
||||
existsWithContent(fourthFilename, b3, t)
|
||||
existsWithContent(fourthFilename+compressSuffix, []byte("compress"), t)
|
||||
|
||||
// we need to wait a little bit since the files get deleted on a different
|
||||
// goroutine.
|
||||
<-time.After(time.Millisecond * 10)
|
||||
|
||||
// We should have four things in the directory now - the 2 log files, the
|
||||
// not log file, and the directory
|
||||
fileCount(dir, 5, t)
|
||||
|
||||
// third file name should still exist
|
||||
existsWithContent(filename, b4, t)
|
||||
|
||||
existsWithContent(fourthFilename, b3, t)
|
||||
|
||||
// should have deleted the first filename
|
||||
notExist(thirdFilename, t)
|
||||
|
||||
// the not-a-logfile should still exist
|
||||
exists(notlogfile, t)
|
||||
|
||||
// the directory
|
||||
exists(notlogfiledir, t)
|
||||
}
|
||||
|
||||
func TestCleanupExistingBackups(t *testing.T) {
|
||||
// test that if we start with more backup files than we're supposed to have
|
||||
// in total, that extra ones get cleaned up when we rotate.
|
||||
|
||||
currentTime = fakeTime
|
||||
megabyte = 1
|
||||
|
||||
dir := makeTempDir("TestCleanupExistingBackups", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
// make 3 backup files
|
||||
|
||||
data := []byte("data")
|
||||
backup := backupFile(dir)
|
||||
err := ioutil.WriteFile(backup, data, 0644)
|
||||
isNil(err, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
backup = backupFile(dir)
|
||||
err = ioutil.WriteFile(backup+compressSuffix, data, 0644)
|
||||
isNil(err, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
backup = backupFile(dir)
|
||||
err = ioutil.WriteFile(backup, data, 0644)
|
||||
isNil(err, t)
|
||||
|
||||
// now create a primary log file with some data
|
||||
filename := logFile(dir)
|
||||
err = ioutil.WriteFile(filename, data, 0644)
|
||||
isNil(err, t)
|
||||
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxSize: 10,
|
||||
MaxBackups: 1,
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
newFakeTime()
|
||||
|
||||
b2 := []byte("foooooo!")
|
||||
n, err := l.Write(b2)
|
||||
isNil(err, t)
|
||||
equals(len(b2), n, t)
|
||||
|
||||
// we need to wait a little bit since the files get deleted on a different
|
||||
// goroutine.
|
||||
<-time.After(time.Millisecond * 10)
|
||||
|
||||
// now we should only have 2 files left - the primary and one backup
|
||||
fileCount(dir, 2, t)
|
||||
}
|
||||
|
||||
func TestMaxAge(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestMaxAge", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	l := &Logger{
		Filename: filename,
		MaxSize:  10,
		MaxAge:   1,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	existsWithContent(filename, b, t)
	fileCount(dir, 1, t)

	// two days later
	newFakeTime()

	b2 := []byte("foooooo!")
	n, err = l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)
	existsWithContent(backupFile(dir), b, t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	// We should still have 2 log files, since the most recent backup was just
	// created.
	fileCount(dir, 2, t)

	existsWithContent(filename, b2, t)

	// we should have deleted the old file due to being too old
	existsWithContent(backupFile(dir), b, t)

	// two days later
	newFakeTime()

	b3 := []byte("baaaaar!")
	n, err = l.Write(b3)
	isNil(err, t)
	equals(len(b3), n, t)
	existsWithContent(backupFile(dir), b2, t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	// We should have 2 log files - the main log file, and the most recent
	// backup. The earlier backup is past the cutoff and should be gone.
	fileCount(dir, 2, t)

	existsWithContent(filename, b3, t)

	// we should have deleted the old file due to being too old
	existsWithContent(backupFile(dir), b2, t)
}

func TestOldLogFiles(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestOldLogFiles", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	data := []byte("data")
	err := ioutil.WriteFile(filename, data, 07)
	isNil(err, t)

	// This gives us a time with the same precision as the time we get from the
	// timestamp in the name.
	t1, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
	isNil(err, t)

	backup := backupFile(dir)
	err = ioutil.WriteFile(backup, data, 07)
	isNil(err, t)

	newFakeTime()

	t2, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
	isNil(err, t)

	backup2 := backupFile(dir)
	err = ioutil.WriteFile(backup2, data, 07)
	isNil(err, t)

	l := &Logger{Filename: filename}
	files, err := l.oldLogFiles()
	isNil(err, t)
	equals(2, len(files), t)

	// should be sorted by newest file first, which would be t2
	equals(t2, files[0].timestamp, t)
	equals(t1, files[1].timestamp, t)
}

func TestTimeFromName(t *testing.T) {
	l := &Logger{Filename: "/var/log/myfoo/foo.log"}
	prefix, ext := l.prefixAndExt()

	tests := []struct {
		filename string
		want     time.Time
		wantErr  bool
	}{
		{"foo-2014-05-04T14-44-33.555.log", time.Date(2014, 5, 4, 14, 44, 33, 555000000, time.UTC), false},
		{"foo-2014-05-04T14-44-33.555", time.Time{}, true},
		{"2014-05-04T14-44-33.555.log", time.Time{}, true},
		{"foo.log", time.Time{}, true},
	}

	for _, test := range tests {
		got, err := l.timeFromName(test.filename, prefix, ext)
		equals(got, test.want, t)
		equals(err != nil, test.wantErr, t)
	}
}

func TestLocalTime(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestLocalTime", t)
	defer os.RemoveAll(dir)

	l := &Logger{
		Filename:  logFile(dir),
		MaxSize:   10,
		LocalTime: true,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	b2 := []byte("fooooooo!")
	n2, err := l.Write(b2)
	isNil(err, t)
	equals(len(b2), n2, t)

	existsWithContent(logFile(dir), b2, t)
	existsWithContent(backupFileLocal(dir), b, t)
}

func TestRotate(t *testing.T) {
	currentTime = fakeTime
	dir := makeTempDir("TestRotate", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)

	l := &Logger{
		Filename:   filename,
		MaxBackups: 1,
		MaxSize:    100, // megabytes
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	existsWithContent(filename, b, t)
	fileCount(dir, 1, t)

	newFakeTime()

	err = l.Rotate()
	isNil(err, t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	filename2 := backupFile(dir)
	existsWithContent(filename2, b, t)
	existsWithContent(filename, []byte{}, t)
	fileCount(dir, 2, t)
	newFakeTime()

	err = l.Rotate()
	isNil(err, t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	filename3 := backupFile(dir)
	existsWithContent(filename3, []byte{}, t)
	existsWithContent(filename, []byte{}, t)
	fileCount(dir, 2, t)

	b2 := []byte("foooooo!")
	n, err = l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)

	// this will use the new fake time
	existsWithContent(filename, b2, t)
}

func TestCompressOnRotate(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestCompressOnRotate", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	l := &Logger{
		Compress: true,
		Filename: filename,
		MaxSize:  10,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	existsWithContent(filename, b, t)
	fileCount(dir, 1, t)

	newFakeTime()

	err = l.Rotate()
	isNil(err, t)

	// the old logfile should be moved aside and the main logfile should have
	// nothing in it.
	existsWithContent(filename, []byte{}, t)

	// we need to wait a little bit since the files get compressed on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	// a compressed version of the log file should now exist and the original
	// should have been removed.
	bc := new(bytes.Buffer)
	gz := gzip.NewWriter(bc)
	_, err = gz.Write(b)
	isNil(err, t)
	err = gz.Close()
	isNil(err, t)
	existsWithContent(backupFile(dir)+compressSuffix, bc.Bytes(), t)
	notExist(backupFile(dir), t)

	fileCount(dir, 2, t)
}

func TestCompressOnResume(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestCompressOnResume", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	l := &Logger{
		Compress: true,
		Filename: filename,
		MaxSize:  10,
	}
	defer l.Close()

	// Create a backup file and empty "compressed" file.
	filename2 := backupFile(dir)
	b := []byte("foo!")
	err := ioutil.WriteFile(filename2, b, 0644)
	isNil(err, t)
	err = ioutil.WriteFile(filename2+compressSuffix, []byte{}, 0644)
	isNil(err, t)

	newFakeTime()

	b2 := []byte("boo!")
	n, err := l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)
	existsWithContent(filename, b2, t)

	// we need to wait a little bit since the files get compressed on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	// The write should have started the compression - a compressed version of
	// the log file should now exist and the original should have been removed.
	bc := new(bytes.Buffer)
	gz := gzip.NewWriter(bc)
	_, err = gz.Write(b)
	isNil(err, t)
	err = gz.Close()
	isNil(err, t)
	existsWithContent(filename2+compressSuffix, bc.Bytes(), t)
	notExist(filename2, t)

	fileCount(dir, 2, t)
}

func TestJson(t *testing.T) {
	data := []byte(`
{
	"filename": "foo",
	"maxsize": 5,
	"maxage": 10,
	"maxbackups": 3,
	"localtime": true,
	"compress": true
}`[1:])

	l := Logger{}
	err := json.Unmarshal(data, &l)
	isNil(err, t)
	equals("foo", l.Filename, t)
	equals(5, l.MaxSize, t)
	equals(10, l.MaxAge, t)
	equals(3, l.MaxBackups, t)
	equals(true, l.LocalTime, t)
	equals(true, l.Compress, t)
}

func TestYaml(t *testing.T) {
	data := []byte(`
filename: foo
maxsize: 5
maxage: 10
maxbackups: 3
localtime: true
compress: true`[1:])

	l := Logger{}
	err := yaml.Unmarshal(data, &l)
	isNil(err, t)
	equals("foo", l.Filename, t)
	equals(5, l.MaxSize, t)
	equals(10, l.MaxAge, t)
	equals(3, l.MaxBackups, t)
	equals(true, l.LocalTime, t)
	equals(true, l.Compress, t)
}

func TestToml(t *testing.T) {
	data := `
filename = "foo"
maxsize = 5
maxage = 10
maxbackups = 3
localtime = true
compress = true`[1:]

	l := Logger{}
	md, err := toml.Decode(data, &l)
	isNil(err, t)
	equals("foo", l.Filename, t)
	equals(5, l.MaxSize, t)
	equals(10, l.MaxAge, t)
	equals(3, l.MaxBackups, t)
	equals(true, l.LocalTime, t)
	equals(true, l.Compress, t)
	equals(0, len(md.Undecoded()), t)
}

// makeTempDir creates a file with a semi-unique name in the OS temp directory.
// It should be based on the name of the test, to keep parallel tests from
// colliding, and must be cleaned up after the test is finished.
func makeTempDir(name string, t testing.TB) string {
	dir := time.Now().Format(name + backupTimeFormat)
	dir = filepath.Join(os.TempDir(), dir)
	isNilUp(os.Mkdir(dir, 0700), t, 1)
	return dir
}

// existsWithContent checks that the given file exists and has the correct content.
func existsWithContent(path string, content []byte, t testing.TB) {
	info, err := os.Stat(path)
	isNilUp(err, t, 1)
	equalsUp(int64(len(content)), info.Size(), t, 1)

	b, err := ioutil.ReadFile(path)
	isNilUp(err, t, 1)
	equalsUp(content, b, t, 1)
}

// logFile returns the log file name in the given directory for the current fake
// time.
func logFile(dir string) string {
	return filepath.Join(dir, "foobar.log")
}

func backupFile(dir string) string {
	return filepath.Join(dir, "foobar-"+fakeTime().UTC().Format(backupTimeFormat)+".log")
}

func backupFileLocal(dir string) string {
	return filepath.Join(dir, "foobar-"+fakeTime().Format(backupTimeFormat)+".log")
}

// logFileLocal returns the log file name in the given directory for the current
// fake time using the local timezone.
func logFileLocal(dir string) string {
	return filepath.Join(dir, fakeTime().Format(backupTimeFormat))
}

// fileCount checks that the number of files in the directory is exp.
func fileCount(dir string, exp int, t testing.TB) {
	files, err := ioutil.ReadDir(dir)
	isNilUp(err, t, 1)
	// Make sure no other files were created.
	equalsUp(exp, len(files), t, 1)
}

// newFakeTime sets the fake "current time" to two days later.
func newFakeTime() {
	fakeCurrentTime = fakeCurrentTime.Add(time.Hour * 24 * 2)
}

func notExist(path string, t testing.TB) {
	_, err := os.Stat(path)
	assertUp(os.IsNotExist(err), t, 1, "expected to get os.IsNotExist, but instead got %v", err)
}

func exists(path string, t testing.TB) {
	_, err := os.Stat(path)
	assertUp(err == nil, t, 1, "expected file to exist, but got error from os.Stat: %v", err)
}

@@ -0,0 +1,27 @@
// +build linux

package lumberjack_test

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"gopkg.in/natefinch/lumberjack.v2"
)

// Example of how to rotate in response to SIGHUP.
func ExampleLogger_Rotate() {
	l := &lumberjack.Logger{}
	log.SetOutput(l)
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)

	go func() {
		for {
			<-c
			l.Rotate()
		}
	}()
}

@@ -0,0 +1,91 @@
package lumberjack

import (
	"fmt"
	"path/filepath"
	"reflect"
	"runtime"
	"testing"
)

// assert will log the given message if condition is false.
func assert(condition bool, t testing.TB, msg string, v ...interface{}) {
	assertUp(condition, t, 1, msg, v...)
}

// assertUp is like assert, but used inside helper functions, to ensure that
// the file and line number reported by failures corresponds to one or more
// levels up the stack.
func assertUp(condition bool, t testing.TB, caller int, msg string, v ...interface{}) {
	if !condition {
		_, file, line, _ := runtime.Caller(caller + 1)
		v = append([]interface{}{filepath.Base(file), line}, v...)
		fmt.Printf("%s:%d: "+msg+"\n", v...)
		t.FailNow()
	}
}

// equals tests that the two values are equal according to reflect.DeepEqual.
func equals(exp, act interface{}, t testing.TB) {
	equalsUp(exp, act, t, 1)
}

// equalsUp is like equals, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func equalsUp(exp, act interface{}, t testing.TB, caller int) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: exp: %v (%T), got: %v (%T)\n",
			filepath.Base(file), line, exp, exp, act, act)
		t.FailNow()
	}
}

// isNil reports a failure if the given value is not nil. Note that values
// which cannot be nil will always fail this check.
func isNil(obtained interface{}, t testing.TB) {
	isNilUp(obtained, t, 1)
}

// isNilUp is like isNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func isNilUp(obtained interface{}, t testing.TB, caller int) {
	if !_isNil(obtained) {
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: expected nil, got: %v\n", filepath.Base(file), line, obtained)
		t.FailNow()
	}
}

// notNil reports a failure if the given value is nil.
func notNil(obtained interface{}, t testing.TB) {
	notNilUp(obtained, t, 1)
}

// notNilUp is like notNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func notNilUp(obtained interface{}, t testing.TB, caller int) {
	if _isNil(obtained) {
		_, file, line, _ := runtime.Caller(caller + 1)
		fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained)
		t.FailNow()
	}
}

// _isNil is a helper function for isNil and notNil, and should not be used
// directly.
func _isNil(obtained interface{}) bool {
	if obtained == nil {
		return true
	}

	switch v := reflect.ValueOf(obtained); v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}

	return false
}