[feature] Clean up/uncache remote media (#407)

* Add whereNotEmptyAndNotNull

* Add GetRemoteOlderThanDays

* Add GetRemoteOlderThanDays

* Add PruneRemote to Manager interface

* Start implementing PruneRemote

* add new attachment + status to tests

* fix up and test GetRemoteOlderThan

* fix bad import

* PruneRemote: return number pruned

* add Cached column to mediaattachment

* update + test pruneRemote

* update mediaTest

* use Cached column

* upstep bun to latest version

* embed structs in mediaAttachment

* migrate mediaAttachment to new format

* don't default cached to true

* select only remote media

* update db dependencies

* step bun back to last working version

* update pruneRemote to use Cached field

* fix storage path of test attachments

* add recache logic to manager

* fix trimmed aspect ratio

* test prune and recache

* return errwithcode

* tidy up different paths for emoji vs attachment

* fix incorrect thumbnail type being stored

* expose TransportController to media processor

* implement tee-ing recached content

* add thoughts of dog to test fedi attachments

* test get remote files

* add comment on PruneRemote

* add postData cleanup to recache

* test thumbnail fetching

* add incredible diagram

* go mod tidy

* buffer pipes for recache streaming

* test for client stops reading after 1kb

* add media-remote-cache-days to config

* add cron package

* wrap logrus so it's available to cron

* start and stop cron jobs gracefully
Author: tobi
Date: 2022-03-07 11:08:26 +01:00
Committed by: GitHub
Parent: 100f1280a6
Commit: 07727753b9
424 changed files with 637100 additions and 176498 deletions
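Taken together, the commits above implement a prune-and-recache cycle for remote media: attachments older than the configured media-remote-cache-days are uncached by a scheduled job, then re-fetched on demand, with the bytes streamed to the requesting client and written back to storage at the same time (the "tee-ing recached content" and "buffer pipes for recache streaming" commits). A minimal sketch of that tee pattern in plain Go; every name here is illustrative, not GoToSocial's actual API:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// recacheTee serves remote bytes to a client while re-caching them locally.
// The commit list above mentions buffered pipes so that a client which stops
// reading early does not stall the recache; this sketch omits that detail.
func recacheTee(remote io.Reader, store io.Writer) io.Reader {
	return io.TeeReader(remote, store)
}

func main() {
	remote := strings.NewReader("attachment bytes re-fetched from the origin instance")
	var store bytes.Buffer // stands in for the storage backend

	served, _ := io.ReadAll(recacheTee(remote, &store))
	fmt.Println(len(served) == store.Len()) // true: cached while serving
}
```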


@@ -1,3 +1,10 @@
# 1.11.0 (February 7, 2022)
* Support port in ip from LookupFunc to override config (James Hartig)
* Fix TLS connection timeout (Blake Embrey)
* Add support for read-only, primary, standby, prefer-standby target_session_attributes (Oscar)
* Fix connect when receiving NoticeResponse
# 1.10.1 (November 20, 2021)
* Close without waiting for response (Kei Kamikawa)


@@ -248,21 +248,21 @@ func ParseConfig(connString string) (*Config, error) {
config.LookupFunc = makeDefaultResolver().LookupHost
notRuntimeParams := map[string]struct{}{
"host": struct{}{},
"port": struct{}{},
"database": struct{}{},
"user": struct{}{},
"password": struct{}{},
"passfile": struct{}{},
"connect_timeout": struct{}{},
"sslmode": struct{}{},
"sslkey": struct{}{},
"sslcert": struct{}{},
"sslrootcert": struct{}{},
"target_session_attrs": struct{}{},
"min_read_buffer_size": struct{}{},
"service": struct{}{},
"servicefile": struct{}{},
"host": {},
"port": {},
"database": {},
"user": {},
"password": {},
"passfile": {},
"connect_timeout": {},
"sslmode": {},
"sslkey": {},
"sslcert": {},
"sslrootcert": {},
"target_session_attrs": {},
"min_read_buffer_size": {},
"service": {},
"servicefile": {},
}
for k, v := range settings {
@@ -329,10 +329,19 @@ func ParseConfig(connString string) (*Config, error) {
}
}
if settings["target_session_attrs"] == "read-write" {
switch tsa := settings["target_session_attrs"]; tsa {
case "read-write":
config.ValidateConnect = ValidateConnectTargetSessionAttrsReadWrite
} else if settings["target_session_attrs"] != "any" {
return nil, &parseConfigError{connString: connString, msg: fmt.Sprintf("unknown target_session_attrs value: %v", settings["target_session_attrs"])}
case "read-only":
config.ValidateConnect = ValidateConnectTargetSessionAttrsReadOnly
case "primary":
config.ValidateConnect = ValidateConnectTargetSessionAttrsPrimary
case "standby":
config.ValidateConnect = ValidateConnectTargetSessionAttrsStandby
case "any", "prefer-standby":
// do nothing
default:
return nil, &parseConfigError{connString: connString, msg: fmt.Sprintf("unknown target_session_attrs value: %v", tsa)}
}
return config, nil
@@ -727,3 +736,48 @@ func ValidateConnectTargetSessionAttrsReadWrite(ctx context.Context, pgConn *PgC
return nil
}
// ValidateConnectTargetSessionAttrsReadOnly is a ValidateConnectFunc that implements libpq-compatible
// target_session_attrs=read-only.
func ValidateConnectTargetSessionAttrsReadOnly(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "show transaction_read_only", nil, nil, nil, nil).Read()
if result.Err != nil {
return result.Err
}
if string(result.Rows[0][0]) != "on" {
return errors.New("connection is not read only")
}
return nil
}
// ValidateConnectTargetSessionAttrsStandby is a ValidateConnectFunc that implements libpq-compatible
// target_session_attrs=standby.
func ValidateConnectTargetSessionAttrsStandby(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
if result.Err != nil {
return result.Err
}
if string(result.Rows[0][0]) != "t" {
return errors.New("server is not in hot standby mode")
}
return nil
}
// ValidateConnectTargetSessionAttrsPrimary is a ValidateConnectFunc that implements libpq-compatible
// target_session_attrs=primary.
func ValidateConnectTargetSessionAttrsPrimary(ctx context.Context, pgConn *PgConn) error {
result := pgConn.ExecParams(ctx, "select pg_is_in_recovery()", nil, nil, nil, nil).Read()
if result.Err != nil {
return result.Err
}
if string(result.Rows[0][0]) == "t" {
return errors.New("server is in standby mode")
}
return nil
}
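The switch above extends pgconn's libpq-compatible target_session_attrs handling beyond read-write and any. A sketch of how a caller would opt into one of the new values; the DSN, hosts, and credentials are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgconn"
)

func main() {
	// "standby" now routes through ValidateConnectTargetSessionAttrsStandby:
	// hosts are tried in turn until one reports pg_is_in_recovery() = t.
	cfg, err := pgconn.ParseConfig(
		"postgres://app:secret@db1.example:5432,db2.example:5432/app?target_session_attrs=standby")
	if err != nil {
		log.Fatal(err)
	}

	conn, err := pgconn.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err) // no reachable host was a hot standby
	}
	defer conn.Close(context.Background())
}
```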


@@ -11,6 +11,7 @@ import (
"io"
"math"
"net"
"strconv"
"strings"
"sync"
"time"
@@ -44,7 +45,8 @@ type Notification struct {
// DialFunc is a function that can be used to connect to a PostgreSQL server.
type DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
// LookupFunc is a function that can be used to look up IP addresses for a host.
// LookupFunc is a function that can be used to look up IP addresses for a host. Optionally, an ip:port combination can be
// returned in order to override the connection string's port.
type LookupFunc func(ctx context.Context, host string) (addrs []string, err error)
// BuildFrontendFunc is a function that can be used to create Frontend implementation for connection.
@@ -196,11 +198,24 @@ func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*Fallba
}
for _, ip := range ips {
configs = append(configs, &FallbackConfig{
Host: ip,
Port: fb.Port,
TLSConfig: fb.TLSConfig,
})
splitIP, splitPort, err := net.SplitHostPort(ip)
if err == nil {
port, err := strconv.ParseUint(splitPort, 10, 16)
if err != nil {
return nil, fmt.Errorf("error parsing port (%s) from lookup: %w", splitPort, err)
}
configs = append(configs, &FallbackConfig{
Host: splitIP,
Port: uint16(port),
TLSConfig: fb.TLSConfig,
})
} else {
configs = append(configs, &FallbackConfig{
Host: ip,
Port: fb.Port,
TLSConfig: fb.TLSConfig,
})
}
}
}
@@ -215,7 +230,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
var err error
network, address := NetworkAddress(fallbackConfig.Host, fallbackConfig.Port)
pgConn.conn, err = config.DialFunc(ctx, network, address)
netConn, err := config.DialFunc(ctx, network, address)
if err != nil {
var netErr net.Error
if errors.As(err, &netErr) && netErr.Timeout() {
@@ -224,24 +239,27 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
return nil, &connectError{config: config, msg: "dial error", err: err}
}
pgConn.parameterStatuses = make(map[string]string)
pgConn.conn = netConn
pgConn.contextWatcher = newContextWatcher(netConn)
pgConn.contextWatcher.Watch(ctx)
if fallbackConfig.TLSConfig != nil {
if err := pgConn.startTLS(fallbackConfig.TLSConfig); err != nil {
pgConn.conn.Close()
tlsConn, err := startTLS(netConn, fallbackConfig.TLSConfig)
pgConn.contextWatcher.Unwatch() // Always unwatch `netConn` after TLS.
if err != nil {
netConn.Close()
return nil, &connectError{config: config, msg: "tls error", err: err}
}
pgConn.conn = tlsConn
pgConn.contextWatcher = newContextWatcher(tlsConn)
pgConn.contextWatcher.Watch(ctx)
}
pgConn.status = connStatusConnecting
pgConn.contextWatcher = ctxwatch.NewContextWatcher(
func() { pgConn.conn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) },
func() { pgConn.conn.SetDeadline(time.Time{}) },
)
pgConn.contextWatcher.Watch(ctx)
defer pgConn.contextWatcher.Unwatch()
pgConn.parameterStatuses = make(map[string]string)
pgConn.status = connStatusConnecting
pgConn.frontend = config.BuildFrontend(pgConn.conn, pgConn.conn)
startupMsg := pgproto3.StartupMessage{
@@ -317,7 +335,7 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
}
}
return pgConn, nil
case *pgproto3.ParameterStatus:
case *pgproto3.ParameterStatus, *pgproto3.NoticeResponse:
// handled by ReceiveMessage
case *pgproto3.ErrorResponse:
pgConn.conn.Close()
@@ -329,24 +347,29 @@ func connect(ctx context.Context, config *Config, fallbackConfig *FallbackConfig
}
}
func (pgConn *PgConn) startTLS(tlsConfig *tls.Config) (err error) {
err = binary.Write(pgConn.conn, binary.BigEndian, []int32{8, 80877103})
func newContextWatcher(conn net.Conn) *ctxwatch.ContextWatcher {
return ctxwatch.NewContextWatcher(
func() { conn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) },
func() { conn.SetDeadline(time.Time{}) },
)
}
func startTLS(conn net.Conn, tlsConfig *tls.Config) (net.Conn, error) {
err := binary.Write(conn, binary.BigEndian, []int32{8, 80877103})
if err != nil {
return
return nil, err
}
response := make([]byte, 1)
if _, err = io.ReadFull(pgConn.conn, response); err != nil {
return
if _, err = io.ReadFull(conn, response); err != nil {
return nil, err
}
if response[0] != 'S' {
return errors.New("server refused TLS connection")
return nil, errors.New("server refused TLS connection")
}
pgConn.conn = tls.Client(pgConn.conn, tlsConfig)
return nil
return tls.Client(conn, tlsConfig), nil
}
func (pgConn *PgConn) txPasswordMessage(password string) (err error) {
@@ -1694,10 +1717,7 @@ func Construct(hc *HijackedConn) (*PgConn, error) {
cleanupDone: make(chan struct{}),
}
pgConn.contextWatcher = ctxwatch.NewContextWatcher(
func() { pgConn.conn.SetDeadline(time.Date(1, 1, 1, 1, 1, 1, 1, time.UTC)) },
func() { pgConn.conn.SetDeadline(time.Time{}) },
)
pgConn.contextWatcher = newContextWatcher(pgConn.conn)
return pgConn, nil
}
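With the SplitHostPort handling added to expandWithIPs, a custom LookupFunc can now return ip:port pairs to override the configured port per address. A sketch (the local pgbouncer target is hypothetical):

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgconn"
)

func main() {
	cfg, err := pgconn.ParseConfig("postgres://app@db.internal/app")
	if err != nil {
		log.Fatal(err)
	}

	// Returning "ip:port" overrides cfg's port for that address; plain IPs
	// keep the old behavior and use the connection string's port.
	cfg.LookupFunc = func(ctx context.Context, host string) ([]string, error) {
		return []string{"127.0.0.1:6432"}, nil // e.g. a local pgbouncer
	}

	conn, err := pgconn.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(context.Background())
}
```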


@@ -1,3 +1,10 @@
# 1.10.0 (February 7, 2022)
* Normalize UTC timestamps to comply with stdlib (Torkel Rogstad)
* Assign Numeric to *big.Rat (Oleg Lomaka)
* Fix typo in float8 error message (Pinank Solanki)
* Scan type aliases for floating point types (Collin Forsyth)
# 1.9.1 (November 28, 2021)
* Fix: binary timestamp is assumed to be in UTC (restored behavior changed in v1.9.0)


@@ -337,6 +337,10 @@ func float64AssignTo(srcVal float64, srcStatus Status, dst interface{}) error {
if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
el := v.Elem()
switch el.Kind() {
// if dst is a type alias of a float32 or 64, set dst val
case reflect.Float32, reflect.Float64:
el.SetFloat(srcVal)
return nil
// if dst is a pointer to pointer, strip the pointer and try again
case reflect.Ptr:
if el.IsNil() {
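The new Float32/Float64 reflection case lets AssignTo populate named floating-point types directly, matching the changelog entry "Scan type aliases for floating point types". A sketch with an assumed named type:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jackc/pgtype"
)

// Celsius is a named float type; the reflection case above lets AssignTo
// set it without a custom Scanner.
type Celsius float64

func main() {
	src := pgtype.Float8{Float: 21.5, Status: pgtype.Present}

	var temp Celsius
	if err := src.AssignTo(&temp); err != nil {
		log.Fatal(err)
	}
	fmt.Println(temp) // 21.5
}
```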


@@ -204,7 +204,7 @@ func (dst *Float8) DecodeBinary(ci *ConnInfo, src []byte) error {
}
if len(src) != 8 {
return fmt.Errorf("invalid length for float4: %v", len(src))
return fmt.Errorf("invalid length for float8: %v", len(src))
}
n := int64(binary.BigEndian.Uint64(src))


@@ -369,6 +369,12 @@ func (src *Numeric) AssignTo(dst interface{}) error {
return fmt.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
}
*v = normalizedInt.Uint64()
case *big.Rat:
rat, err := src.toBigRat()
if err != nil {
return err
}
v.Set(rat)
default:
if nextDst, retry := GetAssignToDstType(dst); retry {
return src.AssignTo(nextDst)
@@ -406,6 +412,26 @@ func (dst *Numeric) toBigInt() (*big.Int, error) {
return num, nil
}
func (dst *Numeric) toBigRat() (*big.Rat, error) {
if dst.NaN {
return nil, fmt.Errorf("%v is not a number", dst)
} else if dst.InfinityModifier == Infinity {
return nil, fmt.Errorf("%v is infinity", dst)
} else if dst.InfinityModifier == NegativeInfinity {
return nil, fmt.Errorf("%v is -infinity", dst)
}
num := new(big.Rat).SetInt(dst.Int)
if dst.Exp > 0 {
mul := new(big.Int).Exp(big10, big.NewInt(int64(dst.Exp)), nil)
num.Mul(num, new(big.Rat).SetInt(mul))
} else if dst.Exp < 0 {
mul := new(big.Int).Exp(big10, big.NewInt(int64(-dst.Exp)), nil)
num.Quo(num, new(big.Rat).SetInt(mul))
}
return num, nil
}
func (src *Numeric) toFloat64() (float64, error) {
if src.NaN {
return math.NaN(), nil
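A sketch exercising the new *big.Rat branch of Numeric.AssignTo; the Numeric is constructed literally here rather than decoded from a query result:

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/jackc/pgtype"
)

func main() {
	// 12345 * 10^-2, i.e. the numeric value 123.45.
	n := pgtype.Numeric{Int: big.NewInt(12345), Exp: -2, Status: pgtype.Present}

	r := new(big.Rat)
	if err := n.AssignTo(r); err != nil {
		log.Fatal(err) // NaN and +/- infinity are rejected by toBigRat
	}
	fmt.Println(r.FloatString(2)) // 123.45
}
```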


@@ -124,7 +124,7 @@ func (dst *Timestamptz) DecodeText(ci *ConnInfo, src []byte) error {
return err
}
*dst = Timestamptz{Time: tim, Status: Present}
*dst = Timestamptz{Time: normalizePotentialUTC(tim), Status: Present}
}
return nil
@@ -231,6 +231,9 @@ func (src Timestamptz) Value() (driver.Value, error) {
if src.InfinityModifier != None {
return src.InfinityModifier.String(), nil
}
if src.Time.Location().String() == time.UTC.String() {
return src.Time.UTC(), nil
}
return src.Time, nil
case Null:
return nil, nil
@@ -289,8 +292,23 @@ func (dst *Timestamptz) UnmarshalJSON(b []byte) error {
return err
}
*dst = Timestamptz{Time: tim, Status: Present}
*dst = Timestamptz{Time: normalizePotentialUTC(tim), Status: Present}
}
return nil
}
// Normalize timestamps in UTC location to behave similarly to how the Golang
// standard library does it: UTC timestamps lack a .loc value.
//
// Reason for this: when comparing two timestamps with reflect.DeepEqual (generally
// speaking not a good idea, but several testing libraries (for example testify)
// do this), their location data needs to be equal for them to be considered
// equal.
func normalizePotentialUTC(timestamp time.Time) time.Time {
if timestamp.Location().String() != time.UTC.String() {
return timestamp
}
return timestamp.UTC()
}
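The stdlib quirk that normalizePotentialUTC papers over can be reproduced without pgtype: two values for the same instant fail reflect.DeepEqual when their *time.Location values differ. A sketch:

```go
package main

import (
	"fmt"
	"reflect"
	"time"
)

func main() {
	a := time.Date(2022, 2, 7, 12, 0, 0, 0, time.UTC)

	// Parsing a zone abbreviation fabricates a new Location named "UTC"
	// that is not the canonical time.UTC value.
	b, _ := time.Parse("2006-01-02 15:04:05 MST", "2022-02-07 12:00:00 UTC")

	fmt.Println(a.Equal(b))                    // true: same instant
	fmt.Println(reflect.DeepEqual(a, b))       // false: different locations
	fmt.Println(reflect.DeepEqual(a, b.UTC())) // true: normalized as above
}
```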


@@ -1,3 +1,10 @@
# 4.15.0 (February 7, 2022)
* Upgrade to pgconn v1.11.0
* Upgrade to pgtype v1.10.0
* Upgrade puddle to v1.2.1
* Make BatchResults.Close safe to be called multiple times
# 4.14.1 (November 28, 2021)
* Upgrade pgtype to v1.9.1 (fixes unintentional change to timestamp binary decoding)


@@ -3,6 +3,7 @@ package pgx
import (
"context"
"errors"
"fmt"
"github.com/jackc/pgconn"
)
@@ -46,17 +47,18 @@ type BatchResults interface {
// Close closes the batch operation. This must be called before the underlying connection can be used again. Any error
// that occurred during a batch operation may have made it impossible to resynchronize the connection with the server.
// In this case the underlying connection will have been closed.
// In this case the underlying connection will have been closed. Close is safe to call multiple times.
Close() error
}
type batchResults struct {
ctx context.Context
conn *Conn
mrr *pgconn.MultiResultReader
err error
b *Batch
ix int
ctx context.Context
conn *Conn
mrr *pgconn.MultiResultReader
err error
b *Batch
ix int
closed bool
}
// Exec reads the results from the next query in the batch as if the query has been sent with Exec.
@@ -64,6 +66,9 @@ func (br *batchResults) Exec() (pgconn.CommandTag, error) {
if br.err != nil {
return nil, br.err
}
if br.closed {
return nil, fmt.Errorf("batch already closed")
}
query, arguments, _ := br.nextQueryAndArgs()
@@ -114,6 +119,11 @@ func (br *batchResults) Query() (Rows, error) {
return &connRows{err: br.err, closed: true}, br.err
}
if br.closed {
alreadyClosedErr := fmt.Errorf("batch already closed")
return &connRows{err: alreadyClosedErr, closed: true}, alreadyClosedErr
}
rows := br.conn.getRows(br.ctx, query, arguments)
if !br.mrr.NextResult() {
@@ -140,6 +150,10 @@
// QueryFunc reads the results from the next query in the batch as if the query has been sent with Conn.QueryFunc.
func (br *batchResults) QueryFunc(scans []interface{}, f func(QueryFuncRow) error) (pgconn.CommandTag, error) {
if br.closed {
return nil, fmt.Errorf("batch already closed")
}
rows, err := br.Query()
if err != nil {
return nil, err
@@ -179,6 +193,11 @@ func (br *batchResults) Close() error {
return br.err
}
if br.closed {
return nil
}
br.closed = true
// log any queries that haven't yet been logged by Exec or Query
for {
query, args, ok := br.nextQueryAndArgs()
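With the new closed flag, a defensive second Close (common in defer-heavy code) becomes a no-op instead of a second attempt to drain the connection. A sketch with a placeholder DSN:

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v4"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://app@localhost:5432/app")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	b := &pgx.Batch{}
	b.Queue("select 1")
	b.Queue("select 2")

	br := conn.SendBatch(ctx, b)
	defer br.Close() // safe even though we also close explicitly below

	if _, err := br.Exec(); err != nil {
		log.Fatal(err)
	}
	if err := br.Close(); err != nil { // drains the remaining results
		log.Fatal(err)
	}
	// The deferred br.Close() now returns nil immediately.
}
```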


@@ -41,10 +41,13 @@ type Rows interface {
// Scan reads the values from the current row into dest values positionally.
// dest can include pointers to core types, values implementing the Scanner
// interface, and nil. nil will skip the value entirely.
// interface, and nil. nil will skip the value entirely. It is an error to
// call Scan without first calling Next() and checking that it returned true.
Scan(dest ...interface{}) error
// Values returns the decoded row values.
// Values returns the decoded row values. As with Scan(), it is an error to
// call Values without first calling Next() and checking that it returned
// true.
Values() ([]interface{}, error)
// RawValues returns the unparsed bytes of the row values. The returned [][]byte is only valid until the next Next
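The sharpened contract in one place: Next must be called, and must return true, before each Scan or Values. A sketch; the table is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v4"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://app@localhost:5432/app")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	rows, err := conn.Query(ctx, "select id, name from widgets")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() { // always Next() first, per the doc change above
		var id int64
		var name string
		if err := rows.Scan(&id, &name); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, name)
	}
	if rows.Err() != nil {
		log.Fatal(rows.Err())
	}
}
```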

vendor/github.com/robfig/cron/v3/.gitignore (generated, vendored, new file, +22)

@@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

vendor/github.com/robfig/cron/v3/.travis.yml (generated, vendored, new file, +1)

@@ -0,0 +1 @@
language: go

vendor/github.com/robfig/cron/v3/LICENSE (generated, vendored, new file, +21)

@@ -0,0 +1,21 @@
Copyright (C) 2012 Rob Figueiredo
All Rights Reserved.
MIT LICENSE
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/robfig/cron/v3/README.md (generated, vendored, new file, +125)

@@ -0,0 +1,125 @@
[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron)
[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron)
# cron
Cron V3 has been released!
To download the specific tagged release, run:
go get github.com/robfig/cron/v3@v3.0.0
Import it in your program as:
import "github.com/robfig/cron/v3"
It requires Go 1.11 or later due to usage of Go Modules.
Refer to the documentation here:
http://godoc.org/github.com/robfig/cron
The rest of this document describes the advances in v3 and a list of
breaking changes for users that wish to upgrade from an earlier version.
## Upgrading to v3 (June 2019)
cron v3 is a major upgrade to the library that addresses all outstanding bugs,
feature requests, and rough edges. It is based on a merge of master which
contains various fixes to issues found over the years and the v2 branch which
contains some backwards-incompatible features like the ability to remove cron
jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like
the timezone support, and fixes a number of bugs.
New features:
- Support for Go modules. Callers must now import this library as
`github.com/robfig/cron/v3`, instead of `gopkg.in/...`
- Fixed bugs:
- 0f01e6b parser: fix combining of Dow and Dom (#70)
- dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157)
- eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144)
- 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97)
- 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206)
- Standard cron spec parsing by default (first field is "minute"), with an easy
way to opt into the seconds field (quartz-compatible). Although, note that the
year field (optional in Quartz) is not supported.
- Extensible, key/value logging via an interface that complies with
the https://github.com/go-logr/logr project.
- The new Chain & JobWrapper types allow you to install "interceptors" to add
cross-cutting behavior like the following:
- Recover any panics from jobs
- Delay a job's execution if the previous run hasn't completed yet
- Skip a job's execution if the previous run hasn't completed yet
- Log each job's invocations
- Notification when jobs are completed
It is backwards incompatible with both v1 and v2. These updates are required:
- The v1 branch accepted an optional seconds field at the beginning of the cron
spec. This is non-standard and has led to a lot of confusion. The new default
parser conforms to the standard as described by [the Cron wikipedia page].
UPDATING: To retain the old behavior, construct your Cron with a custom
parser:
// Seconds field, required
cron.New(cron.WithSeconds())
// Seconds field, optional
cron.New(
cron.WithParser(
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))
- The Cron type now accepts functional options on construction rather than the
previous ad-hoc behavior modification mechanisms (setting a field, calling a setter).
UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be
updated to provide those values on construction.
- CRON_TZ is now the recommended way to specify the timezone of a single
schedule, which is sanctioned by the specification. The legacy "TZ=" prefix
will continue to be supported since it is unambiguous and easy to do so.
UPDATING: No update is required.
- By default, cron will no longer recover panics in jobs that it runs.
Recovering can be surprising (see issue #192) and seems to be at odds with
typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option
has been removed to accommodate the more general JobWrapper type.
UPDATING: To opt into panic recovery and configure the panic logger:
cron.New(cron.WithChain(
cron.Recover(logger), // or use cron.DefaultLogger
))
- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was
removed, since it is duplicative with the leveled logging.
UPDATING: Callers should use `WithLogger` and specify a logger that does not
discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`:
cron.New(
cron.WithLogger(cron.VerbosePrintfLogger(logger)))
### Background - Cron spec format
There are two cron spec formats in common usage:
- The "standard" cron format, described on [the Cron wikipedia page] and used by
the cron Linux system utility.
- The cron format used by [the Quartz Scheduler], commonly used for scheduled
jobs in Java software
[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron
[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html
The original version of this package included an optional "seconds" field, which
made it incompatible with both of these formats. Now, the "standard" format is
the default format accepted, and the Quartz format is opt-in.

vendor/github.com/robfig/cron/v3/chain.go (generated, vendored, new file, +92)

@@ -0,0 +1,92 @@
package cron
import (
"fmt"
"runtime"
"sync"
"time"
)
// JobWrapper decorates the given Job with some behavior.
type JobWrapper func(Job) Job
// Chain is a sequence of JobWrappers that decorates submitted jobs with
// cross-cutting behaviors like logging or synchronization.
type Chain struct {
wrappers []JobWrapper
}
// NewChain returns a Chain consisting of the given JobWrappers.
func NewChain(c ...JobWrapper) Chain {
return Chain{c}
}
// Then decorates the given job with all JobWrappers in the chain.
//
// This:
// NewChain(m1, m2, m3).Then(job)
// is equivalent to:
// m1(m2(m3(job)))
func (c Chain) Then(j Job) Job {
for i := range c.wrappers {
j = c.wrappers[len(c.wrappers)-i-1](j)
}
return j
}
// Recover panics in wrapped jobs and log them with the provided logger.
func Recover(logger Logger) JobWrapper {
return func(j Job) Job {
return FuncJob(func() {
defer func() {
if r := recover(); r != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
err, ok := r.(error)
if !ok {
err = fmt.Errorf("%v", r)
}
logger.Error(err, "panic", "stack", "...\n"+string(buf))
}
}()
j.Run()
})
}
}
// DelayIfStillRunning serializes jobs, delaying subsequent runs until the
// previous one is complete. Jobs running after a delay of more than a minute
// have the delay logged at Info.
func DelayIfStillRunning(logger Logger) JobWrapper {
return func(j Job) Job {
var mu sync.Mutex
return FuncJob(func() {
start := time.Now()
mu.Lock()
defer mu.Unlock()
if dur := time.Since(start); dur > time.Minute {
logger.Info("delay", "duration", dur)
}
j.Run()
})
}
}
// SkipIfStillRunning skips an invocation of the Job if a previous invocation is
// still running. It logs skips to the given logger at Info level.
func SkipIfStillRunning(logger Logger) JobWrapper {
return func(j Job) Job {
var ch = make(chan struct{}, 1)
ch <- struct{}{}
return FuncJob(func() {
select {
case v := <-ch:
j.Run()
ch <- v
default:
logger.Info("skip")
}
})
}
}
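How the wrappers compose: per Then's m1(m2(m3(job))) ordering, listing Recover first makes it outermost, so it also catches panics escaping the inner wrappers. A sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	chain := cron.NewChain(
		cron.Recover(cron.DefaultLogger),            // outermost
		cron.SkipIfStillRunning(cron.DefaultLogger), // innermost
	)

	job := chain.Then(cron.FuncJob(func() {
		fmt.Println("working")
		time.Sleep(500 * time.Millisecond)
	}))

	go job.Run()                      // first invocation holds the token
	time.Sleep(50 * time.Millisecond) // let it start
	job.Run()                         // overlap: logged as "skip", not run
	time.Sleep(time.Second)
}
```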

vendor/github.com/robfig/cron/v3/constantdelay.go (generated, vendored, new file, +27)

@@ -0,0 +1,27 @@
package cron
import "time"
// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
// It does not support jobs more frequent than once a second.
type ConstantDelaySchedule struct {
Delay time.Duration
}
// Every returns a crontab Schedule that activates once every duration.
// Delays of less than a second are not supported (will round up to 1 second).
// Any fields less than a Second are truncated.
func Every(duration time.Duration) ConstantDelaySchedule {
if duration < time.Second {
duration = time.Second
}
return ConstantDelaySchedule{
Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
}
}
// Next returns the next time this should be run.
// This rounds so that the next activation time will be on the second.
func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
}
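The rounding rules in numbers: durations under a second are bumped up to one second, sub-second remainders are truncated, and Next always lands on a whole second:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	s := cron.Every(2500 * time.Millisecond)
	fmt.Println(s.Delay) // 2s: the 500ms remainder is truncated

	next := s.Next(time.Now())
	fmt.Println(next.Nanosecond()) // 0: activations land on whole seconds
}
```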

vendor/github.com/robfig/cron/v3/cron.go (generated, vendored, new file, +355)

@@ -0,0 +1,355 @@
package cron
import (
"context"
"sort"
"sync"
"time"
)
// Cron keeps track of any number of entries, invoking the associated func as
// specified by the schedule. It may be started, stopped, and the entries may
// be inspected while running.
type Cron struct {
entries []*Entry
chain Chain
stop chan struct{}
add chan *Entry
remove chan EntryID
snapshot chan chan []Entry
running bool
logger Logger
runningMu sync.Mutex
location *time.Location
parser ScheduleParser
nextID EntryID
jobWaiter sync.WaitGroup
}
// ScheduleParser is an interface for schedule spec parsers that return a Schedule
type ScheduleParser interface {
Parse(spec string) (Schedule, error)
}
// Job is an interface for submitted cron jobs.
type Job interface {
Run()
}
// Schedule describes a job's duty cycle.
type Schedule interface {
// Next returns the next activation time, later than the given time.
// Next is invoked initially, and then each time the job is run.
Next(time.Time) time.Time
}
// EntryID identifies an entry within a Cron instance
type EntryID int
// Entry consists of a schedule and the func to execute on that schedule.
type Entry struct {
// ID is the cron-assigned ID of this entry, which may be used to look up a
// snapshot or remove it.
ID EntryID
// Schedule on which this job should be run.
Schedule Schedule
// Next time the job will run, or the zero time if Cron has not been
// started or this entry's schedule is unsatisfiable
Next time.Time
// Prev is the last time this job was run, or the zero time if never.
Prev time.Time
// WrappedJob is the thing to run when the Schedule is activated.
WrappedJob Job
// Job is the thing that was submitted to cron.
// It is kept around so that user code that needs to get at the job later,
// e.g. via Entries() can do so.
Job Job
}
// Valid returns true if this is not the zero entry.
func (e Entry) Valid() bool { return e.ID != 0 }
// byTime is a wrapper for sorting the entry array by time
// (with zero time at the end).
type byTime []*Entry
func (s byTime) Len() int { return len(s) }
func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byTime) Less(i, j int) bool {
// Two zero times should return false.
// Otherwise, zero is "greater" than any other time.
// (To sort it at the end of the list.)
if s[i].Next.IsZero() {
return false
}
if s[j].Next.IsZero() {
return true
}
return s[i].Next.Before(s[j].Next)
}
// New returns a new Cron job runner, modified by the given options.
//
// Available Settings
//
// Time Zone
// Description: The time zone in which schedules are interpreted
// Default: time.Local
//
// Parser
// Description: Parser converts cron spec strings into cron.Schedules.
// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron
//
// Chain
// Description: Wrap submitted jobs to customize behavior.
// Default: A chain that recovers panics and logs them to stderr.
//
// See "cron.With*" to modify the default behavior.
func New(opts ...Option) *Cron {
c := &Cron{
entries: nil,
chain: NewChain(),
add: make(chan *Entry),
stop: make(chan struct{}),
snapshot: make(chan chan []Entry),
remove: make(chan EntryID),
running: false,
runningMu: sync.Mutex{},
logger: DefaultLogger,
location: time.Local,
parser: standardParser,
}
for _, opt := range opts {
opt(c)
}
return c
}
// FuncJob is a wrapper that turns a func() into a cron.Job
type FuncJob func()
func (f FuncJob) Run() { f() }
// AddFunc adds a func to the Cron to be run on the given schedule.
// The spec is parsed using the time zone of this Cron instance as the default.
// An opaque ID is returned that can be used to later remove it.
func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
return c.AddJob(spec, FuncJob(cmd))
}
// AddJob adds a Job to the Cron to be run on the given schedule.
// The spec is parsed using the time zone of this Cron instance as the default.
// An opaque ID is returned that can be used to later remove it.
func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
schedule, err := c.parser.Parse(spec)
if err != nil {
return 0, err
}
return c.Schedule(schedule, cmd), nil
}
// Schedule adds a Job to the Cron to be run on the given schedule.
// The job is wrapped with the configured Chain.
func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
c.runningMu.Lock()
defer c.runningMu.Unlock()
c.nextID++
entry := &Entry{
ID: c.nextID,
Schedule: schedule,
WrappedJob: c.chain.Then(cmd),
Job: cmd,
}
if !c.running {
c.entries = append(c.entries, entry)
} else {
c.add <- entry
}
return entry.ID
}
// Entries returns a snapshot of the cron entries.
func (c *Cron) Entries() []Entry {
c.runningMu.Lock()
defer c.runningMu.Unlock()
if c.running {
replyChan := make(chan []Entry, 1)
c.snapshot <- replyChan
return <-replyChan
}
return c.entrySnapshot()
}
// Location gets the time zone location
func (c *Cron) Location() *time.Location {
return c.location
}
// Entry returns a snapshot of the given entry, or nil if it couldn't be found.
func (c *Cron) Entry(id EntryID) Entry {
for _, entry := range c.Entries() {
if id == entry.ID {
return entry
}
}
return Entry{}
}
// Remove an entry from being run in the future.
func (c *Cron) Remove(id EntryID) {
c.runningMu.Lock()
defer c.runningMu.Unlock()
if c.running {
c.remove <- id
} else {
c.removeEntry(id)
}
}
// Start the cron scheduler in its own goroutine, or no-op if already started.
func (c *Cron) Start() {
c.runningMu.Lock()
defer c.runningMu.Unlock()
if c.running {
return
}
c.running = true
go c.run()
}
// Run the cron scheduler, or no-op if already running.
func (c *Cron) Run() {
c.runningMu.Lock()
if c.running {
c.runningMu.Unlock()
return
}
c.running = true
c.runningMu.Unlock()
c.run()
}
// run the scheduler. This is private just due to the need to synchronize
// access to the 'running' state variable.
func (c *Cron) run() {
c.logger.Info("start")
// Figure out the next activation times for each entry.
now := c.now()
for _, entry := range c.entries {
entry.Next = entry.Schedule.Next(now)
c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next)
}
for {
// Determine the next entry to run.
sort.Sort(byTime(c.entries))
var timer *time.Timer
if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
// If there are no entries yet, just sleep - it still handles new entries
// and stop requests.
timer = time.NewTimer(100000 * time.Hour)
} else {
timer = time.NewTimer(c.entries[0].Next.Sub(now))
}
for {
select {
case now = <-timer.C:
now = now.In(c.location)
c.logger.Info("wake", "now", now)
// Run every entry whose next time was less than now
for _, e := range c.entries {
if e.Next.After(now) || e.Next.IsZero() {
break
}
c.startJob(e.WrappedJob)
e.Prev = e.Next
e.Next = e.Schedule.Next(now)
c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next)
}
case newEntry := <-c.add:
timer.Stop()
now = c.now()
newEntry.Next = newEntry.Schedule.Next(now)
c.entries = append(c.entries, newEntry)
c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next)
case replyChan := <-c.snapshot:
replyChan <- c.entrySnapshot()
continue
case <-c.stop:
timer.Stop()
c.logger.Info("stop")
return
case id := <-c.remove:
timer.Stop()
now = c.now()
c.removeEntry(id)
c.logger.Info("removed", "entry", id)
}
break
}
}
}
// startJob runs the given job in a new goroutine.
func (c *Cron) startJob(j Job) {
c.jobWaiter.Add(1)
go func() {
defer c.jobWaiter.Done()
j.Run()
}()
}
// now returns current time in c location
func (c *Cron) now() time.Time {
return time.Now().In(c.location)
}
// Stop stops the cron scheduler if it is running; otherwise it does nothing.
// A context is returned so the caller can wait for running jobs to complete.
func (c *Cron) Stop() context.Context {
c.runningMu.Lock()
defer c.runningMu.Unlock()
if c.running {
c.stop <- struct{}{}
c.running = false
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
c.jobWaiter.Wait()
cancel()
}()
return ctx
}
// entrySnapshot returns a copy of the current cron entry list.
func (c *Cron) entrySnapshot() []Entry {
var entries = make([]Entry, len(c.entries))
for i, e := range c.entries {
entries[i] = *e
}
return entries
}
func (c *Cron) removeEntry(id EntryID) {
var entries []*Entry
for _, e := range c.entries {
if e.ID != id {
entries = append(entries, e)
}
}
c.entries = entries
}
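This Stop contract (scheduling halts at once; the returned context is cancelled only after jobWaiter drains) is the hook that the "start and stop cron jobs gracefully" commit above can build on. Minimal usage:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	c.AddFunc("@every 1s", func() {
		fmt.Println("prune tick")
		time.Sleep(300 * time.Millisecond) // pretend to do work
	})
	c.Start()

	time.Sleep(2500 * time.Millisecond)

	ctx := c.Stop() // no new runs are scheduled after this
	<-ctx.Done()    // blocks until in-flight jobs finish
	fmt.Println("drained, safe to exit")
}
```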

vendor/github.com/robfig/cron/v3/doc.go (generated, vendored, new file, +231)

@@ -0,0 +1,231 @@
/*
Package cron implements a cron spec parser and job runner.
Installation
To download the specific tagged release, run:
go get github.com/robfig/cron/v3@v3.0.0
Import it in your program as:
import "github.com/robfig/cron/v3"
It requires Go 1.11 or later due to usage of Go Modules.
Usage
Callers may register Funcs to be invoked on a given schedule. Cron will run
them in their own goroutines.
c := cron.New()
c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") })
c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") })
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") })
c.Start()
..
// Funcs are invoked in their own goroutine, asynchronously.
...
// Funcs may also be added to a running Cron
c.AddFunc("@daily", func() { fmt.Println("Every day") })
..
// Inspect the cron job entries' next and previous run times.
inspect(c.Entries())
..
c.Stop() // Stop the scheduler (does not stop any jobs already running).
CRON Expression Format
A cron expression represents a set of times, using 5 space-separated fields.
Field name | Mandatory? | Allowed values | Allowed special characters
---------- | ---------- | -------------- | --------------------------
Minutes | Yes | 0-59 | * / , -
Hours | Yes | 0-23 | * / , -
Day of month | Yes | 1-31 | * / , - ?
Month | Yes | 1-12 or JAN-DEC | * / , -
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
Month and Day-of-week field values are case insensitive. "SUN", "Sun", and
"sun" are equally accepted.
The specific interpretation of the format is based on the Cron Wikipedia page:
https://en.wikipedia.org/wiki/Cron
Alternative Formats
Alternative Cron expression formats support other fields like seconds. You can
implement that by creating a custom Parser as follows.
cron.New(
cron.WithParser(
cron.NewParser(
cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)))
Since adding Seconds is the most common modification to the standard cron spec,
cron provides a builtin function to do that, which is equivalent to the custom
parser you saw earlier, except that its seconds field is REQUIRED:
cron.New(cron.WithSeconds())
That emulates Quartz, the most popular alternative Cron schedule format:
http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
Special Characters
Asterisk ( * )
The asterisk indicates that the cron expression will match for all values of the
field; e.g., using an asterisk in the 5th field (month) would indicate every
month.
Slash ( / )
Slashes are used to describe increments of ranges. For example 3-59/15 in the
1st field (minutes) would indicate the 3rd minute of the hour and every 15
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
that is, an increment over the largest possible range of the field. The form
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
increment until the end of that specific range. It does not wrap around.
Comma ( , )
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
Hyphen ( - )
Hyphens are used to define ranges. For example, 9-17 would indicate every
hour between 9am and 5pm inclusive.
Question mark ( ? )
Question mark may be used instead of '*' for leaving either day-of-month or
day-of-week blank.
Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
Entry | Description | Equivalent To
----- | ----------- | -------------
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 *
@monthly | Run once a month, midnight, first of month | 0 0 1 * *
@weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0
@daily (or @midnight) | Run once a day, midnight | 0 0 * * *
@hourly | Run once an hour, beginning of hour | 0 * * * *
Intervals
You may also schedule a job to execute at fixed intervals, starting at the time it's added
or cron is run. This is supported by formatting the cron spec like this:
@every <duration>
where "duration" is a string accepted by time.ParseDuration
(http://golang.org/pkg/time/#ParseDuration).
For example, "@every 1h30m10s" would indicate a schedule that activates after
1 hour, 30 minutes, 10 seconds, and then every interval after that.
Note: The interval does not take the job runtime into account. For example,
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
it will have only 2 minutes of idle time between each run.
Time zones
By default, all interpretation and scheduling is done in the machine's local
time zone (time.Local). You can specify a different time zone on construction:
cron.New(
cron.WithLocation(time.UTC))
Individual cron schedules may also override the time zone they are to be
interpreted in by providing an additional space-separated field at the beginning
of the cron spec, of the form "CRON_TZ=Asia/Tokyo".
For example:
# Runs at 6am in time.Local
cron.New().AddFunc("0 6 * * ?", ...)
# Runs at 6am in America/New_York
nyc, _ := time.LoadLocation("America/New_York")
c := cron.New(cron.WithLocation(nyc))
c.AddFunc("0 6 * * ?", ...)
# Runs at 6am in Asia/Tokyo
cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
# Runs at 6am in Asia/Tokyo
c := cron.New(cron.WithLocation(nyc))
c.SetLocation("America/New_York")
c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.
Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
not be run!
Job Wrappers
A Cron runner may be configured with a chain of job wrappers to add
cross-cutting functionality to all submitted jobs. For example, they may be used
to achieve the following effects:
- Recover any panics from jobs (activated by default)
- Delay a job's execution if the previous run hasn't completed yet
- Skip a job's execution if the previous run hasn't completed yet
- Log each job's invocations
Install wrappers for all jobs added to a cron using the `cron.WithChain` option:
cron.New(cron.WithChain(
cron.SkipIfStillRunning(logger),
))
Install wrappers for individual jobs by explicitly wrapping them:
job = cron.NewChain(
cron.SkipIfStillRunning(logger),
).Then(job)
Thread safety
Since the Cron service runs concurrently with the calling code, some amount of
care must be taken to ensure proper synchronization.
All cron methods are designed to be correctly synchronized as long as the caller
ensures that invocations have a clear happens-before ordering between them.
Logging
Cron defines a Logger interface that is a subset of the one defined in
github.com/go-logr/logr. It has two logging levels (Info and Error), and
parameters are key/value pairs. This makes it possible for cron logging to plug
into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided
to wrap the standard library *log.Logger.
For additional insight into Cron operations, verbose logging may be activated
which will record job runs, scheduling decisions, and added or removed jobs.
Activate it with a one-off logger as follows:
cron.New(
cron.WithLogger(
cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))))
Implementation
Cron entries are stored in an array, sorted by their next activation time. Cron
sleeps until the next job is due to be run.
Upon waking:
- it runs each entry that is active on that second
- it calculates the next run times for the jobs that were run
- it re-sorts the array of entries by next activation time.
- it goes to sleep until the soonest job.
*/
package cron

vendor/github.com/robfig/cron/v3/logger.go (generated, vendored, new file, +86)

@@ -0,0 +1,86 @@
package cron
import (
"io/ioutil"
"log"
"os"
"strings"
"time"
)
// DefaultLogger is used by Cron if none is specified.
var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))
// DiscardLogger can be used by callers to discard all log messages.
var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0))
// Logger is the interface used in this package for logging, so that any backend
// can be plugged in. It is a subset of the github.com/go-logr/logr interface.
type Logger interface {
// Info logs routine messages about cron's operation.
Info(msg string, keysAndValues ...interface{})
// Error logs an error condition.
Error(err error, msg string, keysAndValues ...interface{})
}
// PrintfLogger wraps a Printf-based logger (such as the standard library "log")
// into an implementation of the Logger interface which logs errors only.
func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
return printfLogger{l, false}
}
// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library
// "log") into an implementation of the Logger interface which logs everything.
func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
return printfLogger{l, true}
}
type printfLogger struct {
logger interface{ Printf(string, ...interface{}) }
logInfo bool
}
func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) {
if pl.logInfo {
keysAndValues = formatTimes(keysAndValues)
pl.logger.Printf(
formatString(len(keysAndValues)),
append([]interface{}{msg}, keysAndValues...)...)
}
}
func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) {
keysAndValues = formatTimes(keysAndValues)
pl.logger.Printf(
formatString(len(keysAndValues)+2),
append([]interface{}{msg, "error", err}, keysAndValues...)...)
}
// formatString returns a logfmt-like format string for the number of
// key/values.
func formatString(numKeysAndValues int) string {
var sb strings.Builder
sb.WriteString("%s")
if numKeysAndValues > 0 {
sb.WriteString(", ")
}
for i := 0; i < numKeysAndValues/2; i++ {
if i > 0 {
sb.WriteString(", ")
}
sb.WriteString("%v=%v")
}
return sb.String()
}
// formatTimes formats any time.Time values as RFC3339.
func formatTimes(keysAndValues []interface{}) []interface{} {
var formattedArgs []interface{}
for _, arg := range keysAndValues {
if t, ok := arg.(time.Time); ok {
arg = t.Format(time.RFC3339)
}
formattedArgs = append(formattedArgs, arg)
}
return formattedArgs
}
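Anything with a Printf method satisfies the interface that PrintfLogger and VerbosePrintfLogger accept, which is plausibly what the "wrap logrus so it's available to cron" commit above relies on, since *logrus.Logger has Printf. A sketch, not GoToSocial's actual wiring:

```go
package main

import (
	"time"

	"github.com/robfig/cron/v3"
	"github.com/sirupsen/logrus"
)

func main() {
	// *logrus.Logger has Printf(string, ...interface{}), so it satisfies the
	// anonymous interface that PrintfLogger and VerbosePrintfLogger accept.
	logger := cron.VerbosePrintfLogger(logrus.StandardLogger())

	c := cron.New(cron.WithLogger(logger))
	c.AddFunc("@every 1s", func() {}) // runs are logged as key/value pairs
	c.Start()

	time.Sleep(2 * time.Second)
	<-c.Stop().Done()
}
```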

vendor/github.com/robfig/cron/v3/option.go (generated, vendored, new file, +45)

@@ -0,0 +1,45 @@
package cron
import (
"time"
)
// Option represents a modification to the default behavior of a Cron.
type Option func(*Cron)
// WithLocation overrides the timezone of the cron instance.
func WithLocation(loc *time.Location) Option {
return func(c *Cron) {
c.location = loc
}
}
// WithSeconds overrides the parser used for interpreting job schedules to
// include a seconds field as the first one.
func WithSeconds() Option {
return WithParser(NewParser(
Second | Minute | Hour | Dom | Month | Dow | Descriptor,
))
}
// WithParser overrides the parser used for interpreting job schedules.
func WithParser(p ScheduleParser) Option {
return func(c *Cron) {
c.parser = p
}
}
// WithChain specifies Job wrappers to apply to all jobs added to this cron.
// Refer to the Chain* functions in this package for provided wrappers.
func WithChain(wrappers ...JobWrapper) Option {
return func(c *Cron) {
c.chain = NewChain(wrappers...)
}
}
// WithLogger uses the provided logger.
func WithLogger(logger Logger) Option {
return func(c *Cron) {
c.logger = logger
}
}
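Options compose at construction time; for example, opting into the seconds field and a UTC clock together:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New(
		cron.WithSeconds(),          // 6-field, Quartz-style specs
		cron.WithLocation(time.UTC), // schedule in UTC, not time.Local
	)

	id, err := c.AddFunc("*/10 * * * * *", func() { fmt.Println("every 10s") })
	if err != nil {
		panic(err)
	}
	fmt.Println("added entry", id)
}
```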

vendor/github.com/robfig/cron/v3/parser.go (generated, vendored, new file, +434)

@@ -0,0 +1,434 @@
package cron
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// Configuration options for creating a parser. Most options specify which
// fields should be included, while others enable features. If a field is not
// included the parser will assume a default value. These options do not change
// the order fields are parsed in.
type ParseOption int
const (
Second ParseOption = 1 << iota // Seconds field, default 0
SecondOptional // Optional seconds field, default 0
Minute // Minutes field, default 0
Hour // Hours field, default 0
Dom // Day of month field, default *
Month // Month field, default *
Dow // Day of week field, default *
DowOptional // Optional day of week field, default *
Descriptor // Allow descriptors such as @monthly, @weekly, etc.
)
var places = []ParseOption{
Second,
Minute,
Hour,
Dom,
Month,
Dow,
}
var defaults = []string{
"0",
"0",
"0",
"*",
"*",
"*",
}
// A custom Parser that can be configured.
type Parser struct {
options ParseOption
}
// NewParser creates a Parser with custom options.
//
// It panics if more than one Optional is given, since it would be impossible to
// correctly infer which optional is provided or missing in general.
//
// Examples
//
// // Standard parser without descriptors
// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
// sched, err := specParser.Parse("0 0 15 */3 *")
//
// // Same as above, just excludes time fields
// subsParser := NewParser(Dom | Month | Dow)
// sched, err := specParser.Parse("15 */3 *")
//
// // Same as above, just makes Dow optional
// subsParser := NewParser(Dom | Month | DowOptional)
// sched, err := specParser.Parse("15 */3")
//
func NewParser(options ParseOption) Parser {
optionals := 0
if options&DowOptional > 0 {
optionals++
}
if options&SecondOptional > 0 {
optionals++
}
if optionals > 1 {
panic("multiple optionals may not be configured")
}
return Parser{options}
}
// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
// It accepts crontab specs and features configured by NewParser.
func (p Parser) Parse(spec string) (Schedule, error) {
if len(spec) == 0 {
return nil, fmt.Errorf("empty spec string")
}
// Extract timezone if present
var loc = time.Local
if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
var err error
i := strings.Index(spec, " ")
eq := strings.Index(spec, "=")
if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
}
spec = strings.TrimSpace(spec[i:])
}
// Handle named schedules (descriptors), if configured
if strings.HasPrefix(spec, "@") {
if p.options&Descriptor == 0 {
return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
}
return parseDescriptor(spec, loc)
}
// Split on whitespace.
fields := strings.Fields(spec)
// Validate & fill in any omitted or optional fields
var err error
fields, err = normalizeFields(fields, p.options)
if err != nil {
return nil, err
}
field := func(field string, r bounds) uint64 {
if err != nil {
return 0
}
var bits uint64
bits, err = getField(field, r)
return bits
}
var (
second = field(fields[0], seconds)
minute = field(fields[1], minutes)
hour = field(fields[2], hours)
dayofmonth = field(fields[3], dom)
month = field(fields[4], months)
dayofweek = field(fields[5], dow)
)
if err != nil {
return nil, err
}
return &SpecSchedule{
Second: second,
Minute: minute,
Hour: hour,
Dom: dayofmonth,
Month: month,
Dow: dayofweek,
Location: loc,
}, nil
}
// normalizeFields takes a subset of the time fields and returns the full set
// with defaults (zeroes) populated for unset fields.
//
// As part of performing this function, it also validates that the provided
// fields are compatible with the configured options.
func normalizeFields(fields []string, options ParseOption) ([]string, error) {
// Validate optionals & add their field to options
optionals := 0
if options&SecondOptional > 0 {
options |= Second
optionals++
}
if options&DowOptional > 0 {
options |= Dow
optionals++
}
if optionals > 1 {
return nil, fmt.Errorf("multiple optionals may not be configured")
}
// Figure out how many fields we need
max := 0
for _, place := range places {
if options&place > 0 {
max++
}
}
min := max - optionals
// Validate number of fields
if count := len(fields); count < min || count > max {
if min == max {
return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
}
return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
}
// Populate the optional field if not provided
if min < max && len(fields) == min {
switch {
case options&DowOptional > 0:
fields = append(fields, defaults[5]) // TODO: improve access to default
case options&SecondOptional > 0:
fields = append([]string{defaults[0]}, fields...)
default:
return nil, fmt.Errorf("unknown optional field")
}
}
// Populate all fields not part of options with their defaults
n := 0
expandedFields := make([]string, len(places))
copy(expandedFields, defaults)
for i, place := range places {
if options&place > 0 {
expandedFields[i] = fields[n]
n++
}
}
return expandedFields, nil
}
var standardParser = NewParser(
Minute | Hour | Dom | Month | Dow | Descriptor,
)
// ParseStandard returns a new crontab schedule representing the given
// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
// representing: minute, hour, day of month, month and day of week, in that
// order. It returns a descriptive error if the spec is not valid.
//
// It accepts
// - Standard crontab specs, e.g. "* * * * ?"
// - Descriptors, e.g. "@midnight", "@every 1h30m"
func ParseStandard(standardSpec string) (Schedule, error) {
return standardParser.Parse(standardSpec)
}
// getField returns an Int with the bits set representing all of the times that
// the field represents or error parsing field value. A "field" is a comma-separated
// list of "ranges".
func getField(field string, r bounds) (uint64, error) {
var bits uint64
ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
for _, expr := range ranges {
bit, err := getRange(expr, r)
if err != nil {
return bits, err
}
bits |= bit
}
return bits, nil
}
// getRange returns the bits indicated by the given expression:
// number | number "-" number [ "/" number ]
// or error parsing range.
func getRange(expr string, r bounds) (uint64, error) {
var (
start, end, step uint
rangeAndStep = strings.Split(expr, "/")
lowAndHigh = strings.Split(rangeAndStep[0], "-")
singleDigit = len(lowAndHigh) == 1
err error
)
var extra uint64
if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
start = r.min
end = r.max
extra = starBit
} else {
start, err = parseIntOrName(lowAndHigh[0], r.names)
if err != nil {
return 0, err
}
switch len(lowAndHigh) {
case 1:
end = start
case 2:
end, err = parseIntOrName(lowAndHigh[1], r.names)
if err != nil {
return 0, err
}
default:
return 0, fmt.Errorf("too many hyphens: %s", expr)
}
}
switch len(rangeAndStep) {
case 1:
step = 1
case 2:
step, err = mustParseInt(rangeAndStep[1])
if err != nil {
return 0, err
}
// Special handling: "N/step" means "N-max/step".
if singleDigit {
end = r.max
}
if step > 1 {
extra = 0
}
default:
return 0, fmt.Errorf("too many slashes: %s", expr)
}
if start < r.min {
return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
}
if end > r.max {
return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
}
if start > end {
return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
}
if step == 0 {
return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
}
return getBits(start, end, step) | extra, nil
}
// parseIntOrName returns the (possibly-named) integer contained in expr.
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
if names != nil {
if namedInt, ok := names[strings.ToLower(expr)]; ok {
return namedInt, nil
}
}
return mustParseInt(expr)
}
// mustParseInt parses the given expression as an int or returns an error.
func mustParseInt(expr string) (uint, error) {
num, err := strconv.Atoi(expr)
if err != nil {
return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err)
}
if num < 0 {
return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr)
}
return uint(num), nil
}
// getBits sets all bits in the range [min, max], modulo the given step size.
func getBits(min, max, step uint) uint64 {
var bits uint64
// If step is 1, use shifts.
if step == 1 {
return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
}
// Else, use a simple loop.
for i := min; i <= max; i += step {
bits |= 1 << i
}
return bits
}
// all returns all bits within the given bounds. (plus the star bit)
func all(r bounds) uint64 {
return getBits(r.min, r.max, 1) | starBit
}
// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) {
switch descriptor {
case "@yearly", "@annually":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: 1 << dom.min,
Month: 1 << months.min,
Dow: all(dow),
Location: loc,
}, nil
case "@monthly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: 1 << dom.min,
Month: all(months),
Dow: all(dow),
Location: loc,
}, nil
case "@weekly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: all(dom),
Month: all(months),
Dow: 1 << dow.min,
Location: loc,
}, nil
case "@daily", "@midnight":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: all(dom),
Month: all(months),
Dow: all(dow),
Location: loc,
}, nil
case "@hourly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: all(hours),
Dom: all(dom),
Month: all(months),
Dow: all(dow),
Location: loc,
}, nil
}
const every = "@every "
if strings.HasPrefix(descriptor, every) {
duration, err := time.ParseDuration(descriptor[len(every):])
if err != nil {
return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err)
}
return Every(duration), nil
}
return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor)
}
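For reference, these descriptors are accepted by the package's standard parser as well; a minimal sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	daily, _ := cron.ParseStandard("@daily")     // equivalent to "0 0 * * *"
	every, _ := cron.ParseStandard("@every 90m") // fixed 90-minute interval

	t := time.Date(2022, 3, 7, 10, 0, 0, 0, time.UTC)
	fmt.Println(daily.Next(t)) // 2022-03-08 00:00:00 +0000 UTC
	fmt.Println(every.Next(t)) // 2022-03-07 11:30:00 +0000 UTC
}
```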

vendor/github.com/robfig/cron/v3/spec.go (generated, vendored, new file)

@ -0,0 +1,188 @@
package cron
import "time"
// SpecSchedule specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as bit sets.
type SpecSchedule struct {
Second, Minute, Hour, Dom, Month, Dow uint64
// Override location for this schedule.
Location *time.Location
}
// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
min, max uint
names map[string]uint
}
// The bounds for each field.
var (
seconds = bounds{0, 59, nil}
minutes = bounds{0, 59, nil}
hours = bounds{0, 23, nil}
dom = bounds{1, 31, nil}
months = bounds{1, 12, map[string]uint{
"jan": 1,
"feb": 2,
"mar": 3,
"apr": 4,
"may": 5,
"jun": 6,
"jul": 7,
"aug": 8,
"sep": 9,
"oct": 10,
"nov": 11,
"dec": 12,
}}
dow = bounds{0, 6, map[string]uint{
"sun": 0,
"mon": 1,
"tue": 2,
"wed": 3,
"thu": 4,
"fri": 5,
"sat": 6,
}}
)
const (
// Set the top bit if a star was included in the expression.
starBit = 1 << 63
)
// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecSchedule) Next(t time.Time) time.Time {
// General approach
//
// For Month, Day, Hour, Minute, Second:
// Check if the time value matches. If yes, continue to the next field.
// If the field doesn't match the schedule, then increment the field until it matches.
// While incrementing the field, a wrap-around brings it back to the beginning
// of the field list (since it is necessary to re-verify previous field
// values)
// Convert the given time into the schedule's timezone, if one is specified.
// Save the original timezone so we can convert back after we find a time.
// Note that schedules without a time zone specified (time.Local) are treated
// as local to the time provided.
origLocation := t.Location()
loc := s.Location
if loc == time.Local {
loc = t.Location()
}
if s.Location != time.Local {
t = t.In(s.Location)
}
// Start at the earliest possible time (the upcoming second).
t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
// This flag indicates whether a field has been incremented.
added := false
// If no time is found within five years, return zero.
yearLimit := t.Year() + 5
WRAP:
if t.Year() > yearLimit {
return time.Time{}
}
// Find the first applicable month.
// If it's this month, then do nothing.
for 1<<uint(t.Month())&s.Month == 0 {
// If we have to add a month, reset the other parts to 0.
if !added {
added = true
// Otherwise, set the date at the beginning (since the current time is irrelevant).
t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
}
t = t.AddDate(0, 1, 0)
// Wrapped around.
if t.Month() == time.January {
goto WRAP
}
}
// Now get a day in that month.
//
// NOTE: This causes issues for daylight savings regimes where midnight does
// not exist. For example: Sao Paulo has DST that transforms midnight on
// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
for !dayMatches(s, t) {
if !added {
added = true
t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
}
t = t.AddDate(0, 0, 1)
// Notice if the hour is no longer midnight due to DST.
// Add an hour if it's 23, subtract an hour if it's 1.
if t.Hour() != 0 {
if t.Hour() > 12 {
t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
} else {
t = t.Add(time.Duration(-t.Hour()) * time.Hour)
}
}
if t.Day() == 1 {
goto WRAP
}
}
for 1<<uint(t.Hour())&s.Hour == 0 {
if !added {
added = true
t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
}
t = t.Add(1 * time.Hour)
if t.Hour() == 0 {
goto WRAP
}
}
for 1<<uint(t.Minute())&s.Minute == 0 {
if !added {
added = true
t = t.Truncate(time.Minute)
}
t = t.Add(1 * time.Minute)
if t.Minute() == 0 {
goto WRAP
}
}
for 1<<uint(t.Second())&s.Second == 0 {
if !added {
added = true
t = t.Truncate(time.Second)
}
t = t.Add(1 * time.Second)
if t.Second() == 0 {
goto WRAP
}
}
return t.In(origLocation)
}
// dayMatches returns true if the schedule's day-of-week and day-of-month
// restrictions are satisfied by the given time.
func dayMatches(s *SpecSchedule, t time.Time) bool {
var (
domMatch bool = 1<<uint(t.Day())&s.Dom > 0
dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
)
if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
return domMatch && dowMatch
}
return domMatch || dowMatch
}
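This encodes the classic crontab quirk: when both day fields are restricted, a day matching either one fires; only when one field is a star does the other apply alone. A sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// dom=15 and dow=0 (Sunday) are both restricted, so the schedule
	// fires on the 15th OR on any Sunday, whichever comes first.
	sched, _ := cron.ParseStandard("0 0 15 * 0")
	t := time.Date(2022, 3, 7, 12, 0, 0, 0, time.UTC) // a Monday
	fmt.Println(sched.Next(t)) // 2022-03-13 00:00:00 +0000 UTC (a Sunday)
}
```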


@ -1,3 +1,26 @@
## [1.0.20](https://github.com/uptrace/bun/compare/v1.0.19...v1.0.20) (2021-12-19)
### Bug Fixes
* add Event.QueryTemplate and change Event.Query to be always formatted ([52b1ccd](https://github.com/uptrace/bun/commit/52b1ccdf3578418aa427adef9dcf942d90ae4fdd))
* change GetTableName to return formatted table name in case ModelTableExpr ([95144dd](https://github.com/uptrace/bun/commit/95144dde937b4ac88b36b0bd8b01372421069b44))
* change ScanAndCount to work with transactions ([5b3f2c0](https://github.com/uptrace/bun/commit/5b3f2c021c424da366caffd33589e8adde821403))
* **dbfixture:** directly call funcs bypassing template eval ([a61974b](https://github.com/uptrace/bun/commit/a61974ba2d24361c5357fb9bda1f3eceec5a45cd))
* don't append CASCADE by default in drop table/column queries ([26457ea](https://github.com/uptrace/bun/commit/26457ea5cb20862d232e6e5fa4dbdeac5d444bf1))
* **migrate:** mark migrations as applied on error so the migration can be rolled back ([8ce33fb](https://github.com/uptrace/bun/commit/8ce33fbbac8e33077c20daf19a14c5ff2291bcae))
* respect nullzero when appending struct fields. Fixes [#339](https://github.com/uptrace/bun/issues/339) ([ffd02f3](https://github.com/uptrace/bun/commit/ffd02f3170b3cccdd670a48d563cfb41094c05d6))
* reuse tx for relation join ([#366](https://github.com/uptrace/bun/issues/366)) ([60bdb1a](https://github.com/uptrace/bun/commit/60bdb1ac84c0a699429eead3b7fdfbf14fe69ac6))
### Features
* add `Dialect()` to Transaction and IDB interface ([693f1e1](https://github.com/uptrace/bun/commit/693f1e135999fc31cf83b99a2530a695b20f4e1b))
* add model embedding via embed:prefix_ ([9a2cedc](https://github.com/uptrace/bun/commit/9a2cedc8b08fa8585d4bfced338bd0a40d736b1d))
* change the default logoutput to stderr ([4bf5773](https://github.com/uptrace/bun/commit/4bf577382f19c64457cbf0d64490401450954654)), closes [#349](https://github.com/uptrace/bun/issues/349)
## [1.0.19](https://github.com/uptrace/bun/compare/v1.0.18...v1.0.19) (2021-11-30)


@ -43,8 +43,147 @@ Projects using Bun:
demand.
- [RealWorld app](https://github.com/go-bun/bun-realworld-app)
## Benchmark
[https://github.com/davars/dbeval](https://github.com/davars/dbeval)
<details>
<summary>github.com/frederikhors/orm-benchmark results</summary>
<summary>results</summary>
```
BenchmarkInsert
BenchmarkInsert/*dbeval.Memory/Authors
BenchmarkInsert/*dbeval.Memory/Authors-4 84450 12104 ns/op 2623 B/op 70 allocs/op
BenchmarkInsert/*dbeval.Xorm/Authors
BenchmarkInsert/*dbeval.Xorm/Authors-4 7291 153505 ns/op 9024 B/op 311 allocs/op
BenchmarkInsert/*dbeval.UpperDB/Authors
BenchmarkInsert/*dbeval.UpperDB/Authors-4 4608 223672 ns/op 24160 B/op 1100 allocs/op
BenchmarkInsert/*dbeval.Bun/Authors
BenchmarkInsert/*dbeval.Bun/Authors-4 6034 186439 ns/op 6818 B/op 80 allocs/op
BenchmarkInsert/*dbeval.PQ/Authors
BenchmarkInsert/*dbeval.PQ/Authors-4 1141 907494 ns/op 6487 B/op 193 allocs/op
BenchmarkInsert/*dbeval.SQLX/Authors
BenchmarkInsert/*dbeval.SQLX/Authors-4 1165 916987 ns/op 10089 B/op 271 allocs/op
BenchmarkInsert/*dbeval.Ozzo/Authors
BenchmarkInsert/*dbeval.Ozzo/Authors-4 1105 1058082 ns/op 27826 B/op 588 allocs/op
BenchmarkInsert/*dbeval.PGXStdlib/Authors
BenchmarkInsert/*dbeval.PGXStdlib/Authors-4 1228 900207 ns/op 6032 B/op 180 allocs/op
BenchmarkInsert/*dbeval.Gorm/Authors
BenchmarkInsert/*dbeval.Gorm/Authors-4 946 1184285 ns/op 35634 B/op 918 allocs/op
BenchmarkInsert/*dbeval.PGX/Authors
BenchmarkInsert/*dbeval.PGX/Authors-4 1116 923728 ns/op 3839 B/op 130 allocs/op
BenchmarkInsert/*dbeval.DBR/Authors
BenchmarkInsert/*dbeval.DBR/Authors-4 5800 183982 ns/op 8646 B/op 230 allocs/op
BenchmarkInsert/*dbeval.GoPG/Authors
BenchmarkInsert/*dbeval.GoPG/Authors-4 6110 173923 ns/op 2906 B/op 87 allocs/op
BenchmarkInsert/*dbeval.DBR/Articles
BenchmarkInsert/*dbeval.DBR/Articles-4 1706 684466 ns/op 133346 B/op 1614 allocs/op
BenchmarkInsert/*dbeval.PQ/Articles
BenchmarkInsert/*dbeval.PQ/Articles-4 884 1249791 ns/op 100403 B/op 1491 allocs/op
BenchmarkInsert/*dbeval.PGX/Articles
BenchmarkInsert/*dbeval.PGX/Articles-4 916 1288143 ns/op 83539 B/op 1392 allocs/op
BenchmarkInsert/*dbeval.GoPG/Articles
BenchmarkInsert/*dbeval.GoPG/Articles-4 1726 622639 ns/op 78638 B/op 1359 allocs/op
BenchmarkInsert/*dbeval.SQLX/Articles
BenchmarkInsert/*dbeval.SQLX/Articles-4 860 1262599 ns/op 92030 B/op 1574 allocs/op
BenchmarkInsert/*dbeval.Gorm/Articles
BenchmarkInsert/*dbeval.Gorm/Articles-4 782 1421550 ns/op 136534 B/op 2411 allocs/op
BenchmarkInsert/*dbeval.PGXStdlib/Articles
BenchmarkInsert/*dbeval.PGXStdlib/Articles-4 938 1230576 ns/op 86743 B/op 1441 allocs/op
BenchmarkInsert/*dbeval.Bun/Articles
BenchmarkInsert/*dbeval.Bun/Articles-4 1843 626681 ns/op 101610 B/op 1323 allocs/op
BenchmarkInsert/*dbeval.Xorm/Articles
BenchmarkInsert/*dbeval.Xorm/Articles-4 1677 650244 ns/op 126677 B/op 1752 allocs/op
BenchmarkInsert/*dbeval.Memory/Articles
BenchmarkInsert/*dbeval.Memory/Articles-4 1988 1223308 ns/op 77576 B/op 1310 allocs/op
BenchmarkInsert/*dbeval.UpperDB/Articles
BenchmarkInsert/*dbeval.UpperDB/Articles-4 1696 687130 ns/op 139680 B/op 2862 allocs/op
BenchmarkInsert/*dbeval.Ozzo/Articles
BenchmarkInsert/*dbeval.Ozzo/Articles-4 697 1496859 ns/op 114780 B/op 1950 allocs/op
BenchmarkFindAuthorByID
BenchmarkFindAuthorByID/*dbeval.UpperDB
BenchmarkFindAuthorByID/*dbeval.UpperDB-4 10184 117527 ns/op 9953 B/op 441 allocs/op
BenchmarkFindAuthorByID/*dbeval.Bun
BenchmarkFindAuthorByID/*dbeval.Bun-4 20716 54261 ns/op 5096 B/op 15 allocs/op
BenchmarkFindAuthorByID/*dbeval.Ozzo
BenchmarkFindAuthorByID/*dbeval.Ozzo-4 11166 91043 ns/op 3088 B/op 64 allocs/op
BenchmarkFindAuthorByID/*dbeval.PQ
BenchmarkFindAuthorByID/*dbeval.PQ-4 13875 86171 ns/op 844 B/op 24 allocs/op
BenchmarkFindAuthorByID/*dbeval.PGX
BenchmarkFindAuthorByID/*dbeval.PGX-4 13846 79983 ns/op 719 B/op 15 allocs/op
BenchmarkFindAuthorByID/*dbeval.Memory
BenchmarkFindAuthorByID/*dbeval.Memory-4 14113720 82.33 ns/op 0 B/op 0 allocs/op
BenchmarkFindAuthorByID/*dbeval.Xorm
BenchmarkFindAuthorByID/*dbeval.Xorm-4 12027 98519 ns/op 3633 B/op 106 allocs/op
BenchmarkFindAuthorByID/*dbeval.Gorm
BenchmarkFindAuthorByID/*dbeval.Gorm-4 11521 102241 ns/op 6592 B/op 143 allocs/op
BenchmarkFindAuthorByID/*dbeval.PGXStdlib
BenchmarkFindAuthorByID/*dbeval.PGXStdlib-4 13933 82626 ns/op 1174 B/op 28 allocs/op
BenchmarkFindAuthorByID/*dbeval.DBR
BenchmarkFindAuthorByID/*dbeval.DBR-4 21920 51175 ns/op 1756 B/op 39 allocs/op
BenchmarkFindAuthorByID/*dbeval.SQLX
BenchmarkFindAuthorByID/*dbeval.SQLX-4 13603 80788 ns/op 1327 B/op 32 allocs/op
BenchmarkFindAuthorByID/*dbeval.GoPG
BenchmarkFindAuthorByID/*dbeval.GoPG-4 23174 50042 ns/op 869 B/op 17 allocs/op
BenchmarkFindAuthorByName
BenchmarkFindAuthorByName/*dbeval.SQLX
BenchmarkFindAuthorByName/*dbeval.SQLX-4 1070 1065272 ns/op 126348 B/op 4018 allocs/op
BenchmarkFindAuthorByName/*dbeval.Bun
BenchmarkFindAuthorByName/*dbeval.Bun-4 877 1231377 ns/op 115803 B/op 5005 allocs/op
BenchmarkFindAuthorByName/*dbeval.Xorm
BenchmarkFindAuthorByName/*dbeval.Xorm-4 471 2345445 ns/op 455711 B/op 19080 allocs/op
BenchmarkFindAuthorByName/*dbeval.DBR
BenchmarkFindAuthorByName/*dbeval.DBR-4 954 1089977 ns/op 120070 B/op 6023 allocs/op
BenchmarkFindAuthorByName/*dbeval.PQ
BenchmarkFindAuthorByName/*dbeval.PQ-4 1333 784400 ns/op 87159 B/op 4006 allocs/op
BenchmarkFindAuthorByName/*dbeval.GoPG
BenchmarkFindAuthorByName/*dbeval.GoPG-4 1580 770966 ns/op 87525 B/op 3028 allocs/op
BenchmarkFindAuthorByName/*dbeval.UpperDB
BenchmarkFindAuthorByName/*dbeval.UpperDB-4 789 1314164 ns/op 190689 B/op 6428 allocs/op
BenchmarkFindAuthorByName/*dbeval.Ozzo
BenchmarkFindAuthorByName/*dbeval.Ozzo-4 948 1255282 ns/op 238764 B/op 6053 allocs/op
BenchmarkFindAuthorByName/*dbeval.PGXStdlib
BenchmarkFindAuthorByName/*dbeval.PGXStdlib-4 1279 920391 ns/op 126163 B/op 4014 allocs/op
BenchmarkFindAuthorByName/*dbeval.PGX
BenchmarkFindAuthorByName/*dbeval.PGX-4 1364 780970 ns/op 101967 B/op 2028 allocs/op
BenchmarkFindAuthorByName/*dbeval.Gorm
BenchmarkFindAuthorByName/*dbeval.Gorm-4 340 3445818 ns/op 1573637 B/op 27102 allocs/op
BenchmarkFindAuthorByName/*dbeval.Memory
BenchmarkFindAuthorByName/*dbeval.Memory-4 38081223 31.24 ns/op 0 B/op 0 allocs/op
BenchmarkRecentArticles
BenchmarkRecentArticles/*dbeval.PGXStdlib
BenchmarkRecentArticles/*dbeval.PGXStdlib-4 358 3344119 ns/op 3425578 B/op 14177 allocs/op
BenchmarkRecentArticles/*dbeval.GoPG
BenchmarkRecentArticles/*dbeval.GoPG-4 364 3156372 ns/op 1794091 B/op 10032 allocs/op
BenchmarkRecentArticles/*dbeval.Xorm
BenchmarkRecentArticles/*dbeval.Xorm-4 157 7567835 ns/op 5018011 B/op 81425 allocs/op
BenchmarkRecentArticles/*dbeval.Gorm
BenchmarkRecentArticles/*dbeval.Gorm-4 139 7980084 ns/op 6776277 B/op 85418 allocs/op
BenchmarkRecentArticles/*dbeval.SQLX
BenchmarkRecentArticles/*dbeval.SQLX-4 338 3289802 ns/op 3425890 B/op 14181 allocs/op
BenchmarkRecentArticles/*dbeval.Ozzo
BenchmarkRecentArticles/*dbeval.Ozzo-4 320 3508322 ns/op 4025966 B/op 18207 allocs/op
BenchmarkRecentArticles/*dbeval.DBR
BenchmarkRecentArticles/*dbeval.DBR-4 237 5248644 ns/op 3331003 B/op 21370 allocs/op
BenchmarkRecentArticles/*dbeval.Bun
BenchmarkRecentArticles/*dbeval.Bun-4 280 4528582 ns/op 1864362 B/op 15965 allocs/op
BenchmarkRecentArticles/*dbeval.UpperDB
BenchmarkRecentArticles/*dbeval.UpperDB-4 297 3704663 ns/op 3607287 B/op 18542 allocs/op
BenchmarkRecentArticles/*dbeval.PQ
BenchmarkRecentArticles/*dbeval.PQ-4 308 3489229 ns/op 3277050 B/op 17359 allocs/op
BenchmarkRecentArticles/*dbeval.Memory
BenchmarkRecentArticles/*dbeval.Memory-4 29590380 42.27 ns/op 0 B/op 0 allocs/op
BenchmarkRecentArticles/*dbeval.PGX
BenchmarkRecentArticles/*dbeval.PGX-4 356 3345500 ns/op 3297316 B/op 6226 allocs/op
```
</details>
[https://github.com/frederikhors/orm-benchmark](https://github.com/frederikhors/orm-benchmark)
<details>
<summary>results</summary>
```
4000 times - Insert

```
</details>

vendor/github.com/uptrace/bun/db.go (generated, vendored)

@ -32,6 +32,7 @@ func WithDiscardUnknownColumns() DBOption {
type DB struct {
*sql.DB
dialect schema.Dialect
features feature.Feature
@ -125,7 +126,7 @@ func (db *DB) NewDropColumn() *DropColumnQuery {
func (db *DB) ResetModel(ctx context.Context, models ...interface{}) error {
for _, model := range models {
if _, err := db.NewDropTable().Model(model).IfExists().Exec(ctx); err != nil {
if _, err := db.NewDropTable().Model(model).IfExists().Cascade().Exec(ctx); err != nil {
return err
}
if _, err := db.NewCreateTable().Model(model).Exec(ctx); err != nil {
@ -226,8 +227,9 @@ func (db *DB) Exec(query string, args ...interface{}) (sql.Result, error) {
func (db *DB) ExecContext(
ctx context.Context, query string, args ...interface{},
) (sql.Result, error) {
ctx, event := db.beforeQuery(ctx, nil, query, args, nil)
res, err := db.DB.ExecContext(ctx, db.format(query, args))
formattedQuery := db.format(query, args)
ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
res, err := db.DB.ExecContext(ctx, formattedQuery)
db.afterQuery(ctx, event, res, err)
return res, err
}
@ -239,8 +241,9 @@ func (db *DB) Query(query string, args ...interface{}) (*sql.Rows, error) {
func (db *DB) QueryContext(
ctx context.Context, query string, args ...interface{},
) (*sql.Rows, error) {
ctx, event := db.beforeQuery(ctx, nil, query, args, nil)
rows, err := db.DB.QueryContext(ctx, db.format(query, args))
formattedQuery := db.format(query, args)
ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
rows, err := db.DB.QueryContext(ctx, formattedQuery)
db.afterQuery(ctx, event, nil, err)
return rows, err
}
@ -250,8 +253,9 @@ func (db *DB) QueryRow(query string, args ...interface{}) *sql.Row {
}
func (db *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
ctx, event := db.beforeQuery(ctx, nil, query, args, nil)
row := db.DB.QueryRowContext(ctx, db.format(query, args))
formattedQuery := db.format(query, args)
ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
row := db.DB.QueryRowContext(ctx, formattedQuery)
db.afterQuery(ctx, event, nil, row.Err())
return row
}
@ -281,8 +285,9 @@ func (db *DB) Conn(ctx context.Context) (Conn, error) {
func (c Conn) ExecContext(
ctx context.Context, query string, args ...interface{},
) (sql.Result, error) {
ctx, event := c.db.beforeQuery(ctx, nil, query, args, nil)
res, err := c.Conn.ExecContext(ctx, c.db.format(query, args))
formattedQuery := c.db.format(query, args)
ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
res, err := c.Conn.ExecContext(ctx, formattedQuery)
c.db.afterQuery(ctx, event, res, err)
return res, err
}
@ -290,19 +295,25 @@ func (c Conn) ExecContext(
func (c Conn) QueryContext(
ctx context.Context, query string, args ...interface{},
) (*sql.Rows, error) {
ctx, event := c.db.beforeQuery(ctx, nil, query, args, nil)
rows, err := c.Conn.QueryContext(ctx, c.db.format(query, args))
formattedQuery := c.db.format(query, args)
ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
rows, err := c.Conn.QueryContext(ctx, formattedQuery)
c.db.afterQuery(ctx, event, nil, err)
return rows, err
}
func (c Conn) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
ctx, event := c.db.beforeQuery(ctx, nil, query, args, nil)
row := c.Conn.QueryRowContext(ctx, c.db.format(query, args))
formattedQuery := c.db.format(query, args)
ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
row := c.Conn.QueryRowContext(ctx, formattedQuery)
c.db.afterQuery(ctx, event, nil, row.Err())
return row
}
func (c Conn) Dialect() schema.Dialect {
return c.db.Dialect()
}
func (c Conn) NewValues(model interface{}) *ValuesQuery {
return NewValuesQuery(c.db, model).Conn(c)
}
@ -408,7 +419,7 @@ func (db *DB) Begin() (Tx, error) {
}
func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
ctx, event := db.beforeQuery(ctx, nil, "BEGIN", nil, nil)
ctx, event := db.beforeQuery(ctx, nil, "BEGIN", nil, "BEGIN", nil)
tx, err := db.DB.BeginTx(ctx, opts)
db.afterQuery(ctx, event, nil, err)
if err != nil {
@ -422,14 +433,14 @@ func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
}
func (tx Tx) Commit() error {
ctx, event := tx.db.beforeQuery(tx.ctx, nil, "COMMIT", nil, nil)
ctx, event := tx.db.beforeQuery(tx.ctx, nil, "COMMIT", nil, "COMMIT", nil)
err := tx.Tx.Commit()
tx.db.afterQuery(ctx, event, nil, err)
return err
}
func (tx Tx) Rollback() error {
ctx, event := tx.db.beforeQuery(tx.ctx, nil, "ROLLBACK", nil, nil)
ctx, event := tx.db.beforeQuery(tx.ctx, nil, "ROLLBACK", nil, "ROLLBACK", nil)
err := tx.Tx.Rollback()
tx.db.afterQuery(ctx, event, nil, err)
return err
@ -442,8 +453,9 @@ func (tx Tx) Exec(query string, args ...interface{}) (sql.Result, error) {
func (tx Tx) ExecContext(
ctx context.Context, query string, args ...interface{},
) (sql.Result, error) {
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, nil)
res, err := tx.Tx.ExecContext(ctx, tx.db.format(query, args))
formattedQuery := tx.db.format(query, args)
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
res, err := tx.Tx.ExecContext(ctx, formattedQuery)
tx.db.afterQuery(ctx, event, res, err)
return res, err
}
@ -455,8 +467,9 @@ func (tx Tx) Query(query string, args ...interface{}) (*sql.Rows, error) {
func (tx Tx) QueryContext(
ctx context.Context, query string, args ...interface{},
) (*sql.Rows, error) {
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, nil)
rows, err := tx.Tx.QueryContext(ctx, tx.db.format(query, args))
formattedQuery := tx.db.format(query, args)
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
rows, err := tx.Tx.QueryContext(ctx, formattedQuery)
tx.db.afterQuery(ctx, event, nil, err)
return rows, err
}
@ -466,14 +479,19 @@ func (tx Tx) QueryRow(query string, args ...interface{}) *sql.Row {
}
func (tx Tx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, nil)
row := tx.Tx.QueryRowContext(ctx, tx.db.format(query, args))
formattedQuery := tx.db.format(query, args)
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
row := tx.Tx.QueryRowContext(ctx, formattedQuery)
tx.db.afterQuery(ctx, event, nil, row.Err())
return row
}
//------------------------------------------------------------------------------
func (tx Tx) Dialect() schema.Dialect {
return tx.db.Dialect()
}
func (tx Tx) NewValues(model interface{}) *ValuesQuery {
return NewValuesQuery(tx.db, model).Conn(tx)
}


@ -13,9 +13,10 @@ import (
type QueryEvent struct {
DB *DB
QueryAppender schema.QueryAppender // Deprecated: use IQuery instead
QueryAppender schema.QueryAppender // DEPRECATED: use IQuery instead
IQuery Query
Query string
QueryTemplate string
QueryArgs []interface{}
Model Model
@ -51,8 +52,9 @@ type QueryHook interface {
func (db *DB) beforeQuery(
ctx context.Context,
iquery Query,
query string,
queryTemplate string,
queryArgs []interface{},
query string,
model Model,
) (context.Context, *QueryEvent) {
atomic.AddUint32(&db.stats.Queries, 1)
@ -68,6 +70,7 @@ func (db *DB) beforeQuery(
QueryAppender: iquery,
IQuery: iquery,
Query: query,
QueryTemplate: queryTemplate,
QueryArgs: queryArgs,
StartTime: time.Now(),
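A sketch of a hook consuming the new field (QueryHook and AddQueryHook are bun's standard API; the hook name is illustrative). Query now always carries the fully formatted SQL, while QueryTemplate keeps the unformatted template:

```go
package dbhooks

import (
	"context"
	"log"
	"time"

	"github.com/uptrace/bun"
)

type logHook struct{}

var _ bun.QueryHook = (*logHook)(nil)

func (*logHook) BeforeQuery(ctx context.Context, _ *bun.QueryEvent) context.Context {
	return ctx
}

func (*logHook) AfterQuery(_ context.Context, e *bun.QueryEvent) {
	log.Printf("template=%q query=%q took=%s",
		e.QueryTemplate, e.Query, time.Since(e.StartTime))
}

// Register with: db.AddQueryHook(&logHook{})
```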


@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io/ioutil"
"log"
"path/filepath"
"regexp"
"time"
@ -59,11 +58,16 @@ func (m *Migrator) DB() *bun.DB {
// MigrationsWithStatus returns migrations with status in ascending order.
func (m *Migrator) MigrationsWithStatus(ctx context.Context) (MigrationSlice, error) {
sorted, _, err := m.migrationsWithStatus(ctx)
return sorted, err
}
func (m *Migrator) migrationsWithStatus(ctx context.Context) (MigrationSlice, int64, error) {
sorted := m.migrations.Sorted()
applied, err := m.selectAppliedMigrations(ctx)
if err != nil {
return nil, err
return nil, 0, err
}
appliedMap := migrationMap(applied)
@ -76,7 +80,7 @@ func (m *Migrator) MigrationsWithStatus(ctx context.Context) (MigrationSlice, er
}
}
return sorted, nil
return sorted, applied.LastGroupID(), nil
}
func (m *Migrator) Init(ctx context.Context) error {
@ -128,7 +132,7 @@ func (m *Migrator) Migrate(ctx context.Context, opts ...MigrationOption) (*Migra
}
defer m.Unlock(ctx) //nolint:errcheck
migrations, err := m.MigrationsWithStatus(ctx)
migrations, lastGroupID, err := m.migrationsWithStatus(ctx)
if err != nil {
return nil, err
}
@ -139,21 +143,22 @@ func (m *Migrator) Migrate(ctx context.Context, opts ...MigrationOption) (*Migra
if len(group.Migrations) == 0 {
return group, nil
}
group.ID = migrations.LastGroupID() + 1
group.ID = lastGroupID + 1
for i := range group.Migrations {
migration := &group.Migrations[i]
migration.GroupID = group.ID
// Always mark migration as applied so the rollback has a chance to fix the database.
if err := m.MarkApplied(ctx, migration); err != nil {
return nil, err
}
if !cfg.nop && migration.Up != nil {
if err := migration.Up(ctx, m.db); err != nil {
return group, err
}
}
if err := m.MarkApplied(ctx, migration); err != nil {
return nil, err
}
}
return group, nil
@ -195,36 +200,6 @@ func (m *Migrator) Rollback(ctx context.Context, opts ...MigrationOption) (*Migr
return lastGroup, nil
}
type MigrationStatus struct {
Migrations MigrationSlice
NewMigrations MigrationSlice
LastGroup *MigrationGroup
}
func (m *Migrator) Status(ctx context.Context) (*MigrationStatus, error) {
log.Printf(
"DEPRECATED: bun: replace Status(ctx) with " +
"MigrationsWithStatus(ctx)")
migrations, err := m.MigrationsWithStatus(ctx)
if err != nil {
return nil, err
}
return &MigrationStatus{
Migrations: migrations,
NewMigrations: migrations.Unapplied(),
LastGroup: migrations.LastGroup(),
}, nil
}
func (m *Migrator) MarkCompleted(ctx context.Context) (*MigrationGroup, error) {
log.Printf(
"DEPRECATED: bun: replace MarkCompleted(ctx) with " +
"Migrate(ctx, migrate.WithNopMigration())")
return m.Migrate(ctx, WithNopMigration())
}
type goMigrationConfig struct {
packageName string
}
@ -320,7 +295,7 @@ func (m *Migrator) genMigrationName(name string) (string, error) {
return fmt.Sprintf("%s_%s", version, name), nil
}
// MarkApplied marks the migration as applied (applied).
// MarkApplied marks the migration as applied (completed).
func (m *Migrator) MarkApplied(ctx context.Context, migration *Migration) error {
_, err := m.db.NewInsert().Model(migration).
ModelTableExpr(m.table).
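Consumer-side sketch (standard bun/migrate API; the function name is illustrative): because a migration is now recorded before its Up runs, a failed Up leaves a row that Rollback can act on:

```go
package dbmigrate

import (
	"context"
	"log"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

func runMigrations(ctx context.Context, db *bun.DB, migrations *migrate.Migrations) error {
	m := migrate.NewMigrator(db, migrations)
	if err := m.Init(ctx); err != nil { // create the bookkeeping table
		return err
	}
	group, err := m.Migrate(ctx)
	if err != nil {
		// The failed migration is already marked applied, so roll it back.
		if _, rbErr := m.Rollback(ctx); rbErr != nil {
			log.Printf("rollback also failed: %v", rbErr)
		}
		return err
	}
	log.Printf("migrated to group %d", group.ID)
	return nil
}
```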


@ -1,6 +1,6 @@
{
"name": "bun",
"version": "1.0.19",
"version": "1.0.20",
"main": "index.js",
"repository": "git@github.com:uptrace/bun.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",


@ -43,6 +43,7 @@ var (
// IDB is a common interface for *bun.DB, bun.Conn, and bun.Tx.
type IDB interface {
IConn
Dialect() schema.Dialect
NewValues(model interface{}) *ValuesQuery
NewSelect() *SelectQuery
@ -59,9 +60,9 @@ type IDB interface {
}
var (
_ IConn = (*DB)(nil)
_ IConn = (*Conn)(nil)
_ IConn = (*Tx)(nil)
_ IDB = (*DB)(nil)
_ IDB = (*Conn)(nil)
_ IDB = (*Tx)(nil)
)
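With Dialect() promoted onto IDB, helpers can branch on the backend without caring whether they received a *bun.DB, a Conn, or a Tx; a minimal sketch (helper name illustrative):

```go
package dbutil

import (
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect"
)

// isPostgres accepts *bun.DB, bun.Conn and bun.Tx alike.
func isPostgres(db bun.IDB) bool {
	return db.Dialect().Name() == dialect.PG
}
```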
type baseQuery struct {
@ -74,10 +75,10 @@ type baseQuery struct {
tableModel TableModel
table *schema.Table
with []withQuery
modelTable schema.QueryWithArgs
tables []schema.QueryWithArgs
columns []schema.QueryWithArgs
with []withQuery
modelTableName schema.QueryWithArgs
tables []schema.QueryWithArgs
columns []schema.QueryWithArgs
flags internal.Flag
}
@ -86,13 +87,6 @@ func (q *baseQuery) DB() *DB {
return q.db
}
type query interface {
GetModel() Model
GetTableName() string
}
var _ query = (*baseQuery)(nil)
func (q *baseQuery) GetModel() Model {
return q.model
}
@ -103,15 +97,16 @@ func (q *baseQuery) GetTableName() string {
}
for _, wq := range q.with {
if v, ok := wq.query.(query); ok {
if v, ok := wq.query.(Query); ok {
if model := v.GetModel(); model != nil {
return v.GetTableName()
}
}
}
if q.modelTable.Query != "" {
return q.modelTable.Query
if q.modelTableName.Query != "" {
b, _ := q.modelTableName.AppendQuery(q.db.fmter, nil)
return string(b)
}
if len(q.tables) > 0 {
return q.tables[0].Query
@ -304,8 +299,8 @@ func (q *baseQuery) _excludeColumn(column string) bool {
//------------------------------------------------------------------------------
func (q *baseQuery) modelHasTableName() bool {
if !q.modelTable.IsZero() {
return q.modelTable.Query != ""
if !q.modelTableName.IsZero() {
return q.modelTableName.Query != ""
}
return q.table != nil
}
@ -332,8 +327,8 @@ func (q *baseQuery) _appendTables(
startLen := len(b)
if q.modelHasTableName() {
if !q.modelTable.IsZero() {
b, err = q.modelTable.AppendQuery(fmter, b)
if !q.modelTableName.IsZero() {
b, err = q.modelTableName.AppendQuery(fmter, b)
if err != nil {
return nil, err
}
@ -372,8 +367,8 @@ func (q *baseQuery) appendFirstTableWithAlias(
func (q *baseQuery) _appendFirstTable(
fmter schema.Formatter, b []byte, withAlias bool,
) ([]byte, error) {
if !q.modelTable.IsZero() {
return q.modelTable.AppendQuery(fmter, b)
if !q.modelTableName.IsZero() {
return q.modelTableName.AppendQuery(fmter, b)
}
if q.table != nil {
@ -473,7 +468,7 @@ func (q *baseQuery) scan(
model Model,
hasDest bool,
) (sql.Result, error) {
ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, q.model)
ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model)
rows, err := q.conn.QueryContext(ctx, query)
if err != nil {
@ -503,16 +498,10 @@ func (q *baseQuery) exec(
iquery Query,
query string,
) (sql.Result, error) {
ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, q.model)
ctx, event := q.db.beforeQuery(ctx, iquery, query, nil, query, q.model)
res, err := q.conn.ExecContext(ctx, query)
if err != nil {
q.db.afterQuery(ctx, event, nil, err)
return res, err
}
q.db.afterQuery(ctx, event, res, err)
return res, nil
q.db.afterQuery(ctx, event, nil, err)
return res, err
}
//------------------------------------------------------------------------------
@ -607,34 +596,30 @@ func (q *whereBaseQuery) addWhereCols(cols []string) {
q.setErr(err)
return
}
var fields []*schema.Field
if cols == nil {
if err := q.table.CheckPKs(); err != nil {
q.setErr(err)
return
}
fields = q.table.PKs
} else {
fields = make([]*schema.Field, len(cols))
for i, col := range cols {
field, err := q.table.Field(col)
if err != nil {
q.setErr(err)
return
}
fields[i] = field
}
}
if q.whereFields != nil {
err := errors.New("bun: WherePK can only be called once")
q.setErr(err)
return
}
q.whereFields = fields
if cols == nil {
if err := q.table.CheckPKs(); err != nil {
q.setErr(err)
return
}
q.whereFields = q.table.PKs
return
}
q.whereFields = make([]*schema.Field, len(cols))
for i, col := range cols {
field, err := q.table.Field(col)
if err != nil {
q.setErr(err)
return
}
q.whereFields[i] = field
}
}
func (q *whereBaseQuery) mustAppendWhere(
@ -951,6 +936,7 @@ func (q setQuery) appendSet(fmter schema.Formatter, b []byte) (_ []byte, err err
//------------------------------------------------------------------------------
type cascadeQuery struct {
cascade bool
restrict bool
}
@ -958,10 +944,11 @@ func (q cascadeQuery) appendCascade(fmter schema.Formatter, b []byte) []byte {
if !fmter.HasFeature(feature.TableCascade) {
return b
}
if q.cascade {
b = append(b, " CASCADE"...)
}
if q.restrict {
b = append(b, " RESTRICT"...)
} else {
b = append(b, " CASCADE"...)
}
return b
}
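Since CASCADE is no longer implied, callers opt in explicitly; a sketch (model type illustrative):

```go
package dbutil

import (
	"context"

	"github.com/uptrace/bun"
)

type User struct {
	bun.BaseModel `bun:"table:users"`
	ID            int64 `bun:",pk"`
}

func dropUsers(ctx context.Context, db *bun.DB) error {
	// Request CASCADE (or Restrict()) explicitly; neither is appended by default.
	_, err := db.NewDropTable().
		Model((*User)(nil)).
		IfExists().
		Cascade().
		Exec(ctx)
	return err
}
```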


@ -15,6 +15,8 @@ type AddColumnQuery struct {
ifNotExists bool
}
var _ Query = (*AddColumnQuery)(nil)
func NewAddColumnQuery(db *DB) *AddColumnQuery {
q := &AddColumnQuery{
baseQuery: baseQuery{
@ -50,7 +52,7 @@ func (q *AddColumnQuery) TableExpr(query string, args ...interface{}) *AddColumn
}
func (q *AddColumnQuery) ModelTableExpr(query string, args ...interface{}) *AddColumnQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}


@ -13,6 +13,8 @@ type DropColumnQuery struct {
baseQuery
}
var _ Query = (*DropColumnQuery)(nil)
func NewDropColumnQuery(db *DB) *DropColumnQuery {
q := &DropColumnQuery{
baseQuery: baseQuery{
@ -48,7 +50,7 @@ func (q *DropColumnQuery) TableExpr(query string, args ...interface{}) *DropColu
}
func (q *DropColumnQuery) ModelTableExpr(query string, args ...interface{}) *DropColumnQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}


@ -15,6 +15,8 @@ type DeleteQuery struct {
returningQuery
}
var _ Query = (*DeleteQuery)(nil)
func NewDeleteQuery(db *DB) *DeleteQuery {
q := &DeleteQuery{
whereBaseQuery: whereBaseQuery{
@ -60,7 +62,7 @@ func (q *DeleteQuery) TableExpr(query string, args ...interface{}) *DeleteQuery
}
func (q *DeleteQuery) ModelTableExpr(query string, args ...interface{}) *DeleteQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}
@ -134,6 +136,10 @@ func (q *DeleteQuery) Operation() string {
}
func (q *DeleteQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, err error) {
if q.err != nil {
return nil, q.err
}
fmter = formatterWithModel(fmter, q)
if q.isSoftDelete() {


@ -22,6 +22,8 @@ type CreateIndexQuery struct {
include []schema.QueryWithArgs
}
var _ Query = (*CreateIndexQuery)(nil)
func NewCreateIndexQuery(db *DB) *CreateIndexQuery {
q := &CreateIndexQuery{
whereBaseQuery: whereBaseQuery{
@ -86,7 +88,7 @@ func (q *CreateIndexQuery) TableExpr(query string, args ...interface{}) *CreateI
}
func (q *CreateIndexQuery) ModelTableExpr(query string, args ...interface{}) *CreateIndexQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}


@ -18,6 +18,8 @@ type DropIndexQuery struct {
index schema.QueryWithArgs
}
var _ Query = (*DropIndexQuery)(nil)
func NewDropIndexQuery(db *DB) *DropIndexQuery {
q := &DropIndexQuery{
baseQuery: baseQuery{
@ -50,6 +52,11 @@ func (q *DropIndexQuery) IfExists() *DropIndexQuery {
return q
}
func (q *DropIndexQuery) Cascade() *DropIndexQuery {
q.cascade = true
return q
}
func (q *DropIndexQuery) Restrict() *DropIndexQuery {
q.restrict = true
return q


@ -24,6 +24,8 @@ type InsertQuery struct {
replace bool
}
var _ Query = (*InsertQuery)(nil)
func NewInsertQuery(db *DB) *InsertQuery {
q := &InsertQuery{
whereBaseQuery: whereBaseQuery{
@ -71,7 +73,7 @@ func (q *InsertQuery) TableExpr(query string, args ...interface{}) *InsertQuery
}
func (q *InsertQuery) ModelTableExpr(query string, args ...interface{}) *InsertQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}
@ -159,6 +161,10 @@ func (q *InsertQuery) Operation() string {
}
func (q *InsertQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, err error) {
if q.err != nil {
return nil, q.err
}
fmter = formatterWithModel(fmter, q)
b, err = q.appendWith(fmter, b)


@ -34,6 +34,8 @@ type SelectQuery struct {
union []union
}
var _ Query = (*SelectQuery)(nil)
func NewSelectQuery(db *DB) *SelectQuery {
return &SelectQuery{
whereBaseQuery: whereBaseQuery{
@ -90,7 +92,7 @@ func (q *SelectQuery) TableExpr(query string, args ...interface{}) *SelectQuery
}
func (q *SelectQuery) ModelTableExpr(query string, args ...interface{}) *SelectQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}
@ -342,9 +344,9 @@ func (q *SelectQuery) selectJoins(ctx context.Context, joins []relationJoin) err
case schema.HasOneRelation, schema.BelongsToRelation:
err = q.selectJoins(ctx, j.JoinModel.getJoins())
case schema.HasManyRelation:
err = j.selectMany(ctx, q.db.NewSelect())
err = j.selectMany(ctx, q.db.NewSelect().Conn(q.conn))
case schema.ManyToManyRelation:
err = j.selectM2M(ctx, q.db.NewSelect())
err = j.selectM2M(ctx, q.db.NewSelect().Conn(q.conn))
default:
panic("not reached")
}
@ -369,6 +371,10 @@ func (q *SelectQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
func (q *SelectQuery) appendQuery(
fmter schema.Formatter, b []byte, count bool,
) (_ []byte, err error) {
if q.err != nil {
return nil, q.err
}
fmter = formatterWithModel(fmter, q)
cteCount := count && (len(q.group) > 0 || q.distinctOn != nil)
@ -767,7 +773,7 @@ func (q *SelectQuery) Count(ctx context.Context) (int, error) {
}
query := internal.String(queryBytes)
ctx, event := q.db.beforeQuery(ctx, qq, query, nil, q.model)
ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model)
var num int
err = q.conn.QueryRowContext(ctx, query).Scan(&num)
@ -778,6 +784,13 @@ func (q *SelectQuery) Count(ctx context.Context) (int, error) {
}
func (q *SelectQuery) ScanAndCount(ctx context.Context, dest ...interface{}) (int, error) {
if _, ok := q.conn.(*DB); ok {
return q.scanAndCountConc(ctx, dest...)
}
return q.scanAndCountSeq(ctx, dest...)
}
func (q *SelectQuery) scanAndCountConc(ctx context.Context, dest ...interface{}) (int, error) {
var count int
var wg sync.WaitGroup
var mu sync.Mutex
@ -817,6 +830,21 @@ func (q *SelectQuery) ScanAndCount(ctx context.Context, dest ...interface{}) (in
return count, firstErr
}
func (q *SelectQuery) scanAndCountSeq(ctx context.Context, dest ...interface{}) (int, error) {
var firstErr error
if q.limit >= 0 {
firstErr = q.Scan(ctx, dest...)
}
count, err := q.Count(ctx)
if err != nil && firstErr == nil {
firstErr = err
}
return count, firstErr
}
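Usage is unchanged: on a bare *bun.DB the scan and the count still run concurrently, while on a Conn or Tx the new sequential path runs both on the single connection (the changelog's "ScanAndCount works with transactions" fix). A sketch with an illustrative model:

```go
package dbutil

import (
	"context"

	"github.com/uptrace/bun"
)

type Article struct {
	bun.BaseModel `bun:"table:articles"`
	ID            int64 `bun:",pk"`
	Title         string
}

func listArticles(ctx context.Context, db *bun.DB) ([]Article, int, error) {
	var (
		articles []Article
		count    int
	)
	err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
		var err error
		// On a Tx, ScanAndCount runs Scan then Count sequentially
		// instead of issuing two concurrent queries.
		count, err = tx.NewSelect().Model(&articles).Limit(10).ScanAndCount(ctx)
		return err
	})
	return articles, count, err
}
```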
func (q *SelectQuery) Exists(ctx context.Context) (bool, error) {
if q.err != nil {
return false, q.err
@ -830,7 +858,7 @@ func (q *SelectQuery) Exists(ctx context.Context) (bool, error) {
}
query := internal.String(queryBytes)
ctx, event := q.db.beforeQuery(ctx, qq, query, nil, q.model)
ctx, event := q.db.beforeQuery(ctx, qq, query, nil, query, q.model)
var exists bool
err = q.conn.QueryRowContext(ctx, query).Scan(&exists)


@ -24,6 +24,8 @@ type CreateTableQuery struct {
tablespace schema.QueryWithArgs
}
var _ Query = (*CreateTableQuery)(nil)
func NewCreateTableQuery(db *DB) *CreateTableQuery {
q := &CreateTableQuery{
baseQuery: baseQuery{
@ -59,7 +61,7 @@ func (q *CreateTableQuery) TableExpr(query string, args ...interface{}) *CreateT
}
func (q *CreateTableQuery) ModelTableExpr(query string, args ...interface{}) *CreateTableQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}


@ -15,6 +15,8 @@ type DropTableQuery struct {
ifExists bool
}
var _ Query = (*DropTableQuery)(nil)
func NewDropTableQuery(db *DB) *DropTableQuery {
q := &DropTableQuery{
baseQuery: baseQuery{
@ -50,7 +52,7 @@ func (q *DropTableQuery) TableExpr(query string, args ...interface{}) *DropTable
}
func (q *DropTableQuery) ModelTableExpr(query string, args ...interface{}) *DropTableQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}
@ -61,6 +63,11 @@ func (q *DropTableQuery) IfExists() *DropTableQuery {
return q
}
func (q *DropTableQuery) Cascade() *DropTableQuery {
q.cascade = true
return q
}
func (q *DropTableQuery) Restrict() *DropTableQuery {
q.restrict = true
return q


@ -16,6 +16,8 @@ type TruncateTableQuery struct {
continueIdentity bool
}
var _ Query = (*TruncateTableQuery)(nil)
func NewTruncateTableQuery(db *DB) *TruncateTableQuery {
q := &TruncateTableQuery{
baseQuery: baseQuery{
@ -57,6 +59,11 @@ func (q *TruncateTableQuery) ContinueIdentity() *TruncateTableQuery {
return q
}
func (q *TruncateTableQuery) Cascade() *TruncateTableQuery {
q.cascade = true
return q
}
func (q *TruncateTableQuery) Restrict() *TruncateTableQuery {
q.restrict = true
return q


@ -20,6 +20,8 @@ type UpdateQuery struct {
omitZero bool
}
var _ Query = (*UpdateQuery)(nil)
func NewUpdateQuery(db *DB) *UpdateQuery {
q := &UpdateQuery{
whereBaseQuery: whereBaseQuery{
@ -67,7 +69,7 @@ func (q *UpdateQuery) TableExpr(query string, args ...interface{}) *UpdateQuery
}
func (q *UpdateQuery) ModelTableExpr(query string, args ...interface{}) *UpdateQuery {
q.modelTable = schema.SafeQuery(query, args)
q.modelTableName = schema.SafeQuery(query, args)
return q
}
@ -170,6 +172,10 @@ func (q *UpdateQuery) Operation() string {
}
func (q *UpdateQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, err error) {
if q.err != nil {
return nil, q.err
}
fmter = formatterWithModel(fmter, q)
b, err = q.appendWith(fmter, b)


@ -16,7 +16,10 @@ type ValuesQuery struct {
withOrder bool
}
var _ schema.NamedArgAppender = (*ValuesQuery)(nil)
var (
_ Query = (*ValuesQuery)(nil)
_ schema.NamedArgAppender = (*ValuesQuery)(nil)
)
func NewValuesQuery(db *DB, model interface{}) *ValuesQuery {
q := &ValuesQuery{


@ -177,8 +177,6 @@ type NamedArgAppender interface {
AppendNamedArg(fmter Formatter, b []byte, name string) ([]byte, bool)
}
//------------------------------------------------------------------------------
type namedArgList struct {
arg NamedArgAppender
next *namedArgList
@ -219,13 +217,13 @@ func (a *namedArg) AppendNamedArg(fmter Formatter, b []byte, name string) ([]byt
//------------------------------------------------------------------------------
var _ NamedArgAppender = (*structArgs)(nil)
type structArgs struct {
table *Table
strct reflect.Value
}
var _ NamedArgAppender = (*structArgs)(nil)
func newStructArgs(fmter Formatter, strct interface{}) (*structArgs, bool) {
v := reflect.ValueOf(strct)
if !v.IsValid() {


@ -203,7 +203,7 @@ func (t *Table) fieldByGoName(name string) *Field {
func (t *Table) initFields() {
t.Fields = make([]*Field, 0, t.Type.NumField())
t.FieldMap = make(map[string]*Field, t.Type.NumField())
t.addFields(t.Type, nil)
t.addFields(t.Type, "", nil)
if len(t.PKs) == 0 {
for _, name := range []string{"id", "uuid", "pk_" + t.ModelName} {
@ -230,7 +230,7 @@ func (t *Table) initFields() {
}
}
func (t *Table) addFields(typ reflect.Type, baseIndex []int) {
func (t *Table) addFields(typ reflect.Type, prefix string, index []int) {
for i := 0; i < typ.NumField(); i++ {
f := typ.Field(i)
unexported := f.PkgPath != ""
@ -242,10 +242,6 @@ func (t *Table) addFields(typ reflect.Type, baseIndex []int) {
continue
}
// Make a copy so the slice is not shared between fields.
index := make([]int, len(baseIndex))
copy(index, baseIndex)
if f.Anonymous {
if f.Name == "BaseModel" && f.Type == baseModelType {
if len(index) == 0 {
@ -258,7 +254,7 @@ func (t *Table) addFields(typ reflect.Type, baseIndex []int) {
if fieldType.Kind() != reflect.Struct {
continue
}
t.addFields(fieldType, append(index, f.Index...))
t.addFields(fieldType, "", withIndex(index, f.Index))
tag := tagparser.Parse(f.Tag.Get("bun"))
if _, inherit := tag.Options["inherit"]; inherit {
@ -274,7 +270,7 @@ func (t *Table) addFields(typ reflect.Type, baseIndex []int) {
continue
}
if field := t.newField(f, index); field != nil {
if field := t.newField(f, prefix, index); field != nil {
t.addField(field)
}
}
@ -315,10 +311,20 @@ func (t *Table) processBaseModelField(f reflect.StructField) {
}
//nolint
func (t *Table) newField(f reflect.StructField, index []int) *Field {
sqlName := internal.Underscore(f.Name)
func (t *Table) newField(f reflect.StructField, prefix string, index []int) *Field {
tag := tagparser.Parse(f.Tag.Get("bun"))
if prefix, ok := tag.Option("embed"); ok {
fieldType := indirectType(f.Type)
if fieldType.Kind() != reflect.Struct {
panic(fmt.Errorf("bun: embed %s.%s: got %s, wanted reflect.Struct",
t.TypeName, f.Name, fieldType.Kind()))
}
t.addFields(fieldType, prefix, withIndex(index, f.Index))
return nil
}
sqlName := internal.Underscore(f.Name)
if tag.Name != "" && tag.Name != sqlName {
if isKnownFieldOption(tag.Name) {
internal.Warn.Printf(
@ -328,10 +334,10 @@ func (t *Table) newField(f reflect.StructField, index []int) *Field {
}
sqlName = tag.Name
}
if s, ok := tag.Option("column"); ok {
sqlName = s
}
sqlName = prefix + sqlName
for name := range tag.Options {
if !isKnownFieldOption(name) {
@ -339,7 +345,7 @@ func (t *Table) newField(f reflect.StructField, index []int) *Field {
}
}
index = append(index, f.Index...)
index = withIndex(index, f.Index)
if field := t.fieldWithLock(sqlName); field != nil {
if indexEqual(field.Index, index) {
return field
@ -795,7 +801,7 @@ func (t *Table) inlineFields(field *Field, seen map[reflect.Type]struct{}) {
f.GoName = field.GoName + "_" + f.GoName
f.Name = field.Name + "__" + f.Name
f.SQLName = t.quoteIdent(f.Name)
f.Index = appendNew(field.Index, f.Index...)
f.Index = withIndex(field.Index, f.Index)
t.fieldsMapMu.Lock()
if _, ok := t.FieldMap[f.Name]; !ok {
@ -834,7 +840,7 @@ func (t *Table) AppendNamedArg(
fmter Formatter, b []byte, name string, strct reflect.Value,
) ([]byte, bool) {
if field, ok := t.FieldMap[name]; ok {
return fmter.appendArg(b, field.Value(strct).Interface()), true
return field.AppendValue(fmter, b, strct), true
}
return b, false
}
@ -853,13 +859,6 @@ func (t *Table) quoteIdent(s string) Safe {
return Safe(NewFormatter(t.dialect).AppendIdent(nil, s))
}
func appendNew(dst []int, src ...int) []int {
cp := make([]int, len(dst)+len(src))
copy(cp, dst)
copy(cp[len(dst):], src)
return cp
}
func isKnownTableOption(name string) bool {
switch name {
case "table", "alias", "select":
@ -991,3 +990,10 @@ func softDeleteFieldUpdaterFallback(field *Field) func(fv reflect.Value, tm time
return field.ScanWithCheck(fv, tm)
}
}
func withIndex(a, b []int) []int {
dest := make([]int, 0, len(a)+len(b))
dest = append(dest, a...)
dest = append(dest, b...)
return dest
}
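The prefix threading above backs the changelog's embed:prefix_ model embedding; a sketch of the resulting column mapping (types and names illustrative):

```go
package models

import (
	"time"

	"github.com/uptrace/bun"
)

type Timestamps struct {
	CreatedAt time.Time `bun:",nullzero"`
	UpdatedAt time.Time `bun:",nullzero"`
}

type Post struct {
	bun.BaseModel `bun:"table:posts"`

	ID int64 `bun:",pk"`

	// Embedded with a prefix: the columns become
	// meta_created_at and meta_updated_at.
	Meta Timestamps `bun:"embed:meta_"`
}
```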


@ -2,5 +2,5 @@ package bun
// Version is the current release version.
func Version() string {
return "1.0.19"
return "1.0.20"
}