2018-01-13 23:52:44 +01:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2019-11-10 12:32:38 +01:00
|
|
|
"bytes"
|
2018-01-13 23:52:44 +01:00
|
|
|
"fmt"
|
2020-06-08 17:54:49 +02:00
|
|
|
"math/rand"
|
2018-01-30 15:46:21 +01:00
|
|
|
"net/url"
|
2018-01-13 23:52:44 +01:00
|
|
|
"os"
|
2023-08-11 00:51:34 +02:00
|
|
|
"path/filepath"
|
2018-01-13 23:52:44 +01:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/dchest/safefile"
|
|
|
|
|
|
|
|
"github.com/jedisct1/dlog"
|
2020-06-08 17:54:49 +02:00
|
|
|
"github.com/jedisct1/go-dnsstamps"
|
2018-01-13 23:52:44 +01:00
|
|
|
"github.com/jedisct1/go-minisign"
|
|
|
|
)
|
|
|
|
|
|
|
|
// SourceFormat identifies the layout of a downloaded server-list source.
type SourceFormat int
|
|
|
|
|
|
|
|
const (
	// SourceFormatV2 is the only source format currently supported
	// ("v2" in the configuration file).
	SourceFormatV2 = iota
)
|
|
|
|
|
2023-08-11 00:51:34 +02:00
|
|
|
const (
	// DefaultPrefetchDelay is how long a successfully fetched source is
	// considered current before the next download is scheduled.
	DefaultPrefetchDelay time.Duration = 24 * time.Hour
	// MinimumPrefetchInterval is the shortest delay between fetch attempts,
	// used as the retry interval after a failed or due refresh.
	MinimumPrefetchInterval time.Duration = 10 * time.Minute
)
|
2018-01-18 23:19:14 +01:00
|
|
|
|
2018-01-13 23:52:44 +01:00
|
|
|
// Source describes a remote server list: where to download it from,
// how to verify it, and how it is cached on disk.
type Source struct {
	name                    string              // identifier used in logs and configuration
	urls                    []*url.URL          // candidate download URLs, tried in order
	format                  SourceFormat        // list format (currently only SourceFormatV2)
	bin                     []byte              // raw, signature-verified payload
	minisignKey             *minisign.PublicKey // public key used to verify payload signatures
	cacheFile               string              // on-disk cache path (signature stored at cacheFile+".minisig")
	cacheTTL, prefetchDelay time.Duration       // cache freshness window and delay between prefetches
	refresh                 time.Time           // next scheduled refresh time (zero if never scheduled)
	prefix                  string              // prepended to every server name parsed from this source
}
|
|
|
|
|
2023-08-11 11:01:55 +02:00
|
|
|
// timeNow() is replaced by tests to provide a static value
var timeNow = time.Now
|
|
|
|
|
|
|
|
func (source *Source) checkSignature(bin, sig []byte) error {
|
|
|
|
signature, err := minisign.DecodeSignature(string(sig))
|
|
|
|
if err == nil {
|
2019-10-30 06:31:28 +01:00
|
|
|
_, err = source.minisignKey.Verify(bin, signature)
|
2019-10-30 03:00:49 +01:00
|
|
|
}
|
2020-03-20 22:40:29 +01:00
|
|
|
return err
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
|
|
|
|
2023-08-11 11:01:55 +02:00
|
|
|
func (source *Source) fetchFromCache(now time.Time) (time.Duration, error) {
|
|
|
|
var err error
|
2023-04-07 15:16:15 +02:00
|
|
|
var bin, sig []byte
|
2023-02-02 19:38:24 +01:00
|
|
|
if bin, err = os.ReadFile(source.cacheFile); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return 0, err
|
2018-01-20 00:30:33 +01:00
|
|
|
}
|
2023-02-02 19:38:24 +01:00
|
|
|
if sig, err = os.ReadFile(source.cacheFile + ".minisig"); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return 0, err
|
2019-10-31 06:53:16 +01:00
|
|
|
}
|
2019-11-01 02:23:32 +01:00
|
|
|
if err = source.checkSignature(bin, sig); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return 0, err
|
2019-11-01 02:23:32 +01:00
|
|
|
}
|
2023-04-07 15:21:00 +02:00
|
|
|
source.bin = bin
|
2019-11-01 02:23:32 +01:00
|
|
|
var fi os.FileInfo
|
|
|
|
if fi, err = os.Stat(source.cacheFile); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return 0, err
|
2019-11-01 02:23:32 +01:00
|
|
|
}
|
2023-08-11 11:01:55 +02:00
|
|
|
var ttl time.Duration = 0
|
2019-11-01 11:15:10 +01:00
|
|
|
if elapsed := now.Sub(fi.ModTime()); elapsed < source.cacheTTL {
|
2023-08-11 00:51:34 +02:00
|
|
|
ttl = source.prefetchDelay - elapsed
|
|
|
|
dlog.Debugf("Source [%s] cache file [%s] is still fresh, next update: %v", source.name, source.cacheFile, ttl)
|
2019-10-30 05:24:59 +01:00
|
|
|
} else {
|
2019-11-01 08:56:57 +01:00
|
|
|
dlog.Debugf("Source [%s] cache file [%s] needs to be refreshed", source.name, source.cacheFile)
|
2018-02-19 19:24:51 +01:00
|
|
|
}
|
2023-08-11 11:01:55 +02:00
|
|
|
return ttl, nil
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
|
|
|
|
2023-08-11 11:01:55 +02:00
|
|
|
func writeSource(f string, bin, sig []byte) error {
|
|
|
|
var err error
|
2019-11-02 05:36:33 +01:00
|
|
|
var fSrc, fSig *safefile.File
|
2023-02-11 14:27:12 +01:00
|
|
|
if fSrc, err = safefile.Create(f, 0o644); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return err
|
2019-10-31 10:03:18 +01:00
|
|
|
}
|
2019-11-02 05:36:33 +01:00
|
|
|
defer fSrc.Close()
|
2023-02-11 14:27:12 +01:00
|
|
|
if fSig, err = safefile.Create(f+".minisig", 0o644); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return err
|
2019-10-31 10:03:18 +01:00
|
|
|
}
|
2019-11-02 05:36:33 +01:00
|
|
|
defer fSig.Close()
|
|
|
|
if _, err = fSrc.Write(bin); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return err
|
2019-11-02 05:36:33 +01:00
|
|
|
}
|
|
|
|
if _, err = fSig.Write(sig); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return err
|
2019-11-02 05:36:33 +01:00
|
|
|
}
|
|
|
|
if err = fSrc.Commit(); err != nil {
|
2023-08-11 11:01:55 +02:00
|
|
|
return err
|
2019-11-02 05:36:33 +01:00
|
|
|
}
|
2019-11-10 12:32:38 +01:00
|
|
|
return fSig.Commit()
|
|
|
|
}
|
|
|
|
|
2023-08-11 00:51:34 +02:00
|
|
|
func (source *Source) updateCache(bin, sig []byte, now time.Time) {
|
2023-08-11 11:11:16 +02:00
|
|
|
file := source.cacheFile
|
|
|
|
absPath := file
|
|
|
|
if resolved, err := filepath.Abs(file); err != nil {
|
|
|
|
absPath = resolved
|
|
|
|
}
|
|
|
|
|
2023-08-11 11:16:44 +02:00
|
|
|
if !bytes.Equal(source.bin, bin) {
|
|
|
|
if err := writeSource(file, bin, sig); err != nil {
|
|
|
|
dlog.Warnf("Couldn't write cache file [%s]: %s", absPath, err) // an error writing to the cache isn't fatal
|
2023-08-11 00:51:34 +02:00
|
|
|
}
|
2023-08-11 11:16:44 +02:00
|
|
|
}
|
|
|
|
if err := os.Chtimes(file, now, now); err != nil {
|
|
|
|
dlog.Warnf("Couldn't update cache file [%s]: %s", absPath, err)
|
2023-08-11 11:11:16 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
source.bin = bin
|
2019-10-31 10:03:18 +01:00
|
|
|
}
|
|
|
|
|
2019-11-01 13:07:48 +01:00
|
|
|
func (source *Source) parseURLs(urls []string) {
|
|
|
|
for _, urlStr := range urls {
|
|
|
|
if srcURL, err := url.Parse(urlStr); err != nil {
|
|
|
|
dlog.Warnf("Source [%s] failed to parse URL [%s]", source.name, urlStr)
|
|
|
|
} else {
|
|
|
|
source.urls = append(source.urls, srcURL)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-11 11:01:55 +02:00
|
|
|
func fetchFromURL(xTransport *XTransport, u *url.URL) ([]byte, error) {
|
2024-04-25 12:43:29 +02:00
|
|
|
bin, _, _, _, err := xTransport.GetWithCompression(u, "", DefaultTimeout)
|
2020-02-21 22:33:34 +01:00
|
|
|
return bin, err
|
2019-10-30 05:12:50 +01:00
|
|
|
}
|
|
|
|
|
2023-08-11 11:01:55 +02:00
|
|
|
// fetchWithCache refreshes the source, preferring the on-disk cache when it
// is still fresh and falling back to the configured URLs otherwise. It
// schedules the next refresh via source.refresh and returns the delay until
// that refresh (zero when nothing new was downloaded) plus any fatal error.
func (source *Source) fetchWithCache(xTransport *XTransport, now time.Time) (time.Duration, error) {
	var err error
	var ttl time.Duration
	if ttl, err = source.fetchFromCache(now); err != nil {
		// Cache miss or invalid cache; only fatal if there is no URL to try.
		if len(source.urls) == 0 {
			dlog.Errorf("Source [%s] cache file [%s] not present and no valid URL", source.name, source.cacheFile)
			return 0, err
		}
		dlog.Debugf("Source [%s] cache file [%s] not present", source.name, source.cacheFile)
	}

	// Cache-only source: return whatever the cache gave us (err is nil when
	// the cache load above succeeded).
	if len(source.urls) == 0 {
		return 0, err
	}
	// Cache is still fresh: schedule the next refresh and skip downloading.
	if ttl > 0 {
		source.refresh = now.Add(ttl)
		return 0, err
	}

	// Pessimistically schedule a near retry; pushed further out on success.
	ttl = MinimumPrefetchInterval
	source.refresh = now.Add(ttl)
	var bin, sig []byte
	// Try each URL until one yields a payload with a valid signature.
	for _, srcURL := range source.urls {
		dlog.Infof("Source [%s] loading from URL [%s]", source.name, srcURL)
		sigURL := &url.URL{}
		*sigURL = *srcURL // deep copy to avoid parsing twice
		sigURL.Path += ".minisig"
		if bin, err = fetchFromURL(xTransport, srcURL); err != nil {
			dlog.Debugf("Source [%s] failed to download from URL [%s]", source.name, srcURL)
			continue
		}
		if sig, err = fetchFromURL(xTransport, sigURL); err != nil {
			dlog.Debugf("Source [%s] failed to download signature from URL [%s]", source.name, sigURL)
			continue
		}
		if err = source.checkSignature(bin, sig); err != nil {
			dlog.Debugf("Source [%s] failed signature check using URL [%s]", source.name, srcURL)
			continue
		}
		break // valid signature
	}
	// err holds the last failure; non-nil means every URL failed.
	if err != nil {
		return 0, err
	}
	source.updateCache(bin, sig, now)
	// Success: the next refresh happens after a full prefetch delay.
	ttl = source.prefetchDelay
	source.refresh = now.Add(ttl)
	return ttl, nil
}
|
|
|
|
|
2019-11-01 08:56:57 +01:00
|
|
|
// NewSource loads a new source using the given cacheFile and urls, ensuring it has a valid signature
|
2022-03-23 17:48:48 +01:00
|
|
|
func NewSource(
|
|
|
|
name string,
|
|
|
|
xTransport *XTransport,
|
|
|
|
urls []string,
|
|
|
|
minisignKeyStr string,
|
|
|
|
cacheFile string,
|
|
|
|
formatStr string,
|
|
|
|
refreshDelay time.Duration,
|
|
|
|
prefix string,
|
2023-08-11 11:01:55 +02:00
|
|
|
) (*Source, error) {
|
2023-08-11 00:51:34 +02:00
|
|
|
if refreshDelay < DefaultPrefetchDelay {
|
|
|
|
refreshDelay = DefaultPrefetchDelay
|
|
|
|
}
|
2023-08-11 11:01:55 +02:00
|
|
|
source := &Source{
|
2023-08-11 00:51:34 +02:00
|
|
|
name: name,
|
|
|
|
urls: []*url.URL{},
|
|
|
|
cacheFile: cacheFile,
|
|
|
|
cacheTTL: refreshDelay,
|
|
|
|
prefetchDelay: DefaultPrefetchDelay,
|
|
|
|
prefix: prefix,
|
2022-03-23 17:48:48 +01:00
|
|
|
}
|
2018-04-18 19:06:50 +02:00
|
|
|
if formatStr == "v2" {
|
2018-01-25 15:02:18 +01:00
|
|
|
source.format = SourceFormatV2
|
|
|
|
} else {
|
2019-10-31 02:04:08 +01:00
|
|
|
return source, fmt.Errorf("Unsupported source format: [%s]", formatStr)
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
2019-10-30 03:00:49 +01:00
|
|
|
if minisignKey, err := minisign.NewPublicKey(minisignKeyStr); err == nil {
|
|
|
|
source.minisignKey = &minisignKey
|
|
|
|
} else {
|
2019-10-31 02:04:08 +01:00
|
|
|
return source, err
|
2018-01-18 23:19:14 +01:00
|
|
|
}
|
2019-11-01 13:07:48 +01:00
|
|
|
source.parseURLs(urls)
|
2023-08-11 11:01:55 +02:00
|
|
|
_, err := source.fetchWithCache(xTransport, timeNow())
|
|
|
|
if err == nil {
|
2023-04-07 15:16:15 +02:00
|
|
|
dlog.Noticef("Source [%s] loaded", name)
|
2018-01-20 01:00:19 +01:00
|
|
|
}
|
2023-08-11 11:01:55 +02:00
|
|
|
return source, err
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
|
|
|
|
2019-11-01 08:56:57 +01:00
|
|
|
// PrefetchSources downloads latest versions of given sources, ensuring they have a valid signature before caching
|
2019-10-31 02:22:48 +01:00
|
|
|
func PrefetchSources(xTransport *XTransport, sources []*Source) time.Duration {
|
|
|
|
now := timeNow()
|
2019-10-31 05:32:21 +01:00
|
|
|
interval := MinimumPrefetchInterval
|
2019-10-31 02:22:48 +01:00
|
|
|
for _, source := range sources {
|
2019-11-01 08:56:57 +01:00
|
|
|
if source.refresh.IsZero() || source.refresh.After(now) {
|
2019-10-31 10:15:21 +01:00
|
|
|
continue
|
|
|
|
}
|
2019-11-01 08:56:57 +01:00
|
|
|
dlog.Debugf("Prefetching [%s]", source.name)
|
2023-04-07 15:16:15 +02:00
|
|
|
if delay, err := source.fetchWithCache(xTransport, now); err != nil {
|
2020-05-31 13:46:44 +02:00
|
|
|
dlog.Infof("Prefetching [%s] failed: %v, will retry in %v", source.name, err, interval)
|
2023-04-07 15:16:15 +02:00
|
|
|
} else {
|
2023-08-11 00:51:34 +02:00
|
|
|
dlog.Debugf("Prefetching [%s] succeeded, next update in %v min", source.name, delay)
|
2023-04-07 15:16:15 +02:00
|
|
|
if delay >= MinimumPrefetchInterval && (interval == MinimumPrefetchInterval || interval > delay) {
|
|
|
|
interval = delay
|
|
|
|
}
|
2019-10-31 02:22:48 +01:00
|
|
|
}
|
|
|
|
}
|
2019-10-31 05:32:21 +01:00
|
|
|
return interval
|
2019-10-31 02:22:48 +01:00
|
|
|
}
|
|
|
|
|
2021-01-01 14:04:12 +01:00
|
|
|
func (source *Source) Parse() ([]RegisteredServer, error) {
|
2018-04-18 19:06:50 +02:00
|
|
|
if source.format == SourceFormatV2 {
|
2021-01-01 14:04:12 +01:00
|
|
|
return source.parseV2()
|
2018-01-25 15:02:18 +01:00
|
|
|
}
|
|
|
|
dlog.Fatal("Unexpected source format")
|
|
|
|
return []RegisteredServer{}, nil
|
|
|
|
}
|
|
|
|
|
2021-01-01 14:04:12 +01:00
|
|
|
// parseV2 parses the v2 source format: markdown-like sections introduced by
// "## <server name>", each followed by description lines, comment lines
// ("//"), and one or more "sdns:" stamp lines. Stamp problems are collected
// and reported together; the valid servers are still returned alongside the
// combined error.
func (source *Source) parseV2() ([]RegisteredServer, error) {
	var registeredServers []RegisteredServer
	var stampErrs []string
	// appendStampErr records a stamp-level problem without aborting parsing.
	appendStampErr := func(format string, a ...interface{}) {
		stampErr := fmt.Sprintf(format, a...)
		stampErrs = append(stampErrs, stampErr)
		dlog.Warn(stampErr)
	}
	in := string(source.bin)
	parts := strings.Split(in, "## ")
	// parts[0] is the preamble before the first "## " header; at least one
	// real section is required.
	if len(parts) < 2 {
		return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
	}
	parts = parts[1:]
	for _, part := range parts {
		part = strings.TrimSpace(part)
		subparts := strings.Split(part, "\n")
		// A section needs a name line plus at least one content line.
		if len(subparts) < 2 {
			return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
		}
		name := strings.TrimSpace(subparts[0])
		if len(name) == 0 {
			return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
		}
		subparts = subparts[1:]
		name = source.prefix + name
		var stampStr, description string
		stampStrs := make([]string, 0)
		// Separate stamp lines from description text; "//" lines and blanks
		// are ignored.
		for _, subpart := range subparts {
			subpart = strings.TrimSpace(subpart)
			if strings.HasPrefix(subpart, "sdns:") && len(subpart) >= 6 {
				stampStrs = append(stampStrs, subpart)
				continue
			} else if len(subpart) == 0 || strings.HasPrefix(subpart, "//") {
				continue
			}
			if len(description) > 0 {
				description += "\n"
			}
			description += subpart
		}
		stampStrsLen := len(stampStrs)
		if stampStrsLen <= 0 {
			appendStampErr("Missing stamp for server [%s]", name)
			continue
		} else if stampStrsLen > 1 {
			// Multiple stamps: randomize the order so load spreads across
			// the advertised endpoints.
			rand.Shuffle(stampStrsLen, func(i, j int) { stampStrs[i], stampStrs[j] = stampStrs[j], stampStrs[i] })
		}
		var stamp dnsstamps.ServerStamp
		var err error
		// Use the first stamp that parses; log each invalid one.
		for _, stampStr = range stampStrs {
			stamp, err = dnsstamps.NewServerStampFromString(stampStr)
			if err == nil {
				break
			}
			appendStampErr("Invalid or unsupported stamp [%v]: %s", stampStr, err.Error())
		}
		// All stamps for this server were invalid; skip the server.
		if err != nil {
			continue
		}
		registeredServer := RegisteredServer{
			name: name, stamp: stamp, description: description,
		}
		dlog.Debugf("Registered [%s] with stamp [%s]", name, stamp.String())
		registeredServers = append(registeredServers, registeredServer)
	}
	// Report accumulated stamp errors, but still return the servers that
	// parsed successfully.
	if len(stampErrs) > 0 {
		return registeredServers, fmt.Errorf("%s", strings.Join(stampErrs, ", "))
	}
	return registeredServers, nil
}
|