2018-01-13 23:52:44 +01:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2018-02-04 11:31:54 +01:00
|
|
|
"io"
|
2018-01-13 23:52:44 +01:00
|
|
|
"io/ioutil"
|
|
|
|
"net/http"
|
2018-01-30 15:46:21 +01:00
|
|
|
"net/url"
|
2018-01-13 23:52:44 +01:00
|
|
|
"os"
|
2018-03-05 11:58:31 +01:00
|
|
|
"path/filepath"
|
2018-01-13 23:52:44 +01:00
|
|
|
"strings"
|
|
|
|
"time"
|
2018-01-25 15:02:18 +01:00
|
|
|
"unicode"
|
2018-01-13 23:52:44 +01:00
|
|
|
|
|
|
|
"github.com/dchest/safefile"
|
|
|
|
|
|
|
|
"github.com/jedisct1/dlog"
|
2018-04-18 18:58:39 +02:00
|
|
|
stamps "github.com/jedisct1/go-dnsstamps"
|
2018-01-13 23:52:44 +01:00
|
|
|
"github.com/jedisct1/go-minisign"
|
|
|
|
)
|
|
|
|
|
|
|
|
// SourceFormat identifies the layout of a downloaded server-list document.
type SourceFormat int

const (
	// SourceFormatV2 is the only supported format ("v2"): a Markdown-style
	// document with one "## name" section per server.
	SourceFormatV2 = iota
)
|
|
|
|
|
2018-01-18 23:19:14 +01:00
|
|
|
const (
	// DefaultPrefetchDelay is how long a freshly downloaded source is
	// considered up to date before another refresh is scheduled.
	DefaultPrefetchDelay time.Duration = 24 * time.Hour

	// MinimumPrefetchInterval is the shortest allowed time between two
	// prefetch attempts; it also serves as the retry delay after a failure.
	MinimumPrefetchInterval time.Duration = 10 * time.Minute
)
|
|
|
|
|
2018-01-13 23:52:44 +01:00
|
|
|
// Source is a remote list of server descriptions that is downloaded over
// HTTP(S), authenticated with a minisign signature, and cached on disk.
type Source struct {
	name        string              // label used in log messages
	urls        []string            // candidate download URLs, tried in order
	format      SourceFormat        // currently always SourceFormatV2
	in          []byte              // raw, signature-verified source payload
	minisignKey *minisign.PublicKey // public key used to verify downloads
	cacheFile   string              // cache path; signature is stored at cacheFile+".minisig"
	// cacheTTL: how long the cache file stays fresh; prefetchDelay: base
	// delay before the next scheduled refresh.
	cacheTTL, prefetchDelay time.Duration
	refresh time.Time // when the next prefetch attempt is due (zero: never)
}
|
|
|
|
|
2019-10-30 06:31:28 +01:00
|
|
|
func (source *Source) checkSignature(bin, sig []byte) (err error) {
|
2019-10-30 03:00:49 +01:00
|
|
|
var signature minisign.Signature
|
2019-10-30 06:31:28 +01:00
|
|
|
if signature, err = minisign.DecodeSignature(string(sig)); err == nil {
|
|
|
|
_, err = source.minisignKey.Verify(bin, signature)
|
2019-10-30 03:00:49 +01:00
|
|
|
}
|
|
|
|
return
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
|
|
|
|
2019-11-03 07:34:59 +01:00
|
|
|
// timeNow can be replaced by tests to provide a static value.
var timeNow = time.Now
|
|
|
|
|
2019-11-01 11:15:10 +01:00
|
|
|
func (source *Source) fetchFromCache(now time.Time) (delay time.Duration, err error) {
|
2019-11-01 02:23:32 +01:00
|
|
|
var bin, sig []byte
|
2019-10-31 09:31:30 +01:00
|
|
|
if bin, err = ioutil.ReadFile(source.cacheFile); err != nil {
|
2018-01-20 00:30:33 +01:00
|
|
|
return
|
|
|
|
}
|
2019-10-31 09:31:30 +01:00
|
|
|
if sig, err = ioutil.ReadFile(source.cacheFile + ".minisig"); err != nil {
|
2019-10-31 06:53:16 +01:00
|
|
|
return
|
|
|
|
}
|
2019-11-01 02:23:32 +01:00
|
|
|
if err = source.checkSignature(bin, sig); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
source.in = bin
|
|
|
|
var fi os.FileInfo
|
|
|
|
if fi, err = os.Stat(source.cacheFile); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2019-11-01 11:15:10 +01:00
|
|
|
if elapsed := now.Sub(fi.ModTime()); elapsed < source.cacheTTL {
|
2019-11-01 08:56:57 +01:00
|
|
|
delay = source.prefetchDelay - elapsed
|
|
|
|
dlog.Debugf("Source [%s] cache file [%s] is still fresh, next update: %v", source.name, source.cacheFile, delay)
|
2019-10-30 05:24:59 +01:00
|
|
|
} else {
|
2019-11-01 08:56:57 +01:00
|
|
|
dlog.Debugf("Source [%s] cache file [%s] needs to be refreshed", source.name, source.cacheFile)
|
2018-02-19 19:24:51 +01:00
|
|
|
}
|
2018-01-20 00:30:33 +01:00
|
|
|
return
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
|
|
|
|
2019-10-31 10:03:18 +01:00
|
|
|
func (source *Source) writeToCache(bin, sig []byte) (err error) {
|
|
|
|
f := source.cacheFile
|
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
if absPath, err2 := filepath.Abs(f); err2 == nil {
|
|
|
|
f = absPath
|
|
|
|
}
|
|
|
|
dlog.Warnf("%s: %s", f, err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
if err = safefile.WriteFile(f, bin, 0644); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err = safefile.WriteFile(f+".minisig", sig, 0644); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-10-30 05:12:50 +01:00
|
|
|
func fetchFromURL(xTransport *XTransport, u *url.URL) (bin []byte, err error) {
|
|
|
|
var resp *http.Response
|
|
|
|
if resp, _, err = xTransport.Get(u, "", DefaultTimeout); err == nil {
|
|
|
|
bin, err = ioutil.ReadAll(io.LimitReader(resp.Body, MaxHTTPBodyLength))
|
|
|
|
resp.Body.Close()
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-11-01 11:15:10 +01:00
|
|
|
// fetchWithCache loads the source from its on-disk cache and, when the cache
// is missing or stale, re-downloads and re-verifies it from each configured
// URL in order. It returns the delay until the next refresh attempt; when
// URLs are configured, source.refresh is updated to now+delay on every exit
// path via the deferred closure below.
func (source *Source) fetchWithCache(xTransport *XTransport, now time.Time) (delay time.Duration, err error) {
	if delay, err = source.fetchFromCache(now); err != nil {
		if len(source.urls) == 0 {
			// No cache and nothing to download from: hard failure.
			dlog.Errorf("Source [%s] cache file [%s] not present and no URL given", source.name, source.cacheFile)
			return
		}
		dlog.Debugf("Source [%s] cache file [%s] not present", source.name, source.cacheFile)
	}
	if len(source.urls) > 0 {
		// Record the next due time whatever happens below; reads the final
		// value of the named result `delay` at return time.
		defer func() {
			source.refresh = now.Add(delay)
		}()
	}
	if len(source.urls) == 0 || delay > 0 {
		// Cache still fresh, or no way to download: nothing more to do.
		return
	}
	// Assume failure: retry after the minimum interval unless a download
	// below succeeds (which replaces this with the full prefetch delay).
	delay = MinimumPrefetchInterval
	var bin, sig []byte
	for _, urlStr := range source.urls {
		dlog.Infof("Source [%s] loading from URL [%s]", source.name, urlStr)
		var srcURL *url.URL
		if srcURL, err = url.Parse(urlStr); err != nil {
			dlog.Debugf("Source [%s] failed to parse URL [%s]", source.name, urlStr)
			continue
		}
		// The signature is expected next to the payload, at <url>.minisig.
		sigURL := &url.URL{}
		*sigURL = *srcURL // deep copy to avoid parsing twice
		sigURL.Path += ".minisig"
		if bin, err = fetchFromURL(xTransport, srcURL); err != nil {
			dlog.Debugf("Source [%s] failed to download from URL [%s]", source.name, srcURL)
			continue
		}
		if sig, err = fetchFromURL(xTransport, sigURL); err != nil {
			dlog.Debugf("Source [%s] failed to download signature from URL [%s]", source.name, sigURL)
			continue
		}
		if err = source.checkSignature(bin, sig); err == nil {
			break // valid signature
		} // above err check inverted to make use of implicit continue
		dlog.Debugf("Source [%s] failed signature check using URL [%s]", source.name, urlStr)
	}
	// err holds the last failure; non-nil means every URL failed.
	if err != nil {
		return
	}
	source.in = bin
	source.writeToCache(bin, sig) // ignore error: not fatal
	delay = source.prefetchDelay
	return
}
|
|
|
|
|
2019-11-01 08:56:57 +01:00
|
|
|
// NewSource loads a new source using the given cacheFile and urls, ensuring it has a valid signature
|
|
|
|
func NewSource(name string, xTransport *XTransport, urls []string, minisignKeyStr string, cacheFile string, formatStr string, refreshDelay time.Duration) (source *Source, err error) {
|
2019-10-31 09:31:30 +01:00
|
|
|
if refreshDelay < DefaultPrefetchDelay {
|
|
|
|
refreshDelay = DefaultPrefetchDelay
|
|
|
|
}
|
2019-11-01 08:56:57 +01:00
|
|
|
source = &Source{name: name, urls: urls, cacheFile: cacheFile, cacheTTL: refreshDelay, prefetchDelay: DefaultPrefetchDelay}
|
2018-04-18 19:06:50 +02:00
|
|
|
if formatStr == "v2" {
|
2018-01-25 15:02:18 +01:00
|
|
|
source.format = SourceFormatV2
|
|
|
|
} else {
|
2019-10-31 02:04:08 +01:00
|
|
|
return source, fmt.Errorf("Unsupported source format: [%s]", formatStr)
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
2019-10-30 03:00:49 +01:00
|
|
|
if minisignKey, err := minisign.NewPublicKey(minisignKeyStr); err == nil {
|
|
|
|
source.minisignKey = &minisignKey
|
|
|
|
} else {
|
2019-10-31 02:04:08 +01:00
|
|
|
return source, err
|
2018-01-18 23:19:14 +01:00
|
|
|
}
|
2019-11-01 11:15:10 +01:00
|
|
|
if _, err = source.fetchWithCache(xTransport, timeNow()); err != nil {
|
2019-10-31 00:40:06 +01:00
|
|
|
return
|
2018-01-20 01:00:19 +01:00
|
|
|
}
|
2019-11-01 08:56:57 +01:00
|
|
|
dlog.Noticef("Source [%s] loaded", name)
|
2019-10-31 00:40:06 +01:00
|
|
|
return
|
2018-01-13 23:52:44 +01:00
|
|
|
}
|
|
|
|
|
2019-11-01 08:56:57 +01:00
|
|
|
// PrefetchSources downloads latest versions of given sources, ensuring they have a valid signature before caching
|
2019-10-31 02:22:48 +01:00
|
|
|
func PrefetchSources(xTransport *XTransport, sources []*Source) time.Duration {
|
|
|
|
now := timeNow()
|
2019-10-31 05:32:21 +01:00
|
|
|
interval := MinimumPrefetchInterval
|
2019-10-31 02:22:48 +01:00
|
|
|
for _, source := range sources {
|
2019-11-01 08:56:57 +01:00
|
|
|
if source.refresh.IsZero() || source.refresh.After(now) {
|
2019-10-31 10:15:21 +01:00
|
|
|
continue
|
|
|
|
}
|
2019-11-01 08:56:57 +01:00
|
|
|
dlog.Debugf("Prefetching [%s]", source.name)
|
2019-11-01 11:15:10 +01:00
|
|
|
if delay, err := source.fetchWithCache(xTransport, now); err != nil {
|
2019-11-01 10:26:39 +01:00
|
|
|
dlog.Debugf("Prefetching [%s] failed: %v", source.name, err)
|
|
|
|
} else {
|
|
|
|
dlog.Debugf("Prefetching [%s] succeeded, next update: %v", source.name, delay)
|
|
|
|
if delay >= MinimumPrefetchInterval && (interval == MinimumPrefetchInterval || interval > delay) {
|
|
|
|
interval = delay
|
2019-10-31 02:22:48 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-10-31 05:32:21 +01:00
|
|
|
return interval
|
2019-10-31 02:22:48 +01:00
|
|
|
}
|
|
|
|
|
2018-01-20 16:59:40 +01:00
|
|
|
func (source *Source) Parse(prefix string) ([]RegisteredServer, error) {
|
2018-04-18 19:06:50 +02:00
|
|
|
if source.format == SourceFormatV2 {
|
2018-01-25 15:02:18 +01:00
|
|
|
return source.parseV2(prefix)
|
|
|
|
}
|
|
|
|
dlog.Fatal("Unexpected source format")
|
|
|
|
return []RegisteredServer{}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// parseV2 parses the "v2" source format: a Markdown-style document where
// each server is a "## name" section containing free-form description lines
// and exactly one "sdns:" stamp line. Per-server stamp problems are
// collected (and logged) rather than aborting the whole parse; a document
// structure problem returns immediately.
func (source *Source) parseV2(prefix string) ([]RegisteredServer, error) {
	var registeredServers []RegisteredServer
	var stampErrs []string
	// appendStampErr records a per-server error and logs it as a warning.
	appendStampErr := func(format string, a ...interface{}) {
		stampErr := fmt.Sprintf(format, a...)
		stampErrs = append(stampErrs, stampErr)
		dlog.Warn(stampErr)
	}
	in := string(source.in)
	// Split into sections on "## "; parts[0] is the preamble before the
	// first server and is discarded below.
	parts := strings.Split(in, "## ")
	if len(parts) < 2 {
		return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
	}
	parts = parts[1:]
PartsLoop:
	for _, part := range parts {
		part = strings.TrimFunc(part, unicode.IsSpace)
		// First line is the server name; the rest are stamp/description.
		subparts := strings.Split(part, "\n")
		if len(subparts) < 2 {
			return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
		}
		name := strings.TrimFunc(subparts[0], unicode.IsSpace)
		if len(name) == 0 {
			return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
		}
		subparts = subparts[1:]
		name = prefix + name
		var stampStr, description string
		for _, subpart := range subparts {
			subpart = strings.TrimFunc(subpart, unicode.IsSpace)
			if strings.HasPrefix(subpart, "sdns:") {
				if len(stampStr) > 0 {
					// A second stamp line invalidates the whole entry.
					appendStampErr("Multiple stamps for server [%s]", name)
					continue PartsLoop
				}
				stampStr = subpart
				continue
			} else if len(subpart) == 0 || strings.HasPrefix(subpart, "//") {
				// Skip blank lines and comment lines.
				continue
			}
			// Everything else accumulates into the description.
			if len(description) > 0 {
				description += "\n"
			}
			description += subpart
		}
		// A usable stamp must be longer than the bare "sdns:" prefix.
		if len(stampStr) < 6 {
			appendStampErr("Missing stamp for server [%s]", name)
			continue
		}
		stamp, err := stamps.NewServerStampFromString(stampStr)
		if err != nil {
			appendStampErr("Invalid or unsupported stamp [%v]: %s", stampStr, err.Error())
			continue
		}
		registeredServer := RegisteredServer{
			name: name, stamp: stamp, description: description,
		}
		dlog.Debugf("Registered [%s] with stamp [%s]", name, stamp.String())
		registeredServers = append(registeredServers, registeredServer)
	}
	// Servers that parsed fine are still returned alongside the joined errors.
	if len(stampErrs) > 0 {
		return registeredServers, fmt.Errorf("%s", strings.Join(stampErrs, ", "))
	}
	return registeredServers, nil
}
|