Bläddra i källkod

Merge github.com/syncthing/usage-reporting into main repo

Jakob Borg 7 år sedan
förälder
incheckning
37cd5a0bec
41 ändrade filer med 9644 tillägg och 0 borttagningar
  1. 324 0
      cmd/uraggregate/main.go
  2. 234 0
      cmd/ursrv/analytics.go
  3. 25 0
      cmd/ursrv/compiler_test.go
  4. 122 0
      cmd/ursrv/formatting.go
  5. 1607 0
      cmd/ursrv/main.go
  6. BIN
      cmd/ursrv/static/assets/img/favicon.png
  7. 6 0
      cmd/ursrv/static/bootstrap/css/bootstrap-theme.min.css
  8. 6 0
      cmd/ursrv/static/bootstrap/css/bootstrap.min.css
  9. 6 0
      cmd/ursrv/static/bootstrap/js/bootstrap.min.js
  10. BIN
      cmd/ursrv/static/fonts/glyphicons-halflings-regular.eot
  11. 229 0
      cmd/ursrv/static/fonts/glyphicons-halflings-regular.svg
  12. BIN
      cmd/ursrv/static/fonts/glyphicons-halflings-regular.ttf
  13. BIN
      cmd/ursrv/static/fonts/glyphicons-halflings-regular.woff
  14. 631 0
      cmd/ursrv/static/index.html
  15. 8 0
      vendor/github.com/lib/pq/LICENSE.md
  16. 756 0
      vendor/github.com/lib/pq/array.go
  17. 91 0
      vendor/github.com/lib/pq/buf.go
  18. 1854 0
      vendor/github.com/lib/pq/conn.go
  19. 131 0
      vendor/github.com/lib/pq/conn_go18.go
  20. 43 0
      vendor/github.com/lib/pq/connector.go
  21. 282 0
      vendor/github.com/lib/pq/copy.go
  22. 245 0
      vendor/github.com/lib/pq/doc.go
  23. 603 0
      vendor/github.com/lib/pq/encode.go
  24. 515 0
      vendor/github.com/lib/pq/error.go
  25. 98 0
      vendor/github.com/lib/pq/example/listen/doc.go
  26. 118 0
      vendor/github.com/lib/pq/hstore/hstore.go
  27. 797 0
      vendor/github.com/lib/pq/notify.go
  28. 6 0
      vendor/github.com/lib/pq/oid/doc.go
  29. 93 0
      vendor/github.com/lib/pq/oid/gen.go
  30. 343 0
      vendor/github.com/lib/pq/oid/types.go
  31. 93 0
      vendor/github.com/lib/pq/rows.go
  32. 169 0
      vendor/github.com/lib/pq/ssl.go
  33. 14 0
      vendor/github.com/lib/pq/ssl_go1.7.go
  34. 20 0
      vendor/github.com/lib/pq/ssl_permissions.go
  35. 8 0
      vendor/github.com/lib/pq/ssl_renegotiation.go
  36. 9 0
      vendor/github.com/lib/pq/ssl_windows.go
  37. 76 0
      vendor/github.com/lib/pq/url.go
  38. 24 0
      vendor/github.com/lib/pq/user_posix.go
  39. 27 0
      vendor/github.com/lib/pq/user_windows.go
  40. 23 0
      vendor/github.com/lib/pq/uuid.go
  41. 8 0
      vendor/manifest

+ 324 - 0
cmd/uraggregate/main.go

@@ -0,0 +1,324 @@
+package main
+
+import (
+	"database/sql"
+	"log"
+	"os"
+	"time"
+
+	_ "github.com/lib/pq"
+)
+
// dbConn is the PostgreSQL connection string, overridable via the
// UR_DB_URL environment variable.
var dbConn = getEnvDefault("UR_DB_URL", "postgres://user:password@localhost/ur?sslmode=disable")

// getEnvDefault returns the value of the environment variable key, or
// def when the variable is unset or empty.
func getEnvDefault(key, def string) string {
	val := os.Getenv(key)
	if val == "" {
		return def
	}
	return val
}
+
+func main() {
+	log.SetFlags(log.Ltime | log.Ldate)
+	log.SetOutput(os.Stdout)
+
+	db, err := sql.Open("postgres", dbConn)
+	if err != nil {
+		log.Fatalln("database:", err)
+	}
+	err = setupDB(db)
+	if err != nil {
+		log.Fatalln("database:", err)
+	}
+
+	for {
+		runAggregation(db)
+		// Sleep until one minute past next midnight
+		sleepUntilNext(24*time.Hour, 1*time.Minute)
+	}
+}
+
+func runAggregation(db *sql.DB) {
+	since := maxIndexedDay(db, "VersionSummary")
+	log.Println("Aggregating VersionSummary data since", since)
+	rows, err := aggregateVersionSummary(db, since)
+	if err != nil {
+		log.Fatalln("aggregate:", err)
+	}
+	log.Println("Inserted", rows, "rows")
+
+	log.Println("Aggregating UserMovement data")
+	rows, err = aggregateUserMovement(db)
+	if err != nil {
+		log.Fatalln("aggregate:", err)
+	}
+	log.Println("Inserted", rows, "rows")
+
+	log.Println("Aggregating Performance data")
+	since = maxIndexedDay(db, "Performance")
+	rows, err = aggregatePerformance(db, since)
+	if err != nil {
+		log.Fatalln("aggregate:", err)
+	}
+	log.Println("Inserted", rows, "rows")
+
+	log.Println("Aggregating BlockStats data")
+	since = maxIndexedDay(db, "BlockStats")
+	rows, err = aggregateBlockStats(db, since)
+	if err != nil {
+		log.Fatalln("aggregate:", err)
+	}
+	log.Println("Inserted", rows, "rows")
+}
+
// sleepUntilNext sleeps until the next multiple of intv plus margin.
// Truncate works relative to the Unix epoch in UTC, so for intv of 24h
// the wakeup is shortly after midnight UTC.
func sleepUntilNext(intv, margin time.Duration) {
	now := time.Now().UTC()
	wakeAt := now.Truncate(intv).Add(intv + margin)
	log.Println("Sleeping until", wakeAt)
	time.Sleep(wakeAt.Sub(now))
}
+
// setupDB creates the aggregate tables and their indexes if they do not
// already exist. It is idempotent and safe to call on every startup.
//
// Bug fix: the original probed each index with a shadowed `err`
// variable (`if err := row.Scan(...); err != nil { _, err = db.Exec(...) }`),
// so every CREATE INDEX error was assigned to the shadowed variable and
// silently discarded, and the final `return err` never reflected them.
// Each statement's error is now checked and returned immediately.
func setupDB(db *sql.DB) error {
	// CREATE TABLE IF NOT EXISTS is idempotent on its own.
	tableStmts := []string{
		`CREATE TABLE IF NOT EXISTS VersionSummary (
		Day TIMESTAMP NOT NULL,
		Version VARCHAR(8) NOT NULL,
		Count INTEGER NOT NULL
	)`,
		`CREATE TABLE IF NOT EXISTS UserMovement (
		Day TIMESTAMP NOT NULL,
		Added INTEGER NOT NULL,
		Bounced INTEGER NOT NULL,
		Removed INTEGER NOT NULL
	)`,
		`CREATE TABLE IF NOT EXISTS Performance (
		Day TIMESTAMP NOT NULL,
		TotFiles INTEGER NOT NULL,
		TotMiB INTEGER NOT NULL,
		SHA256Perf DOUBLE PRECISION NOT NULL,
		MemorySize INTEGER NOT NULL,
		MemoryUsageMiB INTEGER NOT NULL
	)`,
		`CREATE TABLE IF NOT EXISTS BlockStats (
		Day TIMESTAMP NOT NULL,
		Reports INTEGER NOT NULL,
		Total INTEGER NOT NULL,
		Renamed INTEGER NOT NULL,
		Reused INTEGER NOT NULL,
		Pulled INTEGER NOT NULL,
		CopyOrigin INTEGER NOT NULL,
		CopyOriginShifted INTEGER NOT NULL,
		CopyElsewhere INTEGER NOT NULL
	)`,
	}
	for _, stmt := range tableStmts {
		if _, err := db.Exec(stmt); err != nil {
			return err
		}
	}

	// Indexes: probe for existence by casting the index name to
	// regclass (which fails when it doesn't exist) and create it on
	// failure.
	indexes := []struct{ name, stmt string }{
		{"UniqueDayVersionIndex", `CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`},
		{"VersionDayIndex", `CREATE INDEX VersionDayIndex ON VersionSummary (Day)`},
		{"MovementDayIndex", `CREATE INDEX MovementDayIndex ON UserMovement (Day)`},
		{"PerformanceDayIndex", `CREATE INDEX PerformanceDayIndex ON Performance (Day)`},
		{"BlockStatsDayIndex", `CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`},
	}
	var t string
	for _, idx := range indexes {
		row := db.QueryRow(`SELECT '` + idx.name + `'::regclass`)
		if err := row.Scan(&t); err != nil {
			// Index is missing; create it and report failures instead
			// of dropping them into a shadowed variable.
			if _, err := db.Exec(idx.stmt); err != nil {
				return err
			}
		}
	}

	return nil
}
+
// maxIndexedDay returns the newest Day present in the given aggregate
// table, or the zero time when the table is empty or the query fails.
func maxIndexedDay(db *sql.DB, table string) time.Time {
	var day time.Time
	if err := db.QueryRow("SELECT MAX(Day) FROM " + table).Scan(&day); err != nil {
		return time.Time{}
	}
	return day
}
+
// aggregateVersionSummary inserts one row per (day, minor version) with
// the number of v0.x reports seen, for days after since and before
// today. Returns the number of rows inserted.
//
// Fix: the version-extraction pattern previously was '^v\d.\d+' with an
// unescaped dot, matching any character in that position; the dot is
// now escaped so only literal "vX.Y" prefixes are extracted.
func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
	res, err := db.Exec(`INSERT INTO VersionSummary (
	SELECT
		DATE_TRUNC('day', Received) AS Day,
		SUBSTRING(Version FROM '^v\d\.\d+') AS Ver,
		COUNT(*) AS Count
		FROM Reports
		WHERE
			DATE_TRUNC('day', Received) > $1
			AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
			AND Version like 'v0.%'
		GROUP BY Day, Ver
		);
	`, since)
	if err != nil {
		return 0, err
	}

	return res.RowsAffected()
}
+
// aggregateUserMovement rebuilds the UserMovement table from scratch.
// For each day it counts unique IDs first seen that day (added), last
// seen that day long enough ago to be sure they're gone (removed), and
// both first and last seen that day (bounced). Returns the number of
// rows inserted.
//
// Fixes: rows.Err() is now checked after the scan loop (iteration
// errors were previously silently ignored, yielding a truncated
// aggregate), and the time comparison uses Equal instead of == for
// consistency with the other comparisons (== also compares the
// monotonic reading and location).
func aggregateUserMovement(db *sql.DB) (int64, error) {
	rows, err := db.Query(`SELECT
		DATE_TRUNC('day', Received) AS Day,
		UniqueID
		FROM Reports
		WHERE
			DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
			AND Version like 'v0.%'
		ORDER BY Day
	`)
	if err != nil {
		return 0, err
	}
	defer rows.Close()

	firstSeen := make(map[string]time.Time)
	lastSeen := make(map[string]time.Time)
	var minTs time.Time
	minTs = minTs.In(time.UTC)

	for rows.Next() {
		var ts time.Time
		var id string
		if err := rows.Scan(&ts, &id); err != nil {
			return 0, err
		}

		// Rows arrive ordered by day, so the first row carries the
		// earliest day.
		if minTs.IsZero() {
			minTs = ts
		}
		if _, ok := firstSeen[id]; !ok {
			firstSeen[id] = ts
		}
		lastSeen[id] = ts
	}
	// Surface any error that interrupted the iteration.
	if err := rows.Err(); err != nil {
		return 0, err
	}

	type sumRow struct {
		day     time.Time
		added   int
		removed int
		bounced int
	}
	var sumRows []sumRow
	for t := minTs; t.Before(time.Now().Truncate(24 * time.Hour)); t = t.AddDate(0, 0, 1) {
		var added, removed, bounced int
		// Only classify removals/bounces for days older than 30 days;
		// more recent users may simply not have reported again yet.
		old := t.Before(time.Now().AddDate(0, 0, -30))
		for id, first := range firstSeen {
			last := lastSeen[id]
			if first.Equal(t) && last.Equal(t) && old {
				bounced++
				continue
			}
			if first.Equal(t) {
				added++
			}
			if last.Equal(t) && old {
				removed++
			}
		}
		sumRows = append(sumRows, sumRow{t, added, removed, bounced})
	}

	tx, err := db.Begin()
	if err != nil {
		return 0, err
	}
	if _, err := tx.Exec("DELETE FROM UserMovement"); err != nil {
		tx.Rollback()
		return 0, err
	}
	for _, r := range sumRows {
		if _, err := tx.Exec("INSERT INTO UserMovement (Day, Added, Removed, Bounced) VALUES ($1, $2, $3, $4)", r.day, r.added, r.removed, r.bounced); err != nil {
			tx.Rollback()
			return 0, err
		}
	}

	return int64(len(sumRows)), tx.Commit()
}
+
// aggregatePerformance inserts one row per day of averaged performance
// metrics (file counts, data size, hash speed, memory) over all v0.x
// reports for that day, for days after since and before today. Returns
// the number of rows inserted.
func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
	res, err := db.Exec(`INSERT INTO Performance (
	SELECT
		DATE_TRUNC('day', Received) AS Day,
		AVG(TotFiles) As TotFiles,
		AVG(TotMiB) As TotMiB,
		AVG(SHA256Perf) As SHA256Perf,
		AVG(MemorySize) As MemorySize,
		AVG(MemoryUsageMiB) As MemoryUsageMiB
		FROM Reports
		WHERE
			DATE_TRUNC('day', Received) > $1
			AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
			AND Version like 'v0.%'
		GROUP BY Day
		);
	`, since)
	if err != nil {
		return 0, err
	}

	return res.RowsAffected()
}
+
// aggregateBlockStats inserts one row per day of summed block reuse
// statistics over all report-version-3 v0.x reports for that day, for
// days after since and before today. Returns the number of rows
// inserted.
func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
	// Filter out anything prior 0.14.41 as that has sum aggregations which
	// made no sense.
	res, err := db.Exec(`INSERT INTO BlockStats (
	SELECT
		DATE_TRUNC('day', Received) AS Day,
		COUNT(1) As Reports,
		SUM(BlocksTotal) AS Total,
		SUM(BlocksRenamed) AS Renamed,
		SUM(BlocksReused) AS Reused,
		SUM(BlocksPulled) AS Pulled,
		SUM(BlocksCopyOrigin) AS CopyOrigin,
		SUM(BlocksCopyOriginShifted) AS CopyOriginShifted,
		SUM(BlocksCopyElsewhere) AS CopyElsewhere
		FROM Reports
		WHERE
			DATE_TRUNC('day', Received) > $1
			AND DATE_TRUNC('day', Received) < DATE_TRUNC('day', NOW())
			AND ReportVersion = 3
			AND Version LIKE 'v0.%'
			AND Version NOT LIKE 'v0.14.40%'
			AND Version NOT LIKE 'v0.14.39%'
			AND Version NOT LIKE 'v0.14.38%'
		GROUP BY Day
	);
	`, since)
	if err != nil {
		return 0, err
	}

	return res.RowsAffected()
}

+ 234 - 0
cmd/ursrv/analytics.go

@@ -0,0 +1,234 @@
+package main
+
+import (
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+)
+
// analytic is one entry in a frequency analysis: a key, its occurrence
// count, and its share of the total in percent. Items optionally holds
// a sub-breakdown (populated by group).
type analytic struct {
	Key        string
	Count      int
	Percentage float64
	Items      []analytic `json:",omitempty"`
}

// analyticList implements sort.Interface, ordering entries by
// descending Count; an entry keyed "Others" compares lower than
// everything else.
type analyticList []analytic

func (l analyticList) Len() int { return len(l) }

func (l analyticList) Swap(a, b int) { l[a], l[b] = l[b], l[a] }

func (l analyticList) Less(a, b int) bool {
	switch {
	case l[a].Key == "Others":
		return true
	case l[b].Key == "Others":
		return false
	default:
		return l[b].Count < l[a].Count // descending by count
	}
}

// analyticsFor computes the frequency distribution of the strings in
// ss, sorted by descending count and annotated with percentages of the
// total. If cutoff > 0, only the cutoff most common entries are kept
// and the remainder is folded into a trailing "Others" entry.
func analyticsFor(ss []string, cutoff int) []analytic {
	counts := make(map[string]int)
	for _, s := range ss {
		counts[s]++
	}
	total := len(ss)

	list := make([]analytic, 0, len(counts))
	for key, n := range counts {
		list = append(list, analytic{
			Key:        key,
			Count:      n,
			Percentage: 100 * float64(n) / float64(total),
		})
	}

	sort.Sort(analyticList(list))

	if cutoff > 0 && len(list) > cutoff {
		rest := 0
		for _, a := range list[cutoff:] {
			rest += a.Count
		}
		list = append(list[:cutoff], analytic{
			Key:        "Others",
			Count:      rest,
			Percentage: 100 * float64(rest) / float64(total),
		})
	}

	return list
}
+
+// Find the points at which certain penetration levels are met
+func penetrationLevels(as []analytic, points []float64) []analytic {
+	sort.Slice(as, func(a, b int) bool {
+		return versionLess(as[b].Key, as[a].Key)
+	})
+
+	var res []analytic
+
+	idx := 0
+	sum := 0.0
+	for _, a := range as {
+		sum += a.Percentage
+		if sum >= points[idx] {
+			a.Count = int(points[idx])
+			a.Percentage = sum
+			res = append(res, a)
+			idx++
+			if idx == len(points) {
+				break
+			}
+		}
+	}
+	return res
+}
+
// statsForInts returns the 5th percentile, median, 95th percentile and
// maximum of data, as floats. data is sorted in place. A zero array is
// returned for empty input.
func statsForInts(data []int) [4]float64 {
	var res [4]float64
	n := len(data)
	if n == 0 {
		return res
	}

	sort.Ints(data)
	res[0] = float64(data[int(float64(n)*0.05)])
	res[1] = float64(data[n/2])
	res[2] = float64(data[int(float64(n)*0.95)])
	res[3] = float64(data[n-1])
	return res
}
+
// statsForFloats returns the 5th percentile, median, 95th percentile
// and maximum of data. data is sorted in place. A zero array is
// returned for empty input.
func statsForFloats(data []float64) [4]float64 {
	var res [4]float64
	n := len(data)
	if n == 0 {
		return res
	}

	sort.Float64s(data)
	res[0] = data[int(float64(n)*0.05)]
	res[1] = data[n/2]
	res[2] = data[int(float64(n)*0.95)]
	res[3] = data[n-1]
	return res
}
+
+func group(by func(string) string, as []analytic, perGroup int) []analytic {
+	var res []analytic
+
+next:
+	for _, a := range as {
+		group := by(a.Key)
+		for i := range res {
+			if res[i].Key == group {
+				res[i].Count += a.Count
+				res[i].Percentage += a.Percentage
+				if len(res[i].Items) < perGroup {
+					res[i].Items = append(res[i].Items, a)
+				}
+				continue next
+			}
+		}
+		res = append(res, analytic{
+			Key:        group,
+			Count:      a.Count,
+			Percentage: a.Percentage,
+			Items:      []analytic{a},
+		})
+	}
+
+	sort.Sort(analyticList(res))
+	return res
+}
+
// byVersion reduces a version string to its major.minor prefix, e.g.
// "v0.14.3" becomes "v0.14". Strings with fewer than two dots are
// returned unchanged.
func byVersion(s string) string {
	parts := strings.SplitN(s, ".", 3)
	if len(parts) < 3 {
		return s
	}
	return parts[0] + "." + parts[1]
}
+
// byPlatform reduces a platform string to its OS part, e.g.
// "linux-amd64" becomes "linux". Strings without a dash are returned
// unchanged.
func byPlatform(s string) string {
	if i := strings.IndexByte(s, '-'); i >= 0 {
		return s[:i]
	}
	return s
}
+
// numericGoVersion matches a major.minor Go version prefix such as
// "go1.9".
var numericGoVersion = regexp.MustCompile(`^go[0-9]\.[0-9]+`)

// byCompiler reduces a compiler string to its major.minor Go version
// ("go1.9"); anything that doesn't start with a numeric Go version
// (gccgo, development builds, ...) becomes "Other".
func byCompiler(s string) string {
	m := numericGoVersion.FindString(s)
	if m == "" {
		return "Other"
	}
	return m
}
+
+func versionLess(a, b string) bool {
+	arel, apre := versionParts(a)
+	brel, bpre := versionParts(b)
+
+	minlen := len(arel)
+	if l := len(brel); l < minlen {
+		minlen = l
+	}
+
+	for i := 0; i < minlen; i++ {
+		if arel[i] != brel[i] {
+			return arel[i] < brel[i]
+		}
+	}
+
+	// Longer version is newer, when the preceding parts are equal
+	if len(arel) != len(brel) {
+		return len(arel) < len(brel)
+	}
+
+	if apre != bpre {
+		// "(+dev)" versions are ahead
+		if apre == plusStr {
+			return false
+		}
+		if bpre == plusStr {
+			return true
+		}
+		return apre < bpre
+	}
+
+	// don't actually care how the prerelease stuff compares for our purposes
+	return false
+}
+
// versionParts splits a version string as returned from
// transformVersion into its numeric release parts and its prerelease
// tag: "v1.2.3-beta.2" -> []int{1, 2, 3}, "beta.2". The leading "v" is
// assumed present and stripped.
func versionParts(v string) ([]int, string) {
	// " (+dev)" style suffixes first, otherwise "-rc.1" style ones.
	parts := strings.SplitN(v[1:], " ", 2)
	if len(parts) == 1 {
		parts = strings.SplitN(parts[0], "-", 2)
	}

	var release []int
	for _, field := range strings.Split(parts[0], ".") {
		n, _ := strconv.Atoi(field) // non-numeric fields become 0
		release = append(release, n)
	}

	prerelease := ""
	if len(parts) > 1 {
		prerelease = parts[1]
	}

	return release, prerelease
}

+ 25 - 0
cmd/ursrv/compiler_test.go

@@ -0,0 +1,25 @@
+package main
+
+import "testing"
+
+func TestCompilerRe(t *testing.T) {
+	tests := [][3]string{
+		{`syncthing v0.11.0 (xgcc (Ubuntu 4.9.3-0ubuntu4) 4.9.3 linux-amd64 default) niklas@Niklas-Netbook 2015-04-26 13:15:08 UTC`, "xgcc (Ubuntu 4.9.3-0ubuntu4) 4.9.3", "niklas@Niklas-Netbook"},
+		{`syncthing v0.12.0-rc5 "Beryllium Bedbug" (go1.4.2 linux-arm android) unknown-user@Felix-T420 2015-10-22 18:32:15 UTC`, "go1.4.2", "unknown-user@Felix-T420"},
+		{`syncthing v0.13.0-beta.0+39-ge267bf3 "Copper Cockroach" (go1.4.2 linux-amd64) [email protected] 2016-01-20 08:41:52 UTC`, "go1.4.2", "[email protected]"},
+	}
+
+	for _, tc := range tests {
+		m := compilerRe.FindStringSubmatch(tc[0])
+		if len(m) != 3 {
+			t.Errorf("Regexp didn't match %q", tc[0])
+			continue
+		}
+		if m[1] != tc[1] {
+			t.Errorf("Compiler %q != %q", m[1], tc[1])
+		}
+		if m[2] != tc[2] {
+			t.Errorf("Builder %q != %q", m[2], tc[2])
+		}
+	}
+}

+ 122 - 0
cmd/ursrv/formatting.go

@@ -0,0 +1,122 @@
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
// NumberType selects how number renders a value: with metric (k/M/G)
// suffixes, binary (Ki/Mi/Gi) suffixes, or as a human readable
// duration.
type NumberType int

const (
	NumberMetric NumberType = iota
	NumberBinary
	NumberDuration
)
+
+func number(ntype NumberType, v float64) string {
+	if ntype == NumberDuration {
+		return duration(v)
+	} else if ntype == NumberBinary {
+		return binary(v)
+	} else {
+		return metric(v)
+	}
+}
+
// suffix is one step in a unit table: the suffix text and the value of
// one unit of it.
type suffix struct {
	Suffix     string
	Multiplier float64
}

// Unit tables, largest unit first; withSuffix picks the first entry
// whose multiplier does not exceed the value.

var metricSuffixes = []suffix{
	{"G", 1e9},
	{"M", 1e6},
	{"k", 1e3},
}

var binarySuffixes = []suffix{
	{"Gi", 1 << 30},
	{"Mi", 1 << 20},
	{"Ki", 1 << 10},
}

// durationSuffix is in seconds; months and years use the rough 30/365
// day approximations.
var durationSuffix = []suffix{
	{"year", 365 * 24 * 60 * 60},
	{"month", 30 * 24 * 60 * 60},
	{"day", 24 * 60 * 60},
	{"hour", 60 * 60},
	{"minute", 60},
	{"second", 1},
}

// metric formats v with powers-of-ten suffixes.
func metric(v float64) string {
	return withSuffix(v, metricSuffixes, false)
}

// binary formats v with powers-of-two suffixes.
func binary(v float64) string {
	return withSuffix(v, binarySuffixes, false)
}

// duration formats v, in seconds, as a human readable duration.
func duration(v float64) string {
	return withSuffix(v, durationSuffix, true)
}

// withSuffix formats v scaled by the largest suffix in ps that does not
// exceed it, with at most one decimal and any trailing ".0" stripped.
// With pluralize set, an "s" is appended to the suffix unless the
// scaled value is exactly 1. Values smaller than every multiplier are
// formatted bare.
func withSuffix(v float64, ps []suffix, pluralize bool) string {
	strip := func(num string) string {
		// If the number only has decimal zeroes, strip em off.
		return strings.TrimRight(strings.TrimRight(num, "0"), ".")
	}

	for _, p := range ps {
		if v < p.Multiplier {
			continue
		}
		scaled := v / p.Multiplier
		sfx := p.Suffix
		if pluralize && scaled != 1.0 {
			sfx += "s"
		}
		return strip(fmt.Sprintf("%.1f", scaled)) + " " + sfx
	}
	return strip(fmt.Sprintf("%.1f", v))
}
+
// commatize inserts sep as a thousands separator into the integer part
// of the formatted number s.
// NOTE(review): despite the original doc claiming integers are handled,
// values without a '.' are returned untouched — confirm whether that is
// intentional.
func commatize(sep, s string) string {
	dot := strings.IndexByte(s, '.')
	if dot < 0 {
		// No dot, don't do anything.
		return s
	}

	intPart := s[:dot]
	var b bytes.Buffer
	for i := 0; i < len(intPart); i++ {
		b.WriteByte(intPart[i])
		// Insert the separator after every digit whose distance to the
		// end of the integer part is a positive multiple of three.
		if i < len(intPart)-1 && (len(intPart)-i)%3 == 1 {
			b.WriteString(sep)
		}
	}

	if frac := s[dot+1:]; frac != "" {
		b.WriteByte('.')
		b.WriteString(frac)
	}

	return b.String()
}
+
// proportion returns count's share of the sum of all values in m, in
// percent. When count is the maximum of a multi-entry map and not the
// entire total, the result is nudged down by 0.01 so that rounding in
// the template cannot push a progress bar past 100%.
func proportion(m map[string]int, count int) float64 {
	var total int
	isMax := true
	for _, n := range m {
		total += n
		isMax = isMax && n <= count
	}

	pct := (100 * float64(count)) / float64(total)
	if isMax && len(m) > 1 && count != total {
		pct -= 0.01
	}
	return pct
}

+ 1607 - 0
cmd/ursrv/main.go

@@ -0,0 +1,1607 @@
+package main
+
+import (
+	"bytes"
+	"crypto/tls"
+	"database/sql"
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"html/template"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"os"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+	"unicode"
+
+	"github.com/lib/pq"
+	"github.com/oschwald/geoip2-golang"
+)
+
+var (
+	useHTTP          = os.Getenv("UR_USE_HTTP") != ""
+	debug            = os.Getenv("UR_DEBUG") != ""
+	keyFile          = getEnvDefault("UR_KEY_FILE", "key.pem")
+	certFile         = getEnvDefault("UR_CRT_FILE", "crt.pem")
+	dbConn           = getEnvDefault("UR_DB_URL", "postgres://user:password@localhost/ur?sslmode=disable")
+	listenAddr       = getEnvDefault("UR_LISTEN", "0.0.0.0:8443")
+	geoIPPath        = getEnvDefault("UR_GEOIP", "GeoLite2-City.mmdb")
+	tpl              *template.Template
+	compilerRe       = regexp.MustCompile(`\(([A-Za-z0-9()., -]+) \w+-\w+(?:| android| default)\) ([\[email protected]]+)`)
+	progressBarClass = []string{"", "progress-bar-success", "progress-bar-info", "progress-bar-warning", "progress-bar-danger"}
+	featureOrder     = []string{"Various", "Folder", "Device", "Connection", "GUI"}
+	knownVersions    = []string{"v2", "v3"}
+)
+
// funcs holds the helper functions made available to the HTML
// template.
var funcs = map[string]interface{}{
	"commatize":  commatize,
	"number":     number,
	"proportion": proportion,
	// counter returns a fresh counter for use within one template run.
	"counter": func() *counter {
		return &counter{}
	},
	// progressBarClassByIndex cycles through the Bootstrap progress bar
	// color classes.
	"progressBarClassByIndex": func(a int) string {
		return progressBarClass[a%len(progressBarClass)]
	},
	// slice splits input into numParts chunks and returns chunk number
	// whichPart (1-based).
	// NOTE(review): perPart rounds up using len(input)%2 rather than
	// len(input)%numParts, which looks off for numParts != 2 — confirm.
	"slice": func(numParts, whichPart int, input []feature) []feature {
		var part []feature
		perPart := (len(input) / numParts) + len(input)%2

		parts := make([][]feature, 0, numParts)
		for len(input) >= perPart {
			part, input = input[:perPart], input[perPart:]
			parts = append(parts, part)
		}
		if len(input) > 0 {
			parts = append(parts, input[:len(input)])
		}
		return parts[whichPart-1]
	},
}
+
// getEnvDefault returns the value of the environment variable key, or
// def when the variable is unset or empty.
func getEnvDefault(key, def string) string {
	val := os.Getenv(key)
	if val == "" {
		return def
	}
	return val
}
+
// IntMap is a map of string counters stored in the database as a JSON
// blob. It implements driver.Valuer and the sql.Scanner contract.
type IntMap map[string]int

// Value serializes the map to JSON for storage.
func (p IntMap) Value() (driver.Value, error) {
	return json.Marshal(p)
}

// Scan deserializes a JSON blob read from the database into the map,
// replacing the previous contents.
func (p *IntMap) Scan(src interface{}) error {
	source, ok := src.([]byte)
	if !ok {
		return errors.New("Type assertion .([]byte) failed.")
	}

	var decoded map[string]int
	if err := json.Unmarshal(source, &decoded); err != nil {
		return err
	}

	*p = decoded
	return nil
}
+
// report is a usage report as stored in the Reports table (and, minus
// the DB-generated fields, as received from clients as JSON). The
// field groups correspond to the usage report format versions in which
// they were introduced; FieldNames/FieldPointers map them to database
// columns.
type report struct {
	Received time.Time // Only from DB

	UniqueID       string
	Version        string
	LongVersion    string
	Platform       string
	NumFolders     int
	NumDevices     int
	TotFiles       int
	FolderMaxFiles int
	TotMiB         int
	FolderMaxMiB   int
	MemoryUsageMiB int
	SHA256Perf     float64
	MemorySize     int

	// v2 fields

	URVersion  int
	NumCPU     int
	FolderUses struct {
		ReadOnly            int
		IgnorePerms         int
		IgnoreDelete        int
		AutoNormalize       int
		SimpleVersioning    int
		ExternalVersioning  int
		StaggeredVersioning int
		TrashcanVersioning  int
	}
	DeviceUses struct {
		Introducer       int
		CustomCertName   int
		CompressAlways   int
		CompressMetadata int
		CompressNever    int
		DynamicAddr      int
		StaticAddr       int
	}
	Announce struct {
		GlobalEnabled     bool
		LocalEnabled      bool
		DefaultServersDNS int
		DefaultServersIP  int
		OtherServers      int
	}
	Relays struct {
		Enabled        bool
		DefaultServers int
		OtherServers   int
	}
	UsesRateLimit        bool
	UpgradeAllowedManual bool
	UpgradeAllowedAuto   bool

	// V2.5 fields (fields that were in v2 but never added to the database)
	UpgradeAllowedPre bool
	RescanIntvs       pq.Int64Array

	// v3 fields

	Uptime                     int
	NATType                    string
	AlwaysLocalNets            bool
	CacheIgnoredFiles          bool
	OverwriteRemoteDeviceNames bool
	ProgressEmitterEnabled     bool
	CustomDefaultFolderPath    bool
	WeakHashSelection          string
	CustomTrafficClass         bool
	CustomTempIndexMinBlocks   bool
	TemporariesDisabled        bool
	TemporariesCustom          bool
	LimitBandwidthInLan        bool
	CustomReleaseURL           bool
	RestartOnWakeup            bool
	CustomStunServers          bool

	FolderUsesV3 struct {
		ScanProgressDisabled    int
		ConflictsDisabled       int
		ConflictsUnlimited      int
		ConflictsOther          int
		DisableSparseFiles      int
		DisableTempIndexes      int
		AlwaysWeakHash          int
		CustomWeakHashThreshold int
		FsWatcherEnabled        int
		PullOrder               IntMap
		FilesystemType          IntMap
		FsWatcherDelays         pq.Int64Array
	}

	GUIStats struct {
		Enabled                   int
		UseTLS                    int
		UseAuth                   int
		InsecureAdminAccess       int
		Debugging                 int
		InsecureSkipHostCheck     int
		InsecureAllowFrameLoading int
		ListenLocal               int
		ListenUnspecified         int
		Theme                     IntMap
	}

	BlockStats struct {
		Total             int
		Renamed           int
		Reused            int
		Pulled            int
		CopyOrigin        int
		CopyOriginShifted int
		CopyElsewhere     int
	}

	TransportStats IntMap

	IgnoreStats struct {
		Lines           int
		Inverts         int
		Folded          int
		Deletable       int
		Rooted          int
		Includes        int
		EscapedIncludes int
		DoubleStars     int
		Stars           int
	}

	// V3 fields added late in the RC
	WeakHashEnabled bool

	// Generated (set server side, not sent by clients)

	Date    string
	Address string
}
+
+func (r *report) Validate() error {
+	if r.UniqueID == "" || r.Version == "" || r.Platform == "" {
+		return fmt.Errorf("missing required field")
+	}
+	if len(r.Date) != 8 {
+		return fmt.Errorf("date not initialized")
+	}
+
+	// Some fields may not be null.
+	if r.RescanIntvs == nil {
+		r.RescanIntvs = []int64{}
+	}
+	if r.FolderUsesV3.FsWatcherDelays == nil {
+		r.FolderUsesV3.FsWatcherDelays = []int64{}
+	}
+
+	return nil
+}
+
// FieldPointers returns pointers to all report fields, in the exact
// order of the columns returned by FieldNames, for use with
// sql.Rows.Scan. The two methods must be kept in sync.
func (r *report) FieldPointers() []interface{} {
	// All the fields of the report, in the same order as the database fields.
	return []interface{}{
		&r.Received, &r.UniqueID, &r.Version, &r.LongVersion, &r.Platform,
		&r.NumFolders, &r.NumDevices, &r.TotFiles, &r.FolderMaxFiles,
		&r.TotMiB, &r.FolderMaxMiB, &r.MemoryUsageMiB, &r.SHA256Perf,
		&r.MemorySize, &r.Date,
		// V2
		&r.URVersion, &r.NumCPU, &r.FolderUses.ReadOnly, &r.FolderUses.IgnorePerms,
		&r.FolderUses.IgnoreDelete, &r.FolderUses.AutoNormalize, &r.DeviceUses.Introducer,
		&r.DeviceUses.CustomCertName, &r.DeviceUses.CompressAlways,
		&r.DeviceUses.CompressMetadata, &r.DeviceUses.CompressNever,
		&r.DeviceUses.DynamicAddr, &r.DeviceUses.StaticAddr,
		&r.Announce.GlobalEnabled, &r.Announce.LocalEnabled,
		&r.Announce.DefaultServersDNS, &r.Announce.DefaultServersIP,
		&r.Announce.OtherServers, &r.Relays.Enabled, &r.Relays.DefaultServers,
		&r.Relays.OtherServers, &r.UsesRateLimit, &r.UpgradeAllowedManual,
		&r.UpgradeAllowedAuto, &r.FolderUses.SimpleVersioning,
		&r.FolderUses.ExternalVersioning, &r.FolderUses.StaggeredVersioning,
		&r.FolderUses.TrashcanVersioning,

		// V2.5
		&r.UpgradeAllowedPre, &r.RescanIntvs,

		// V3
		&r.Uptime, &r.NATType, &r.AlwaysLocalNets, &r.CacheIgnoredFiles,
		&r.OverwriteRemoteDeviceNames, &r.ProgressEmitterEnabled, &r.CustomDefaultFolderPath,
		&r.WeakHashSelection, &r.CustomTrafficClass, &r.CustomTempIndexMinBlocks,
		&r.TemporariesDisabled, &r.TemporariesCustom, &r.LimitBandwidthInLan,
		&r.CustomReleaseURL, &r.RestartOnWakeup, &r.CustomStunServers,

		&r.FolderUsesV3.ScanProgressDisabled, &r.FolderUsesV3.ConflictsDisabled,
		&r.FolderUsesV3.ConflictsUnlimited, &r.FolderUsesV3.ConflictsOther,
		&r.FolderUsesV3.DisableSparseFiles, &r.FolderUsesV3.DisableTempIndexes,
		&r.FolderUsesV3.AlwaysWeakHash, &r.FolderUsesV3.CustomWeakHashThreshold,
		&r.FolderUsesV3.FsWatcherEnabled,

		&r.FolderUsesV3.PullOrder, &r.FolderUsesV3.FilesystemType,
		&r.FolderUsesV3.FsWatcherDelays,

		&r.GUIStats.Enabled, &r.GUIStats.UseTLS, &r.GUIStats.UseAuth,
		&r.GUIStats.InsecureAdminAccess,
		&r.GUIStats.Debugging, &r.GUIStats.InsecureSkipHostCheck,
		&r.GUIStats.InsecureAllowFrameLoading, &r.GUIStats.ListenLocal,
		&r.GUIStats.ListenUnspecified, &r.GUIStats.Theme,

		&r.BlockStats.Total, &r.BlockStats.Renamed,
		&r.BlockStats.Reused, &r.BlockStats.Pulled, &r.BlockStats.CopyOrigin,
		&r.BlockStats.CopyOriginShifted, &r.BlockStats.CopyElsewhere,

		&r.TransportStats,

		&r.IgnoreStats.Lines, &r.IgnoreStats.Inverts, &r.IgnoreStats.Folded,
		&r.IgnoreStats.Deletable, &r.IgnoreStats.Rooted, &r.IgnoreStats.Includes,
		&r.IgnoreStats.EscapedIncludes, &r.IgnoreStats.DoubleStars, &r.IgnoreStats.Stars,

		// V3 added late in the RC
		&r.WeakHashEnabled,
		&r.Address,
	}
}
+
// FieldNames returns the database column names backing this struct in
// PostgreSQL, in the exact order of the pointers returned by
// FieldPointers. The two methods must be kept in sync.
func (r *report) FieldNames() []string {
	// The database fields that back this struct in PostgreSQL
	return []string{
		// V1
		"Received",
		"UniqueID",
		"Version",
		"LongVersion",
		"Platform",
		"NumFolders",
		"NumDevices",
		"TotFiles",
		"FolderMaxFiles",
		"TotMiB",
		"FolderMaxMiB",
		"MemoryUsageMiB",
		"SHA256Perf",
		"MemorySize",
		"Date",
		// V2
		"ReportVersion",
		"NumCPU",
		"FolderRO",
		"FolderIgnorePerms",
		"FolderIgnoreDelete",
		"FolderAutoNormalize",
		"DeviceIntroducer",
		"DeviceCustomCertName",
		"DeviceCompressAlways",
		"DeviceCompressMetadata",
		"DeviceCompressNever",
		"DeviceDynamicAddr",
		"DeviceStaticAddr",
		"AnnounceGlobalEnabled",
		"AnnounceLocalEnabled",
		"AnnounceDefaultServersDNS",
		"AnnounceDefaultServersIP",
		"AnnounceOtherServers",
		"RelayEnabled",
		"RelayDefaultServers",
		"RelayOtherServers",
		"RateLimitEnabled",
		"UpgradeAllowedManual",
		"UpgradeAllowedAuto",
		// v0.12.19+
		"FolderSimpleVersioning",
		"FolderExternalVersioning",
		"FolderStaggeredVersioning",
		"FolderTrashcanVersioning",
		// V2.5
		"UpgradeAllowedPre",
		"RescanIntvs",
		// V3
		"Uptime",
		"NATType",
		"AlwaysLocalNets",
		"CacheIgnoredFiles",
		"OverwriteRemoteDeviceNames",
		"ProgressEmitterEnabled",
		"CustomDefaultFolderPath",
		"WeakHashSelection",
		"CustomTrafficClass",
		"CustomTempIndexMinBlocks",
		"TemporariesDisabled",
		"TemporariesCustom",
		"LimitBandwidthInLan",
		"CustomReleaseURL",
		"RestartOnWakeup",
		"CustomStunServers",

		"FolderScanProgressDisabled",
		"FolderConflictsDisabled",
		"FolderConflictsUnlimited",
		"FolderConflictsOther",
		"FolderDisableSparseFiles",
		"FolderDisableTempIndexes",
		"FolderAlwaysWeakHash",
		"FolderCustomWeakHashThreshold",
		"FolderFsWatcherEnabled",
		"FolderPullOrder",
		"FolderFilesystemType",
		"FolderFsWatcherDelays",

		"GUIEnabled",
		"GUIUseTLS",
		"GUIUseAuth",
		"GUIInsecureAdminAccess",
		"GUIDebugging",
		"GUIInsecureSkipHostCheck",
		"GUIInsecureAllowFrameLoading",
		"GUIListenLocal",
		"GUIListenUnspecified",
		"GUITheme",

		"BlocksTotal",
		"BlocksRenamed",
		"BlocksReused",
		"BlocksPulled",
		"BlocksCopyOrigin",
		"BlocksCopyOriginShifted",
		"BlocksCopyElsewhere",

		"Transport",

		"IgnoreLines",
		"IgnoreInverts",
		"IgnoreFolded",
		"IgnoreDeletable",
		"IgnoreRooted",
		"IgnoreIncludes",
		"IgnoreEscapedIncludes",
		"IgnoreDoubleStars",
		"IgnoreStars",

		// V3 added late in the RC
		"WeakHashEnabled",
		"Address",
	}
}
+
// setupDB creates the Reports table and its indexes if missing, then
// applies the incremental schema migrations (V2, V2.5, V3, late-V3).
// Each migration is detected by probing for an object it introduces:
// a failed Scan on the probe query means "does not exist yet" and
// triggers the corresponding CREATE/ALTER statement.
func setupDB(db *sql.DB) error {
	_, err := db.Exec(`CREATE TABLE IF NOT EXISTS Reports (
		Received TIMESTAMP NOT NULL,
		UniqueID VARCHAR(32) NOT NULL,
		Version VARCHAR(32) NOT NULL,
		LongVersion VARCHAR(256) NOT NULL,
		Platform VARCHAR(32) NOT NULL,
		NumFolders INTEGER NOT NULL,
		NumDevices INTEGER NOT NULL,
		TotFiles INTEGER NOT NULL,
		FolderMaxFiles INTEGER NOT NULL,
		TotMiB INTEGER NOT NULL,
		FolderMaxMiB INTEGER NOT NULL,
		MemoryUsageMiB INTEGER NOT NULL,
		SHA256Perf DOUBLE PRECISION NOT NULL,
		MemorySize INTEGER NOT NULL,
		Date VARCHAR(8) NOT NULL
	)`)
	if err != nil {
		return err
	}

	// Selecting 'name'::regclass fails when the named index does not
	// exist, which is our cue to create it.
	var t string
	row := db.QueryRow(`SELECT 'UniqueIDIndex'::regclass`)
	if err := row.Scan(&t); err != nil {
		if _, err = db.Exec(`CREATE UNIQUE INDEX UniqueIDIndex ON Reports (Date, UniqueID)`); err != nil {
			return err
		}
	}

	row = db.QueryRow(`SELECT 'ReceivedIndex'::regclass`)
	if err := row.Scan(&t); err != nil {
		if _, err = db.Exec(`CREATE INDEX ReceivedIndex ON Reports (Received)`); err != nil {
			return err
		}
	}

	// V2

	row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'reportversion'`)
	if err := row.Scan(&t); err != nil {
		// The ReportVersion column doesn't exist; add the new columns.
		_, err = db.Exec(`ALTER TABLE Reports
		ADD COLUMN ReportVersion INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN NumCPU INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderRO  INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderIgnorePerms INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderIgnoreDelete INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderAutoNormalize INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN DeviceIntroducer INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN DeviceCustomCertName INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN DeviceCompressAlways INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN DeviceCompressMetadata INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN DeviceCompressNever INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN DeviceDynamicAddr INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN DeviceStaticAddr INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN AnnounceGlobalEnabled BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN AnnounceLocalEnabled BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN AnnounceDefaultServersDNS INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN AnnounceDefaultServersIP INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN AnnounceOtherServers INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN RelayEnabled BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN RelayDefaultServers INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN RelayOtherServers INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN RateLimitEnabled BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN UpgradeAllowedManual BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN UpgradeAllowedAuto BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN FolderSimpleVersioning INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderExternalVersioning INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderStaggeredVersioning INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderTrashcanVersioning INTEGER NOT NULL DEFAULT 0
		`)
		if err != nil {
			return err
		}
	}

	row = db.QueryRow(`SELECT 'ReportVersionIndex'::regclass`)
	if err := row.Scan(&t); err != nil {
		if _, err = db.Exec(`CREATE INDEX ReportVersionIndex ON Reports (ReportVersion)`); err != nil {
			return err
		}
	}

	// V2.5

	row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'upgradeallowedpre'`)
	if err := row.Scan(&t); err != nil {
		// The UpgradeAllowedPre column doesn't exist; add the new columns.
		_, err = db.Exec(`ALTER TABLE Reports
		ADD COLUMN UpgradeAllowedPre BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN RescanIntvs INT[] NOT NULL DEFAULT '{}'
		`)
		if err != nil {
			return err
		}
	}

	// V3

	row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'uptime'`)
	if err := row.Scan(&t); err != nil {
		// The Uptime column doesn't exist; add the new columns.
		_, err = db.Exec(`ALTER TABLE Reports
		ADD COLUMN Uptime INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN NATType VARCHAR(32) NOT NULL DEFAULT '',
		ADD COLUMN AlwaysLocalNets BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN CacheIgnoredFiles BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN OverwriteRemoteDeviceNames BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN ProgressEmitterEnabled BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN CustomDefaultFolderPath BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN WeakHashSelection VARCHAR(32) NOT NULL DEFAULT '',
		ADD COLUMN CustomTrafficClass BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN CustomTempIndexMinBlocks BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN TemporariesDisabled BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN TemporariesCustom BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN LimitBandwidthInLan BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN CustomReleaseURL BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN RestartOnWakeup BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN CustomStunServers BOOLEAN NOT NULL DEFAULT FALSE,

		ADD COLUMN FolderScanProgressDisabled INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderConflictsDisabled INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderConflictsUnlimited INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderConflictsOther INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderDisableSparseFiles INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderDisableTempIndexes INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderAlwaysWeakHash INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderCustomWeakHashThreshold INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderFsWatcherEnabled INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN FolderPullOrder JSONB NOT NULL DEFAULT '{}',
		ADD COLUMN FolderFilesystemType JSONB NOT NULL DEFAULT '{}',
		ADD COLUMN FolderFsWatcherDelays INT[] NOT NULL DEFAULT '{}',

		ADD COLUMN GUIEnabled INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIUseTLS INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIUseAuth INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIInsecureAdminAccess INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIDebugging INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIInsecureSkipHostCheck INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIInsecureAllowFrameLoading INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIListenLocal INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUIListenUnspecified INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN GUITheme JSONB NOT NULL DEFAULT '{}',

		ADD COLUMN BlocksTotal INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN BlocksRenamed INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN BlocksReused INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN BlocksPulled INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN BlocksCopyOrigin INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN BlocksCopyOriginShifted INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN BlocksCopyElsewhere INTEGER NOT NULL DEFAULT 0,

		ADD COLUMN Transport JSONB NOT NULL DEFAULT '{}',

		ADD COLUMN IgnoreLines INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreInverts INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreFolded INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreDeletable INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreRooted INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreIncludes INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreEscapedIncludes INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreDoubleStars INTEGER NOT NULL DEFAULT 0,
		ADD COLUMN IgnoreStars INTEGER NOT NULL DEFAULT 0
		`)
		if err != nil {
			return err
		}
	}

	// V3 added late in the RC

	row = db.QueryRow(`SELECT attname FROM pg_attribute WHERE attrelid = (SELECT oid FROM pg_class WHERE relname = 'reports') AND attname = 'weakhashenabled'`)
	if err := row.Scan(&t); err != nil {
		// The WeakHashEnabled column doesn't exist; add the new columns.
		// Note the comma between the two ADD COLUMN actions -- without it
		// this ALTER TABLE is a syntax error and the migration never
		// succeeds.
		_, err = db.Exec(`ALTER TABLE Reports
		ADD COLUMN WeakHashEnabled BOOLEAN NOT NULL DEFAULT FALSE,
		ADD COLUMN Address VARCHAR(45) NOT NULL DEFAULT ''
		`)
		if err != nil {
			return err
		}
	}

	return nil
}
+
+func insertReport(db *sql.DB, r report) error {
+	r.Received = time.Now().UTC()
+	fields := r.FieldPointers()
+	params := make([]string, len(fields))
+	for i := range params {
+		params[i] = fmt.Sprintf("$%d", i+1)
+	}
+	query := "INSERT INTO Reports (" + strings.Join(r.FieldNames(), ", ") + ") VALUES (" + strings.Join(params, ", ") + ")"
+	_, err := db.Exec(query, fields...)
+
+	return err
+}
+
+type withDBFunc func(*sql.DB, http.ResponseWriter, *http.Request)
+
+func withDB(db *sql.DB, f withDBFunc) http.HandlerFunc {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		f(db, w, r)
+	})
+}
+
+func main() {
+	log.SetFlags(log.Ltime | log.Ldate | log.Lshortfile)
+	log.SetOutput(os.Stdout)
+
+	// Template
+
+	fd, err := os.Open("static/index.html")
+	if err != nil {
+		log.Fatalln("template:", err)
+	}
+	bs, err := ioutil.ReadAll(fd)
+	if err != nil {
+		log.Fatalln("template:", err)
+	}
+	fd.Close()
+	tpl = template.Must(template.New("index.html").Funcs(funcs).Parse(string(bs)))
+
+	// DB
+
+	db, err := sql.Open("postgres", dbConn)
+	if err != nil {
+		log.Fatalln("database:", err)
+	}
+	err = setupDB(db)
+	if err != nil {
+		log.Fatalln("database:", err)
+	}
+
+	// TLS & Listening
+
+	var listener net.Listener
+	if useHTTP {
+		listener, err = net.Listen("tcp", listenAddr)
+	} else {
+		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+		if err != nil {
+			log.Fatalln("tls:", err)
+		}
+
+		cfg := &tls.Config{
+			Certificates:           []tls.Certificate{cert},
+			SessionTicketsDisabled: true,
+		}
+		listener, err = tls.Listen("tcp", listenAddr, cfg)
+	}
+	if err != nil {
+		log.Fatalln("listen:", err)
+	}
+
+	srv := http.Server{
+		ReadTimeout:  5 * time.Second,
+		WriteTimeout: 15 * time.Second,
+	}
+
+	http.HandleFunc("/", withDB(db, rootHandler))
+	http.HandleFunc("/newdata", withDB(db, newDataHandler))
+	http.HandleFunc("/summary.json", withDB(db, summaryHandler))
+	http.HandleFunc("/movement.json", withDB(db, movementHandler))
+	http.HandleFunc("/performance.json", withDB(db, performanceHandler))
+	http.HandleFunc("/blockstats.json", withDB(db, blockStatsHandler))
+	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
+
+	err = srv.Serve(listener)
+	if err != nil {
+		log.Fatalln("https:", err)
+	}
+}
+
// Cache of the rendered front page, shared between requests. cacheMut
// guards both cacheData and cacheTime.
var (
	cacheData []byte
	cacheTime time.Time
	cacheMut  sync.Mutex
)

// maxCacheTime is the maximum age of the cached front page before it
// is re-rendered from a fresh database report (five minutes).
const maxCacheTime = 5 * 60 * time.Second
+
+func rootHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
+	if r.URL.Path == "/" || r.URL.Path == "/index.html" {
+		cacheMut.Lock()
+		defer cacheMut.Unlock()
+
+		if time.Since(cacheTime) > maxCacheTime {
+			rep := getReport(db)
+			buf := new(bytes.Buffer)
+			err := tpl.Execute(buf, rep)
+			if err != nil {
+				log.Println(err)
+				http.Error(w, "Template Error", http.StatusInternalServerError)
+				return
+			}
+			cacheData = buf.Bytes()
+			cacheTime = time.Now()
+		}
+
+		w.Header().Set("Content-Type", "text/html; charset=utf-8")
+		w.Write(cacheData)
+	} else {
+		http.Error(w, "Not found", 404)
+		return
+	}
+}
+
+func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
+	defer r.Body.Close()
+
+	addr := r.Header.Get("X-Forwarded-For")
+	if addr != "" {
+		addr = strings.Split(addr, ", ")[0]
+	} else {
+		addr = r.RemoteAddr
+	}
+
+	if host, _, err := net.SplitHostPort(addr); err == nil {
+		addr = host
+	}
+
+	if net.ParseIP(addr) == nil {
+		addr = ""
+	}
+
+	var rep report
+	rep.Date = time.Now().UTC().Format("20060102")
+	rep.Address = addr
+
+	lr := &io.LimitedReader{R: r.Body, N: 40 * 1024}
+	bs, _ := ioutil.ReadAll(lr)
+	if err := json.Unmarshal(bs, &rep); err != nil {
+		log.Println("decode:", err)
+		if debug {
+			log.Printf("%s", bs)
+		}
+		http.Error(w, "JSON Decode Error", http.StatusInternalServerError)
+		return
+	}
+
+	if err := rep.Validate(); err != nil {
+		log.Println("validate:", err)
+		if debug {
+			log.Printf("%#v", rep)
+		}
+		http.Error(w, "Validation Error", http.StatusInternalServerError)
+		return
+	}
+
+	if err := insertReport(db, rep); err != nil {
+		log.Println("insert:", err)
+		if debug {
+			log.Printf("%#v", rep)
+		}
+		http.Error(w, "Database Error", http.StatusInternalServerError)
+		return
+	}
+}
+
+func summaryHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
+	s, err := getSummary(db)
+	if err != nil {
+		log.Println("summaryHandler:", err)
+		http.Error(w, "Database Error", http.StatusInternalServerError)
+		return
+	}
+
+	bs, err := s.MarshalJSON()
+	if err != nil {
+		log.Println("summaryHandler:", err)
+		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(bs)
+}
+
+func movementHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
+	s, err := getMovement(db)
+	if err != nil {
+		log.Println("movementHandler:", err)
+		http.Error(w, "Database Error", http.StatusInternalServerError)
+		return
+	}
+
+	bs, err := json.Marshal(s)
+	if err != nil {
+		log.Println("movementHandler:", err)
+		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(bs)
+}
+
+func performanceHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
+	s, err := getPerformance(db)
+	if err != nil {
+		log.Println("performanceHandler:", err)
+		http.Error(w, "Database Error", http.StatusInternalServerError)
+		return
+	}
+
+	bs, err := json.Marshal(s)
+	if err != nil {
+		log.Println("performanceHandler:", err)
+		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(bs)
+}
+
+func blockStatsHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
+	s, err := getBlockStats(db)
+	if err != nil {
+		log.Println("blockStatsHandler:", err)
+		http.Error(w, "Database Error", http.StatusInternalServerError)
+		return
+	}
+
+	bs, err := json.Marshal(s)
+	if err != nil {
+		log.Println("blockStatsHandler:", err)
+		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(bs)
+}
+
// category is one aggregate-metric row on the front page: the values
// produced by statsForInts/statsForFloats plus display metadata.
// Field names are referenced from the HTML template; do not rename.
type category struct {
	Values [4]float64 // aggregated values for the metric (see statsForInts/statsForFloats)
	Key    string
	Descr  string     // human readable description of the metric
	Unit   string     // display unit, e.g. "B" or "B/s"
	Type   NumberType // how Values should be formatted (binary, duration, ...)
}

// feature is a single feature-usage row: how many reports of a given
// usage-report version use the feature, and the share in percent.
type feature struct {
	Key     string
	Version string // usage report version bucket, e.g. "v2" or "v3"
	Count   int
	Pct     float64 // 100 * Count / total for the feature's category
}

// featureGroup is like feature but with a breakdown into named groups
// (e.g. versioning mode -> count) instead of a single count.
type featureGroup struct {
	Key     string
	Version string
	Counts  map[string]int
}
+
// counter is a small stateful helper used in the templates to number
// rendered items.
type counter struct {
	n int
}

// Current returns the number of increments performed so far.
func (c *counter) Current() int {
	return c.n
}

// Increment advances the counter. It returns the empty string so that
// it can be invoked from a template without emitting output.
func (c *counter) Increment() string {
	c.n++
	return ""
}

// DrawTwoDivider reports whether a divider should be drawn at the
// current position, i.e. after every second item.
func (c *counter) DrawTwoDivider() bool {
	if c.n == 0 {
		return false
	}
	return c.n%2 == 0
}
+
// add accumulates value under storage[parent][child], creating the
// inner map on first use.
func add(storage map[string]map[string]int, parent, child string, value int) {
	if _, ok := storage[parent]; !ok {
		storage[parent] = make(map[string]int)
	}
	storage[parent][child] += value
}
+
// inc makes sure that even for unused features, we initialize them in
// the feature map. A bool increments the count by one when true; an
// int increments it by its value; any other type leaves the count
// unchanged but still creates the key.
func inc(storage map[string]int, key string, i interface{}) {
	delta := 0
	switch v := i.(type) {
	case bool:
		if v {
			delta = 1
		}
	case int:
		delta = v
	}
	storage[key] += delta
}
+
// location is a geographic coordinate resolved from a report's IP
// address via GeoIP, used to aggregate reports per map position.
type location struct {
	Latitude  float64
	Longitude float64
}
+
+func getReport(db *sql.DB) map[string]interface{} {
+	geoip, err := geoip2.Open(geoIPPath)
+	if err != nil {
+		log.Println("opening geoip db", err)
+		geoip = nil
+	} else {
+		defer geoip.Close()
+	}
+
+	nodes := 0
+	countriesTotal := 0
+	var versions []string
+	var platforms []string
+	var numFolders []int
+	var numDevices []int
+	var totFiles []int
+	var maxFiles []int
+	var totMiB []int
+	var maxMiB []int
+	var memoryUsage []int
+	var sha256Perf []float64
+	var memorySize []int
+	var uptime []int
+	var compilers []string
+	var builders []string
+	locations := make(map[location]int)
+	countries := make(map[string]int)
+
+	reports := make(map[string]int)
+	totals := make(map[string]int)
+
+	// category -> version -> feature -> count
+	features := make(map[string]map[string]map[string]int)
+	// category -> version -> feature -> group -> count
+	featureGroups := make(map[string]map[string]map[string]map[string]int)
+	for _, category := range featureOrder {
+		features[category] = make(map[string]map[string]int)
+		featureGroups[category] = make(map[string]map[string]map[string]int)
+		for _, version := range knownVersions {
+			features[category][version] = make(map[string]int)
+			featureGroups[category][version] = make(map[string]map[string]int)
+		}
+	}
+
+	// Initialize some features that hide behind if conditions, and might not
+	// be initialized.
+	add(featureGroups["Various"]["v2"], "Upgrades", "Pre-release", 0)
+	add(featureGroups["Various"]["v2"], "Upgrades", "Automatic", 0)
+	add(featureGroups["Various"]["v2"], "Upgrades", "Manual", 0)
+	add(featureGroups["Various"]["v2"], "Upgrades", "Disabled", 0)
+	add(featureGroups["Various"]["v3"], "Temporary Retention", "Disabled", 0)
+	add(featureGroups["Various"]["v3"], "Temporary Retention", "Custom", 0)
+	add(featureGroups["Various"]["v3"], "Temporary Retention", "Default", 0)
+	add(featureGroups["Connection"]["v3"], "IP version", "IPv4", 0)
+	add(featureGroups["Connection"]["v3"], "IP version", "IPv6", 0)
+	add(featureGroups["Connection"]["v3"], "IP version", "Unknown", 0)
+
+	var numCPU []int
+
+	var rep report
+
+	rows, err := db.Query(`SELECT ` + strings.Join(rep.FieldNames(), ",") + ` FROM Reports WHERE Received > now() - '1 day'::INTERVAL`)
+	if err != nil {
+		log.Println("sql:", err)
+		return nil
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		err := rows.Scan(rep.FieldPointers()...)
+
+		if err != nil {
+			log.Println("sql:", err)
+			return nil
+		}
+
+		if geoip != nil && rep.Address != "" {
+			if addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(rep.Address, "0")); err == nil {
+				city, err := geoip.City(addr.IP)
+				if err == nil {
+					loc := location{
+						Latitude:  city.Location.Latitude,
+						Longitude: city.Location.Longitude,
+					}
+					locations[loc]++
+					countries[city.Country.Names["en"]]++
+					countriesTotal++
+				}
+			}
+		}
+
+		nodes++
+		versions = append(versions, transformVersion(rep.Version))
+		platforms = append(platforms, rep.Platform)
+		if m := compilerRe.FindStringSubmatch(rep.LongVersion); len(m) == 3 {
+			compilers = append(compilers, m[1])
+			builders = append(builders, m[2])
+		}
+		if rep.NumFolders > 0 {
+			numFolders = append(numFolders, rep.NumFolders)
+		}
+		if rep.NumDevices > 0 {
+			numDevices = append(numDevices, rep.NumDevices)
+		}
+		if rep.TotFiles > 0 {
+			totFiles = append(totFiles, rep.TotFiles)
+		}
+		if rep.FolderMaxFiles > 0 {
+			maxFiles = append(maxFiles, rep.FolderMaxFiles)
+		}
+		if rep.TotMiB > 0 {
+			totMiB = append(totMiB, rep.TotMiB*(1<<20))
+		}
+		if rep.FolderMaxMiB > 0 {
+			maxMiB = append(maxMiB, rep.FolderMaxMiB*(1<<20))
+		}
+		if rep.MemoryUsageMiB > 0 {
+			memoryUsage = append(memoryUsage, rep.MemoryUsageMiB*(1<<20))
+		}
+		if rep.SHA256Perf > 0 {
+			sha256Perf = append(sha256Perf, rep.SHA256Perf*(1<<20))
+		}
+		if rep.MemorySize > 0 {
+			memorySize = append(memorySize, rep.MemorySize*(1<<20))
+		}
+		if rep.Uptime > 0 {
+			uptime = append(uptime, rep.Uptime)
+		}
+
+		totals["Device"] += rep.NumDevices
+		totals["Folder"] += rep.NumFolders
+
+		if rep.URVersion >= 2 {
+			reports["v2"]++
+			numCPU = append(numCPU, rep.NumCPU)
+
+			// Various
+			inc(features["Various"]["v2"], "Rate limiting", rep.UsesRateLimit)
+
+			if rep.UpgradeAllowedPre {
+				add(featureGroups["Various"]["v2"], "Upgrades", "Pre-release", 1)
+			} else if rep.UpgradeAllowedAuto {
+				add(featureGroups["Various"]["v2"], "Upgrades", "Automatic", 1)
+			} else if rep.UpgradeAllowedManual {
+				add(featureGroups["Various"]["v2"], "Upgrades", "Manual", 1)
+			} else {
+				add(featureGroups["Various"]["v2"], "Upgrades", "Disabled", 1)
+			}
+
+			// Folders
+			inc(features["Folder"]["v2"], "Automatic normalization", rep.FolderUses.AutoNormalize)
+			inc(features["Folder"]["v2"], "Ignore deletes", rep.FolderUses.IgnoreDelete)
+			inc(features["Folder"]["v2"], "Ignore permissions", rep.FolderUses.IgnorePerms)
+			inc(features["Folder"]["v2"], "Mode, send-only", rep.FolderUses.ReadOnly)
+
+			add(featureGroups["Folder"]["v2"], "Versioning", "Simple", rep.FolderUses.SimpleVersioning)
+			add(featureGroups["Folder"]["v2"], "Versioning", "External", rep.FolderUses.ExternalVersioning)
+			add(featureGroups["Folder"]["v2"], "Versioning", "Staggered", rep.FolderUses.StaggeredVersioning)
+			add(featureGroups["Folder"]["v2"], "Versioning", "Trashcan", rep.FolderUses.TrashcanVersioning)
+			add(featureGroups["Folder"]["v2"], "Versioning", "Disabled", rep.NumFolders-rep.FolderUses.SimpleVersioning-rep.FolderUses.ExternalVersioning-rep.FolderUses.StaggeredVersioning-rep.FolderUses.TrashcanVersioning)
+
+			// Device
+			inc(features["Device"]["v2"], "Custom certificate", rep.DeviceUses.CustomCertName)
+			inc(features["Device"]["v2"], "Introducer", rep.DeviceUses.Introducer)
+
+			add(featureGroups["Device"]["v2"], "Compress", "Always", rep.DeviceUses.CompressAlways)
+			add(featureGroups["Device"]["v2"], "Compress", "Metadata", rep.DeviceUses.CompressMetadata)
+			add(featureGroups["Device"]["v2"], "Compress", "Nothing", rep.DeviceUses.CompressNever)
+
+			add(featureGroups["Device"]["v2"], "Addresses", "Dynamic", rep.DeviceUses.DynamicAddr)
+			add(featureGroups["Device"]["v2"], "Addresses", "Static", rep.DeviceUses.StaticAddr)
+
+			// Connections
+			inc(features["Connection"]["v2"], "Relaying, enabled", rep.Relays.Enabled)
+			inc(features["Connection"]["v2"], "Discovery, global enabled", rep.Announce.GlobalEnabled)
+			inc(features["Connection"]["v2"], "Discovery, local enabled", rep.Announce.LocalEnabled)
+
+			add(featureGroups["Connection"]["v2"], "Discovery", "Default servers (using DNS)", rep.Announce.DefaultServersDNS)
+			add(featureGroups["Connection"]["v2"], "Discovery", "Default servers (using IP)", rep.Announce.DefaultServersIP)
+			add(featureGroups["Connection"]["v2"], "Discovery", "Other servers", rep.Announce.DefaultServersIP)
+
+			add(featureGroups["Connection"]["v2"], "Relaying", "Default relays", rep.Relays.DefaultServers)
+			add(featureGroups["Connection"]["v2"], "Relaying", "Other relays", rep.Relays.OtherServers)
+		}
+
+		if rep.URVersion >= 3 {
+			reports["v3"]++
+
+			inc(features["Various"]["v3"], "Custom LAN classification", rep.AlwaysLocalNets)
+			inc(features["Various"]["v3"], "Ignore caching", rep.CacheIgnoredFiles)
+			inc(features["Various"]["v3"], "Overwrite device names", rep.OverwriteRemoteDeviceNames)
+			inc(features["Various"]["v3"], "Download progress disabled", !rep.ProgressEmitterEnabled)
+			inc(features["Various"]["v3"], "Custom default path", rep.CustomDefaultFolderPath)
+			inc(features["Various"]["v3"], "Custom traffic class", rep.CustomTrafficClass)
+			inc(features["Various"]["v3"], "Custom temporary index threshold", rep.CustomTempIndexMinBlocks)
+			inc(features["Various"]["v3"], "Weak hash enabled", rep.WeakHashEnabled)
+			inc(features["Various"]["v3"], "LAN rate limiting", rep.LimitBandwidthInLan)
+			inc(features["Various"]["v3"], "Custom release server", rep.CustomReleaseURL)
+			inc(features["Various"]["v3"], "Restart after suspend", rep.RestartOnWakeup)
+			inc(features["Various"]["v3"], "Custom stun servers", rep.CustomStunServers)
+			inc(features["Various"]["v3"], "Ignore patterns", rep.IgnoreStats.Lines > 0)
+
+			if rep.NATType != "" {
+				natType := rep.NATType
+				natType = strings.Replace(natType, "unknown", "Unknown", -1)
+				natType = strings.Replace(natType, "Symetric", "Symmetric", -1)
+				add(featureGroups["Various"]["v3"], "NAT Type", natType, 1)
+			}
+
+			if rep.TemporariesDisabled {
+				add(featureGroups["Various"]["v3"], "Temporary Retention", "Disabled", 1)
+			} else if rep.TemporariesCustom {
+				add(featureGroups["Various"]["v3"], "Temporary Retention", "Custom", 1)
+			} else {
+				add(featureGroups["Various"]["v3"], "Temporary Retention", "Default", 1)
+			}
+
+			inc(features["Folder"]["v3"], "Scan progress disabled", rep.FolderUsesV3.ScanProgressDisabled)
+			inc(features["Folder"]["v3"], "Disable sharing of partial files", rep.FolderUsesV3.DisableTempIndexes)
+			inc(features["Folder"]["v3"], "Disable sparse files", rep.FolderUsesV3.DisableSparseFiles)
+			inc(features["Folder"]["v3"], "Weak hash, always", rep.FolderUsesV3.AlwaysWeakHash)
+			inc(features["Folder"]["v3"], "Weak hash, custom threshold", rep.FolderUsesV3.CustomWeakHashThreshold)
+			inc(features["Folder"]["v3"], "Filesystem watcher", rep.FolderUsesV3.FsWatcherEnabled)
+
+			add(featureGroups["Folder"]["v3"], "Conflicts", "Disabled", rep.FolderUsesV3.ConflictsDisabled)
+			add(featureGroups["Folder"]["v3"], "Conflicts", "Unlimited", rep.FolderUsesV3.ConflictsUnlimited)
+			add(featureGroups["Folder"]["v3"], "Conflicts", "Limited", rep.FolderUsesV3.ConflictsOther)
+
+			for key, value := range rep.FolderUsesV3.PullOrder {
+				add(featureGroups["Folder"]["v3"], "Pull Order", prettyCase(key), value)
+			}
+
+			totals["GUI"] += rep.GUIStats.Enabled
+
+			inc(features["GUI"]["v3"], "Auth Enabled", rep.GUIStats.UseAuth)
+			inc(features["GUI"]["v3"], "TLS Enabled", rep.GUIStats.UseTLS)
+			inc(features["GUI"]["v3"], "Insecure Admin Access", rep.GUIStats.InsecureAdminAccess)
+			inc(features["GUI"]["v3"], "Skip Host check", rep.GUIStats.InsecureSkipHostCheck)
+			inc(features["GUI"]["v3"], "Allow Frame loading", rep.GUIStats.InsecureAllowFrameLoading)
+
+			add(featureGroups["GUI"]["v3"], "Listen address", "Local", rep.GUIStats.ListenLocal)
+			add(featureGroups["GUI"]["v3"], "Listen address", "Unspecified", rep.GUIStats.ListenUnspecified)
+			add(featureGroups["GUI"]["v3"], "Listen address", "Other", rep.GUIStats.Enabled-rep.GUIStats.ListenLocal-rep.GUIStats.ListenUnspecified)
+
+			for theme, count := range rep.GUIStats.Theme {
+				add(featureGroups["GUI"]["v3"], "Theme", prettyCase(theme), count)
+			}
+
+			for transport, count := range rep.TransportStats {
+				add(featureGroups["Connection"]["v3"], "Transport", strings.Title(transport), count)
+				if strings.HasSuffix(transport, "4") {
+					add(featureGroups["Connection"]["v3"], "IP version", "IPv4", count)
+				} else if strings.HasSuffix(transport, "6") {
+					add(featureGroups["Connection"]["v3"], "IP version", "IPv6", count)
+				} else {
+					add(featureGroups["Connection"]["v3"], "IP version", "Unknown", count)
+				}
+			}
+		}
+	}
+
+	var categories []category
+	categories = append(categories, category{
+		Values: statsForInts(totFiles),
+		Descr:  "Files Managed per Device",
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(maxFiles),
+		Descr:  "Files in Largest Folder",
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(totMiB),
+		Descr:  "Data Managed per Device",
+		Unit:   "B",
+		Type:   NumberBinary,
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(maxMiB),
+		Descr:  "Data in Largest Folder",
+		Unit:   "B",
+		Type:   NumberBinary,
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(numDevices),
+		Descr:  "Number of Devices in Cluster",
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(numFolders),
+		Descr:  "Number of Folders Configured",
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(memoryUsage),
+		Descr:  "Memory Usage",
+		Unit:   "B",
+		Type:   NumberBinary,
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(memorySize),
+		Descr:  "System Memory",
+		Unit:   "B",
+		Type:   NumberBinary,
+	})
+
+	categories = append(categories, category{
+		Values: statsForFloats(sha256Perf),
+		Descr:  "SHA-256 Hashing Performance",
+		Unit:   "B/s",
+		Type:   NumberBinary,
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(numCPU),
+		Descr:  "Number of CPU cores",
+	})
+
+	categories = append(categories, category{
+		Values: statsForInts(uptime),
+		Descr:  "Uptime (v3)",
+		Type:   NumberDuration,
+	})
+
+	reportFeatures := make(map[string][]feature)
+	for featureType, versions := range features {
+		var featureList []feature
+		for version, featureMap := range versions {
+			// We count totals of the given feature type, for example number of
+			// folders or devices, if that doesn't exist, we work out percentage
+			// against the total of the version reports. Things like "Various"
+			// never have counts.
+			total, ok := totals[featureType]
+			if !ok {
+				total = reports[version]
+			}
+			for key, count := range featureMap {
+				featureList = append(featureList, feature{
+					Key:     key,
+					Version: version,
+					Count:   count,
+					Pct:     (100 * float64(count)) / float64(total),
+				})
+			}
+		}
+		sort.Sort(sort.Reverse(sortableFeatureList(featureList)))
+		reportFeatures[featureType] = featureList
+	}
+
+	reportFeatureGroups := make(map[string][]featureGroup)
+	for featureType, versions := range featureGroups {
+		var featureList []featureGroup
+		for version, featureMap := range versions {
+			for key, counts := range featureMap {
+				featureList = append(featureList, featureGroup{
+					Key:     key,
+					Version: version,
+					Counts:  counts,
+				})
+			}
+		}
+		reportFeatureGroups[featureType] = featureList
+	}
+
+	var countryList []feature
+	for country, count := range countries {
+		countryList = append(countryList, feature{
+			Key:   country,
+			Count: count,
+			Pct:   (100 * float64(count)) / float64(countriesTotal),
+		})
+		sort.Sort(sort.Reverse(sortableFeatureList(countryList)))
+	}
+
+	r := make(map[string]interface{})
+	r["features"] = reportFeatures
+	r["featureGroups"] = reportFeatureGroups
+	r["nodes"] = nodes
+	r["versionNodes"] = reports
+	r["categories"] = categories
+	r["versions"] = group(byVersion, analyticsFor(versions, 2000), 5)
+	r["versionPenetrations"] = penetrationLevels(analyticsFor(versions, 2000), []float64{50, 75, 90, 95})
+	r["platforms"] = group(byPlatform, analyticsFor(platforms, 2000), 5)
+	r["compilers"] = group(byCompiler, analyticsFor(compilers, 2000), 3)
+	r["builders"] = analyticsFor(builders, 12)
+	r["featureOrder"] = featureOrder
+	r["locations"] = locations
+	r["contries"] = countryList
+
+	return r
+}
+
+func ensureDir(dir string, mode int) {
+	fi, err := os.Stat(dir)
+	if os.IsNotExist(err) {
+		os.MkdirAll(dir, 0700)
+	} else if mode >= 0 && err == nil && int(fi.Mode()&0777) != mode {
+		os.Chmod(dir, os.FileMode(mode))
+	}
+}
+
var (
	plusRe  = regexp.MustCompile(`\+.*$`)
	plusStr = "(+dev)"
)

// transformVersion returns a version number formatted correctly, with
// all development versions (those carrying a "+..." suffix) aggregated
// into a single " (+dev)" bucket. "unknown-dev" passes through as-is,
// and a leading "v" is added when missing.
func transformVersion(v string) string {
	if v == "unknown-dev" {
		return v
	}
	if !strings.HasPrefix(v, "v") {
		v = "v" + v
	}
	return plusRe.ReplaceAllString(v, " "+plusStr)
}
+
// summary is a pivot of version usage per day: a column index per
// version string, the maximum single-day count per version, and one
// row of counts per date.
type summary struct {
	versions map[string]int   // version string to column index
	max      map[string]int   // version string to max users per day
	rows     map[string][]int // date to list of counts
}

// newSummary returns an empty, ready-to-use summary.
func newSummary() summary {
	return summary{
		versions: make(map[string]int),
		max:      make(map[string]int),
		rows:     make(map[string][]int),
	}
}

// setCount records count users of version on date, assigning the
// version a column on first sight, tracking its per-day maximum and
// growing the date's row as needed.
func (s *summary) setCount(date, version string, count int) {
	col, seen := s.versions[version]
	if !seen {
		col = len(s.versions)
		s.versions[version] = col
	}

	if count > s.max[version] {
		s.max[version] = count
	}

	row := s.rows[date]
	for len(row) <= col {
		row = append(row, 0)
	}
	row[col] = count
	s.rows[date] = row
}

// MarshalJSON renders the summary as a chart table: a header row
// ("Day", version, ...) followed by one row per date in ascending
// order. Versions that never exceeded 50 users on a single day are
// dropped entirely; zero counts are emitted as null.
func (s *summary) MarshalJSON() ([]byte, error) {
	versions := make([]string, 0, len(s.versions))
	for v := range s.versions {
		if s.max[v] > 50 {
			versions = append(versions, v)
		}
	}
	sort.Strings(versions)

	header := make([]interface{}, 0, len(versions)+1)
	header = append(header, "Day")
	for _, v := range versions {
		header = append(header, v)
	}

	dates := make([]string, 0, len(s.rows))
	for d := range s.rows {
		dates = append(dates, d)
	}
	sort.Strings(dates)

	table := make([][]interface{}, 0, len(dates)+1)
	table = append(table, header)
	for _, date := range dates {
		counts := s.rows[date]
		row := []interface{}{date}
		for _, v := range versions {
			col := s.versions[v]
			var cell interface{}
			if col < len(counts) && counts[col] > 0 {
				cell = counts[col]
			}
			row = append(row, cell)
		}
		table = append(table, row)
	}

	return json.Marshal(table)
}
+
+func getSummary(db *sql.DB) (summary, error) {
+	s := newSummary()
+
+	rows, err := db.Query(`SELECT Day, Version, Count FROM VersionSummary WHERE Day > now() - '2 year'::INTERVAL;`)
+	if err != nil {
+		return summary{}, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var day time.Time
+		var ver string
+		var num int
+		err := rows.Scan(&day, &ver, &num)
+		if err != nil {
+			return summary{}, err
+		}
+
+		if ver == "v0.0" {
+			// ?
+			continue
+		}
+
+		// SUPER UGLY HACK to avoid having to do sorting properly
+		if len(ver) == 4 { // v0.x
+			ver = ver[:3] + "0" + ver[3:] // now v0.0x
+		}
+
+		s.setCount(day.Format("2006-01-02"), ver, num)
+	}
+
+	return s, nil
+}
+
// getMovement loads the daily joined/left/bounced user counts for the
// last two years from the UserMovement table as a chart table with a
// header row. Removed counts are negated so they plot below the axis;
// zero values become nil so the chart shows gaps instead of zero bars.
func getMovement(db *sql.DB) ([][]interface{}, error) {
	rows, err := db.Query(`SELECT Day, Added, Removed, Bounced FROM UserMovement WHERE Day > now() - '2 year'::INTERVAL ORDER BY Day`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	res := [][]interface{}{
		{"Day", "Joined", "Left", "Bounced"},
	}

	for rows.Next() {
		var day time.Time
		var added, removed, bounced int
		if err := rows.Scan(&day, &added, &removed, &bounced); err != nil {
			return nil, err
		}

		row := []interface{}{day.Format("2006-01-02"), added, -removed, bounced}
		if removed == 0 {
			row[2] = nil
		}
		if bounced == 0 {
			row[3] = nil
		}

		res = append(res, row)
	}

	// Check for errors that terminated the iteration early; otherwise a
	// partial result would be returned as if it were complete.
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return res, nil
}
+
// getPerformance loads daily aggregate performance figures (file and
// data totals, SHA256 throughput, memory sizes) from the Performance
// table, from 2014-06-20 onward, as a chart table with a header row.
func getPerformance(db *sql.DB) ([][]interface{}, error) {
	rows, err := db.Query(`SELECT Day, TotFiles, TotMiB, SHA256Perf, MemorySize, MemoryUsageMiB FROM Performance WHERE Day > '2014-06-20'::TIMESTAMP ORDER BY Day`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	res := [][]interface{}{
		{"Day", "TotFiles", "TotMiB", "SHA256Perf", "MemorySize", "MemoryUsageMiB"},
	}

	for rows.Next() {
		var day time.Time
		var sha256Perf float64
		var totFiles, totMiB, memorySize, memoryUsage int
		if err := rows.Scan(&day, &totFiles, &totMiB, &sha256Perf, &memorySize, &memoryUsage); err != nil {
			return nil, err
		}

		// Truncate the SHA256 rate to one decimal for display.
		row := []interface{}{day.Format("2006-01-02"), totFiles, totMiB, float64(int(sha256Perf*10)) / 10, memorySize, memoryUsage}
		res = append(res, row)
	}

	// Check for errors that terminated the iteration early; otherwise a
	// partial result would be returned as if it were complete.
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return res, nil
}
+
// getBlockStats loads daily block reuse statistics from the BlockStats
// table, from 2017-10-23 onward, as a chart table with a header row.
// Raw block counts are converted to GiB for display.
func getBlockStats(db *sql.DB) ([][]interface{}, error) {
	rows, err := db.Query(`SELECT Day, Reports, Pulled, Renamed, Reused, CopyOrigin, CopyOriginShifted, CopyElsewhere FROM BlockStats WHERE Day > '2017-10-23'::TIMESTAMP ORDER BY Day`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	res := [][]interface{}{
		{"Day", "Number of Reports", "Transferred (GiB)", "Saved by renaming files (GiB)", "Saved by resuming transfer (GiB)", "Saved by reusing data from old file (GiB)", "Saved by reusing shifted data from old file (GiB)", "Saved by reusing data from other files (GiB)"},
	}
	// Blocks per GiB, given 128 KiB blocks (8 blocks/MiB * 1024 MiB/GiB).
	blocksToGiB := float64(8 * 1024)
	for rows.Next() {
		var day time.Time
		var reports, pulled, renamed, reused, copyOrigin, copyOriginShifted, copyElsewhere float64
		if err := rows.Scan(&day, &reports, &pulled, &renamed, &reused, &copyOrigin, &copyOriginShifted, &copyElsewhere); err != nil {
			return nil, err
		}
		row := []interface{}{
			day.Format("2006-01-02"),
			reports,
			pulled / blocksToGiB,
			renamed / blocksToGiB,
			reused / blocksToGiB,
			copyOrigin / blocksToGiB,
			copyOriginShifted / blocksToGiB,
			copyElsewhere / blocksToGiB,
		}
		res = append(res, row)
	}

	// Check for errors that terminated the iteration early; otherwise a
	// partial result would be returned as if it were complete.
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return res, nil
}
+
+type sortableFeatureList []feature
+
+func (l sortableFeatureList) Len() int {
+	return len(l)
+}
+func (l sortableFeatureList) Swap(a, b int) {
+	l[a], l[b] = l[b], l[a]
+}
+func (l sortableFeatureList) Less(a, b int) bool {
+	if l[a].Pct != l[b].Pct {
+		return l[a].Pct < l[b].Pct
+	}
+	return l[a].Key > l[b].Key
+}
+
// prettyCase turns a camelCase identifier into a human readable label:
// the first rune is upper-cased and a space is inserted before every
// subsequent upper-case rune, e.g. "totFiles" -> "Tot Files".
func prettyCase(input string) string {
	// Append runes to a slice instead of concatenating strings in the
	// loop, which would allocate a new string per rune (quadratic).
	out := make([]rune, 0, len(input)+4)
	for i, r := range input {
		if i == 0 {
			r = unicode.ToUpper(r)
		} else if unicode.IsUpper(r) {
			out = append(out, ' ')
		}
		out = append(out, r)
	}
	return string(out)
}

BIN
cmd/ursrv/static/assets/img/favicon.png


Filskillnaden har hållts tillbaka eftersom den är för stor
+ 6 - 0
cmd/ursrv/static/bootstrap/css/bootstrap-theme.min.css


Filskillnaden har hållts tillbaka eftersom den är för stor
+ 6 - 0
cmd/ursrv/static/bootstrap/css/bootstrap.min.css


Filskillnaden har hållts tillbaka eftersom den är för stor
+ 6 - 0
cmd/ursrv/static/bootstrap/js/bootstrap.min.js


BIN
cmd/ursrv/static/fonts/glyphicons-halflings-regular.eot


+ 229 - 0
cmd/ursrv/static/fonts/glyphicons-halflings-regular.svg

@@ -0,0 +1,229 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
+<font-face units-per-em="1200" ascent="960" descent="-240" />
+<missing-glyph horiz-adv-x="500" />
+<glyph />
+<glyph />
+<glyph unicode="&#xd;" />
+<glyph unicode=" " />
+<glyph unicode="*" d="M100 500v200h259l-183 183l141 141l183 -183v259h200v-259l183 183l141 -141l-183 -183h259v-200h-259l183 -183l-141 -141l-183 183v-259h-200v259l-183 -183l-141 141l183 183h-259z" />
+<glyph unicode="+" d="M0 400v300h400v400h300v-400h400v-300h-400v-400h-300v400h-400z" />
+<glyph unicode="&#xa0;" />
+<glyph unicode="&#x2000;" horiz-adv-x="652" />
+<glyph unicode="&#x2001;" horiz-adv-x="1304" />
+<glyph unicode="&#x2002;" horiz-adv-x="652" />
+<glyph unicode="&#x2003;" horiz-adv-x="1304" />
+<glyph unicode="&#x2004;" horiz-adv-x="434" />
+<glyph unicode="&#x2005;" horiz-adv-x="326" />
+<glyph unicode="&#x2006;" horiz-adv-x="217" />
+<glyph unicode="&#x2007;" horiz-adv-x="217" />
+<glyph unicode="&#x2008;" horiz-adv-x="163" />
+<glyph unicode="&#x2009;" horiz-adv-x="260" />
+<glyph unicode="&#x200a;" horiz-adv-x="72" />
+<glyph unicode="&#x202f;" horiz-adv-x="260" />
+<glyph unicode="&#x205f;" horiz-adv-x="326" />
+<glyph unicode="&#x20ac;" d="M100 500l100 100h113q0 47 5 100h-218l100 100h135q37 167 112 257q117 141 297 141q242 0 354 -189q60 -103 66 -209h-181q0 55 -25.5 99t-63.5 68t-75 36.5t-67 12.5q-24 0 -52.5 -10t-62.5 -32t-65.5 -67t-50.5 -107h379l-100 -100h-300q-6 -46 -6 -100h406l-100 -100 h-300q9 -74 33 -132t52.5 -91t62 -54.5t59 -29t46.5 -7.5q29 0 66 13t75 37t63.5 67.5t25.5 96.5h174q-31 -172 -128 -278q-107 -117 -274 -117q-205 0 -324 158q-36 46 -69 131.5t-45 205.5h-217z" />
+<glyph unicode="&#x2212;" d="M200 400h900v300h-900v-300z" />
+<glyph unicode="&#x2601;" d="M-14 494q0 -80 56.5 -137t135.5 -57h750q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5z" />
+<glyph unicode="&#x2709;" d="M0 100l400 400l200 -200l200 200l400 -400h-1200zM0 300v600l300 -300zM0 1100l600 -603l600 603h-1200zM900 600l300 300v-600z" />
+<glyph unicode="&#x270f;" d="M-13 -13l333 112l-223 223zM187 403l214 -214l614 614l-214 214zM887 1103l214 -214l99 92q13 13 13 32.5t-13 33.5l-153 153q-15 13 -33 13t-33 -13z" />
+<glyph unicode="&#xe000;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#xe001;" d="M0 1200h1200l-500 -550v-550h300v-100h-800v100h300v550z" />
+<glyph unicode="&#xe002;" d="M14 84q18 -55 86 -75.5t147 5.5q65 21 109 69t44 90v606l600 155v-521q-64 16 -138 -7q-79 -26 -122.5 -83t-25.5 -111q17 -55 85.5 -75.5t147.5 4.5q70 23 111.5 63.5t41.5 95.5v881q0 10 -7 15.5t-17 2.5l-752 -193q-10 -3 -17 -12.5t-7 -19.5v-689q-64 17 -138 -7 q-79 -25 -122.5 -82t-25.5 -112z" />
+<glyph unicode="&#xe003;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233z" />
+<glyph unicode="&#xe005;" d="M100 784q0 64 28 123t73 100.5t104.5 64t119 20.5t120 -38.5t104.5 -104.5q48 69 109.5 105t121.5 38t118.5 -20.5t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-149.5 152.5t-126.5 127.5 t-94 124.5t-33.5 117.5z" />
+<glyph unicode="&#xe006;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1z" />
+<glyph unicode="&#xe007;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1zM237 700l196 -142l-73 -226l192 140l195 -141l-74 229l193 140h-235l-77 211l-78 -211h-239z" />
+<glyph unicode="&#xe008;" d="M0 0v143l400 257v100q-37 0 -68.5 74.5t-31.5 125.5v200q0 124 88 212t212 88t212 -88t88 -212v-200q0 -51 -31.5 -125.5t-68.5 -74.5v-100l400 -257v-143h-1200z" />
+<glyph unicode="&#xe009;" d="M0 0v1100h1200v-1100h-1200zM100 100h100v100h-100v-100zM100 300h100v100h-100v-100zM100 500h100v100h-100v-100zM100 700h100v100h-100v-100zM100 900h100v100h-100v-100zM300 100h600v400h-600v-400zM300 600h600v400h-600v-400zM1000 100h100v100h-100v-100z M1000 300h100v100h-100v-100zM1000 500h100v100h-100v-100zM1000 700h100v100h-100v-100zM1000 900h100v100h-100v-100z" />
+<glyph unicode="&#xe010;" d="M0 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM0 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5zM600 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM600 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe011;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 450v200q0 21 14.5 35.5t35.5 14.5h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe012;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v200q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5 t-14.5 -35.5v-200zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe013;" d="M29 454l419 -420l818 820l-212 212l-607 -607l-206 207z" />
+<glyph unicode="&#xe014;" d="M106 318l282 282l-282 282l212 212l282 -282l282 282l212 -212l-282 -282l282 -282l-212 -212l-282 282l-282 -282z" />
+<glyph unicode="&#xe015;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233zM300 600v200h100v100h200v-100h100v-200h-100v-100h-200v100h-100z" />
+<glyph unicode="&#xe016;" d="M23 694q0 200 142 342t342 142t342 -142t142 -342q0 -141 -78 -262l300 -299q7 -7 7 -18t-7 -18l-109 -109q-8 -8 -18 -8t-18 8l-300 299q-120 -77 -261 -77q-200 0 -342 142t-142 342zM176 694q0 -136 97 -233t234 -97t233.5 97t96.5 233t-96.5 233t-233.5 97t-234 -97 t-97 -233zM300 601h400v200h-400v-200z" />
+<glyph unicode="&#xe017;" d="M23 600q0 183 105 331t272 210v-166q-103 -55 -165 -155t-62 -220q0 -177 125 -302t302 -125t302 125t125 302q0 120 -62 220t-165 155v166q167 -62 272 -210t105 -331q0 -118 -45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5 zM500 750q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v400q0 21 -14.5 35.5t-35.5 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-400z" />
+<glyph unicode="&#xe018;" d="M100 1h200v300h-200v-300zM400 1v500h200v-500h-200zM700 1v800h200v-800h-200zM1000 1v1200h200v-1200h-200z" />
+<glyph unicode="&#xe019;" d="M26 601q0 -33 6 -74l151 -38l2 -6q14 -49 38 -93l3 -5l-80 -134q45 -59 105 -105l133 81l5 -3q45 -26 94 -39l5 -2l38 -151q40 -5 74 -5q27 0 74 5l38 151l6 2q46 13 93 39l5 3l134 -81q56 44 104 105l-80 134l3 5q24 44 39 93l1 6l152 38q5 40 5 74q0 28 -5 73l-152 38 l-1 6q-16 51 -39 93l-3 5l80 134q-44 58 -104 105l-134 -81l-5 3q-45 25 -93 39l-6 1l-38 152q-40 5 -74 5q-27 0 -74 -5l-38 -152l-5 -1q-50 -14 -94 -39l-5 -3l-133 81q-59 -47 -105 -105l80 -134l-3 -5q-25 -47 -38 -93l-2 -6l-151 -38q-6 -48 -6 -73zM385 601 q0 88 63 151t152 63t152 -63t63 -151q0 -89 -63 -152t-152 -63t-152 63t-63 152z" />
+<glyph unicode="&#xe020;" d="M100 1025v50q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-50q0 -11 -7 -18t-18 -7h-1050q-11 0 -18 7t-7 18zM200 100v800h900v-800q0 -41 -29.5 -71t-70.5 -30h-700q-41 0 -70.5 30 t-29.5 71zM300 100h100v700h-100v-700zM500 100h100v700h-100v-700zM500 1100h300v100h-300v-100zM700 100h100v700h-100v-700zM900 100h100v700h-100v-700z" />
+<glyph unicode="&#xe021;" d="M1 601l656 644l644 -644h-200v-600h-300v400h-300v-400h-300v600h-200z" />
+<glyph unicode="&#xe022;" d="M100 25v1150q0 11 7 18t18 7h475v-500h400v-675q0 -11 -7 -18t-18 -7h-850q-11 0 -18 7t-7 18zM700 800v300l300 -300h-300z" />
+<glyph unicode="&#xe023;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 500v400h100 v-300h200v-100h-300z" />
+<glyph unicode="&#xe024;" d="M-100 0l431 1200h209l-21 -300h162l-20 300h208l431 -1200h-538l-41 400h-242l-40 -400h-539zM488 500h224l-27 300h-170z" />
+<glyph unicode="&#xe025;" d="M0 0v400h490l-290 300h200v500h300v-500h200l-290 -300h490v-400h-1100zM813 200h175v100h-175v-100z" />
+<glyph unicode="&#xe026;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM188 600q0 -170 121 -291t291 -121t291 121t121 291t-121 291t-291 121 t-291 -121t-121 -291zM350 600h150v300h200v-300h150l-250 -300z" />
+<glyph unicode="&#xe027;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM350 600l250 300 l250 -300h-150v-300h-200v300h-150z" />
+<glyph unicode="&#xe028;" d="M0 25v475l200 700h800q199 -700 200 -700v-475q0 -11 -7 -18t-18 -7h-1150q-11 0 -18 7t-7 18zM200 500h200l50 -200h300l50 200h200l-97 500h-606z" />
+<glyph unicode="&#xe029;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 397v401 l297 -200z" />
+<glyph unicode="&#xe030;" d="M23 600q0 -118 45.5 -224.5t123 -184t184 -123t224.5 -45.5t224.5 45.5t184 123t123 184t45.5 224.5h-150q0 -177 -125 -302t-302 -125t-302 125t-125 302t125 302t302 125q136 0 246 -81l-146 -146h400v400l-145 -145q-157 122 -355 122q-118 0 -224.5 -45.5t-184 -123 t-123 -184t-45.5 -224.5z" />
+<glyph unicode="&#xe031;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5q198 0 355 -122l145 145v-400h-400l147 147q-112 80 -247 80q-177 0 -302 -125t-125 -302h-150zM100 0v400h400l-147 -147q112 -80 247 -80q177 0 302 125t125 302h150q0 -118 -45.5 -224.5t-123 -184t-184 -123 t-224.5 -45.5q-198 0 -355 122z" />
+<glyph unicode="&#xe032;" d="M100 0h1100v1200h-1100v-1200zM200 100v900h900v-900h-900zM300 200v100h100v-100h-100zM300 400v100h100v-100h-100zM300 600v100h100v-100h-100zM300 800v100h100v-100h-100zM500 200h500v100h-500v-100zM500 400v100h500v-100h-500zM500 600v100h500v-100h-500z M500 800v100h500v-100h-500z" />
+<glyph unicode="&#xe033;" d="M0 100v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
+<glyph unicode="&#xe034;" d="M100 0v1100h100v-1100h-100zM300 400q60 60 127.5 84t127.5 17.5t122 -23t119 -30t110 -11t103 42t91 120.5v500q-40 -81 -101.5 -115.5t-127.5 -29.5t-138 25t-139.5 40t-125.5 25t-103 -29.5t-65 -115.5v-500z" />
+<glyph unicode="&#xe035;" d="M0 275q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 127 70.5 231.5t184.5 161.5t245 57t245 -57t184.5 -161.5t70.5 -231.5v-300q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 116 -49.5 227t-131 192.5t-192.5 131t-227 49.5t-227 -49.5t-192.5 -131t-131 -192.5 t-49.5 -227v-300zM200 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14zM800 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14z" />
+<glyph unicode="&#xe036;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM688 459l141 141l-141 141l71 71l141 -141l141 141l71 -71l-141 -141l141 -141l-71 -71l-141 141l-141 -141z" />
+<glyph unicode="&#xe037;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM700 857l69 53q111 -135 111 -310q0 -169 -106 -302l-67 54q86 110 86 248q0 146 -93 257z" />
+<glyph unicode="&#xe038;" d="M0 401v400h300l300 200v-800l-300 200h-300zM702 858l69 53q111 -135 111 -310q0 -170 -106 -303l-67 55q86 110 86 248q0 145 -93 257zM889 951l7 -8q123 -151 123 -344q0 -189 -119 -339l-7 -8l81 -66l6 8q142 178 142 405q0 230 -144 408l-6 8z" />
+<glyph unicode="&#xe039;" d="M0 0h500v500h-200v100h-100v-100h-200v-500zM0 600h100v100h400v100h100v100h-100v300h-500v-600zM100 100v300h300v-300h-300zM100 800v300h300v-300h-300zM200 200v100h100v-100h-100zM200 900h100v100h-100v-100zM500 500v100h300v-300h200v-100h-100v-100h-200v100 h-100v100h100v200h-200zM600 0v100h100v-100h-100zM600 1000h100v-300h200v-300h300v200h-200v100h200v500h-600v-200zM800 800v300h300v-300h-300zM900 0v100h300v-100h-300zM900 900v100h100v-100h-100zM1100 200v100h100v-100h-100z" />
+<glyph unicode="&#xe040;" d="M0 200h100v1000h-100v-1000zM100 0v100h300v-100h-300zM200 200v1000h100v-1000h-100zM500 0v91h100v-91h-100zM500 200v1000h200v-1000h-200zM700 0v91h100v-91h-100zM800 200v1000h100v-1000h-100zM900 0v91h200v-91h-200zM1000 200v1000h200v-1000h-200z" />
+<glyph unicode="&#xe041;" d="M1 700v475q0 10 7.5 17.5t17.5 7.5h474l700 -700l-500 -500zM148 953q0 -42 29 -71q30 -30 71.5 -30t71.5 30q29 29 29 71t-29 71q-30 30 -71.5 30t-71.5 -30q-29 -29 -29 -71z" />
+<glyph unicode="&#xe042;" d="M2 700v475q0 11 7 18t18 7h474l700 -700l-500 -500zM148 953q0 -42 30 -71q29 -30 71 -30t71 30q30 29 30 71t-30 71q-29 30 -71 30t-71 -30q-30 -29 -30 -71zM701 1200h100l700 -700l-500 -500l-50 50l450 450z" />
+<glyph unicode="&#xe043;" d="M100 0v1025l175 175h925v-1000l-100 -100v1000h-750l-100 -100h750v-1000h-900z" />
+<glyph unicode="&#xe044;" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
+<glyph unicode="&#xe045;" d="M0 100v700h200l100 -200h600l100 200h200v-700h-200v200h-800v-200h-200zM253 829l40 -124h592l62 124l-94 346q-2 11 -10 18t-18 7h-450q-10 0 -18 -7t-10 -18zM281 24l38 152q2 10 11.5 17t19.5 7h500q10 0 19.5 -7t11.5 -17l38 -152q2 -10 -3.5 -17t-15.5 -7h-600 q-10 0 -15.5 7t-3.5 17z" />
+<glyph unicode="&#xe046;" d="M0 200q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600z M356 500q0 100 72 172t172 72t172 -72t72 -172t-72 -172t-172 -72t-172 72t-72 172zM494 500q0 -44 31 -75t75 -31t75 31t31 75t-31 75t-75 31t-75 -31t-31 -75zM900 700v100h100v-100h-100z" />
+<glyph unicode="&#xe047;" d="M53 0h365v66q-41 0 -72 11t-49 38t1 71l92 234h391l82 -222q16 -45 -5.5 -88.5t-74.5 -43.5v-66h417v66q-34 1 -74 43q-18 19 -33 42t-21 37l-6 13l-385 998h-93l-399 -1006q-24 -48 -52 -75q-12 -12 -33 -25t-36 -20l-15 -7v-66zM416 521l178 457l46 -140l116 -317h-340 z" />
+<glyph unicode="&#xe048;" d="M100 0v89q41 7 70.5 32.5t29.5 65.5v827q0 28 -1 39.5t-5.5 26t-15.5 21t-29 14t-49 14.5v70h471q120 0 213 -88t93 -228q0 -55 -11.5 -101.5t-28 -74t-33.5 -47.5t-28 -28l-12 -7q8 -3 21.5 -9t48 -31.5t60.5 -58t47.5 -91.5t21.5 -129q0 -84 -59 -156.5t-142 -111 t-162 -38.5h-500zM400 200h161q89 0 153 48.5t64 132.5q0 90 -62.5 154.5t-156.5 64.5h-159v-400zM400 700h139q76 0 130 61.5t54 138.5q0 82 -84 130.5t-239 48.5v-379z" />
+<glyph unicode="&#xe049;" d="M200 0v57q77 7 134.5 40.5t65.5 80.5l173 849q10 56 -10 74t-91 37q-6 1 -10.5 2.5t-9.5 2.5v57h425l2 -57q-33 -8 -62 -25.5t-46 -37t-29.5 -38t-17.5 -30.5l-5 -12l-128 -825q-10 -52 14 -82t95 -36v-57h-500z" />
+<glyph unicode="&#xe050;" d="M-75 200h75v800h-75l125 167l125 -167h-75v-800h75l-125 -167zM300 900v300h150h700h150v-300h-50q0 29 -8 48.5t-18.5 30t-33.5 15t-39.5 5.5t-50.5 1h-200v-850l100 -50v-100h-400v100l100 50v850h-200q-34 0 -50.5 -1t-40 -5.5t-33.5 -15t-18.5 -30t-8.5 -48.5h-49z " />
+<glyph unicode="&#xe051;" d="M33 51l167 125v-75h800v75l167 -125l-167 -125v75h-800v-75zM100 901v300h150h700h150v-300h-50q0 29 -8 48.5t-18 30t-33.5 15t-40 5.5t-50.5 1h-200v-650l100 -50v-100h-400v100l100 50v650h-200q-34 0 -50.5 -1t-39.5 -5.5t-33.5 -15t-18.5 -30t-8 -48.5h-50z" />
+<glyph unicode="&#xe052;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 350q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM0 650q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1000q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 950q0 -20 14.5 -35t35.5 -15h600q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-600q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe053;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 650q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM200 350q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM200 950q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe054;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1000q-21 0 -35.5 15 t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-600 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe055;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe056;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM300 50v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800 q-21 0 -35.5 15t-14.5 35zM300 650v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 950v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe057;" d="M-101 500v100h201v75l166 -125l-166 -125v75h-201zM300 0h100v1100h-100v-1100zM500 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35 v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 650q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100 q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe058;" d="M1 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 650 q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM801 0v1100h100v-1100 h-100zM934 550l167 -125v75h200v100h-200v75z" />
+<glyph unicode="&#xe059;" d="M0 275v650q0 31 22 53t53 22h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53zM900 600l300 300v-600z" />
+<glyph unicode="&#xe060;" d="M0 44v1012q0 18 13 31t31 13h1112q19 0 31.5 -13t12.5 -31v-1012q0 -18 -12.5 -31t-31.5 -13h-1112q-18 0 -31 13t-13 31zM100 263l247 182l298 -131l-74 156l293 318l236 -288v500h-1000v-737zM208 750q0 56 39 95t95 39t95 -39t39 -95t-39 -95t-95 -39t-95 39t-39 95z " />
+<glyph unicode="&#xe062;" d="M148 745q0 124 60.5 231.5t165 172t226.5 64.5q123 0 227 -63t164.5 -169.5t60.5 -229.5t-73 -272q-73 -114 -166.5 -237t-150.5 -189l-57 -66q-10 9 -27 26t-66.5 70.5t-96 109t-104 135.5t-100.5 155q-63 139 -63 262zM342 772q0 -107 75.5 -182.5t181.5 -75.5 q107 0 182.5 75.5t75.5 182.5t-75.5 182t-182.5 75t-182 -75.5t-75 -181.5z" />
+<glyph unicode="&#xe063;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM173 600q0 -177 125.5 -302t301.5 -125v854q-176 0 -301.5 -125 t-125.5 -302z" />
+<glyph unicode="&#xe064;" d="M117 406q0 94 34 186t88.5 172.5t112 159t115 177t87.5 194.5q21 -71 57.5 -142.5t76 -130.5t83 -118.5t82 -117t70 -116t50 -125.5t18.5 -136q0 -89 -39 -165.5t-102 -126.5t-140 -79.5t-156 -33.5q-114 6 -211.5 53t-161.5 138.5t-64 210.5zM243 414q14 -82 59.5 -136 t136.5 -80l16 98q-7 6 -18 17t-34 48t-33 77q-15 73 -14 143.5t10 122.5l9 51q-92 -110 -119.5 -185t-12.5 -156z" />
+<glyph unicode="&#xe065;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5q366 -6 397 -14l-186 -186h-311q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v125l200 200v-225q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM436 341l161 50l412 412l-114 113l-405 -405zM995 1015l113 -113l113 113l-21 85l-92 28z" />
+<glyph unicode="&#xe066;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h261l2 -80q-133 -32 -218 -120h-145q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-53q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5 zM423 524q30 38 81.5 64t103 35.5t99 14t77.5 3.5l29 -1v-209l360 324l-359 318v-216q-7 0 -19 -1t-48 -8t-69.5 -18.5t-76.5 -37t-76.5 -59t-62 -88t-39.5 -121.5z" />
+<glyph unicode="&#xe067;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-169q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM342 632l283 -284l566 567l-136 137l-430 -431l-147 147z" />
+<glyph unicode="&#xe068;" d="M0 603l300 296v-198h200v200h-200l300 300l295 -300h-195v-200h200v198l300 -296l-300 -300v198h-200v-200h195l-295 -300l-300 300h200v200h-200v-198z" />
+<glyph unicode="&#xe069;" d="M200 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-1100l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe070;" d="M0 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-487l500 487v-1100l-500 488v-488l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe071;" d="M136 550l564 550v-487l500 487v-1100l-500 488v-488z" />
+<glyph unicode="&#xe072;" d="M200 0l900 550l-900 550v-1100z" />
+<glyph unicode="&#xe073;" d="M200 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800zM600 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe074;" d="M200 150q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v800q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe075;" d="M0 0v1100l500 -487v487l564 -550l-564 -550v488z" />
+<glyph unicode="&#xe076;" d="M0 0v1100l500 -487v487l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v488z" />
+<glyph unicode="&#xe077;" d="M300 0v1100l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438z" />
+<glyph unicode="&#xe078;" d="M100 250v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5zM100 500h1100l-550 564z" />
+<glyph unicode="&#xe079;" d="M185 599l592 -592l240 240l-353 353l353 353l-240 240z" />
+<glyph unicode="&#xe080;" d="M272 194l353 353l-353 353l241 240l572 -571l21 -22l-1 -1v-1l-592 -591z" />
+<glyph unicode="&#xe081;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h200v-200h200v200h200v200h-200v200h-200v-200h-200v-200z" />
+<glyph unicode="&#xe082;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h600v200h-600v-200z" />
+<glyph unicode="&#xe083;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM246 459l213 -213l141 142l141 -142l213 213l-142 141l142 141l-213 212l-141 -141l-141 142l-212 -213l141 -141z" />
+<glyph unicode="&#xe084;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM270 551l276 -277l411 411l-175 174l-236 -236l-102 102z" />
+<glyph unicode="&#xe085;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM363 700h144q4 0 11.5 -1t11 -1t6.5 3t3 9t1 11t3.5 8.5t3.5 6t5.5 4t6.5 2.5t9 1.5t9 0.5h11.5h12.5q19 0 30 -10t11 -26 q0 -22 -4 -28t-27 -22q-5 -1 -12.5 -3t-27 -13.5t-34 -27t-26.5 -46t-11 -68.5h200q5 3 14 8t31.5 25.5t39.5 45.5t31 69t14 94q0 51 -17.5 89t-42 58t-58.5 32t-58.5 15t-51.5 3q-105 0 -172 -56t-67 -183zM500 300h200v100h-200v-100z" />
+<glyph unicode="&#xe086;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM400 300h400v100h-100v300h-300v-100h100v-200h-100v-100zM500 800h200v100h-200v-100z" />
+<glyph unicode="&#xe087;" d="M0 500v200h194q15 60 36 104.5t55.5 86t88 69t126.5 40.5v200h200v-200q54 -20 113 -60t112.5 -105.5t71.5 -134.5h203v-200h-203q-25 -102 -116.5 -186t-180.5 -117v-197h-200v197q-140 27 -208 102.5t-98 200.5h-194zM290 500q24 -73 79.5 -127.5t130.5 -78.5v206h200 v-206q149 48 201 206h-201v200h200q-25 74 -76 127.5t-124 76.5v-204h-200v203q-75 -24 -130 -77.5t-79 -125.5h209v-200h-210z" />
+<glyph unicode="&#xe088;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM356 465l135 135 l-135 135l109 109l135 -135l135 135l109 -109l-135 -135l135 -135l-109 -109l-135 135l-135 -135z" />
+<glyph unicode="&#xe089;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM322 537l141 141 l87 -87l204 205l142 -142l-346 -345z" />
+<glyph unicode="&#xe090;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -115 62 -215l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5zM391 245q97 -59 209 -59q171 0 292.5 121.5t121.5 292.5 q0 112 -59 209z" />
+<glyph unicode="&#xe091;" d="M0 547l600 453v-300h600v-300h-600v-301z" />
+<glyph unicode="&#xe092;" d="M0 400v300h600v300l600 -453l-600 -448v301h-600z" />
+<glyph unicode="&#xe093;" d="M204 600l450 600l444 -600h-298v-600h-300v600h-296z" />
+<glyph unicode="&#xe094;" d="M104 600h296v600h300v-600h298l-449 -600z" />
+<glyph unicode="&#xe095;" d="M0 200q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453l-600 -448v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5z" />
+<glyph unicode="&#xe096;" d="M0 0v400l129 -129l294 294l142 -142l-294 -294l129 -129h-400zM635 777l142 -142l294 294l129 -129v400h-400l129 -129z" />
+<glyph unicode="&#xe097;" d="M34 176l295 295l-129 129h400v-400l-129 130l-295 -295zM600 600v400l129 -129l295 295l142 -141l-295 -295l129 -130h-400z" />
+<glyph unicode="&#xe101;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5t224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5zM456 851l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5 t21.5 34.5l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5zM500 300h200v100h-200v-100z" />
+<glyph unicode="&#xe102;" d="M0 800h100v-200h400v300h200v-300h400v200h100v100h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100z M100 0h400v400h-400v-400zM200 900q-3 0 14 48t35 96l18 47l214 -191h-281zM700 0v400h400v-400h-400zM731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269z" />
+<glyph unicode="&#xe103;" d="M0 -22v143l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55z M238.5 300.5q19.5 -6.5 86.5 76.5q55 66 367 234q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5z" />
+<glyph unicode="&#xe104;" d="M111 408q0 -33 5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5 t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5q2 -12 8 -41.5t8 -43t6 -39.5 t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85z" />
+<glyph unicode="&#xe105;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30l26 -40l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5 t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30zM120 600q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5t123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54 q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l105 105q-37 24 -75 72t-57 84l-20 36z" />
+<glyph unicode="&#xe106;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43zM120 600q210 -282 393 -336l37 141q-107 18 -178.5 101.5t-71.5 193.5 q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68l-14 26zM780 161l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52l26 -40l-26 -40 q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5t-124 -100t-146.5 -79z" />
+<glyph unicode="&#xe107;" d="M-97.5 34q13.5 -34 50.5 -34h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66zM155 200l445 723l445 -723h-345v100h-200v-100h-345zM500 600l100 -300l100 300v100h-200v-100z" />
+<glyph unicode="&#xe108;" d="M100 262v41q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64 q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5z" />
+<glyph unicode="&#xe109;" d="M0 50q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100v-750zM0 900h1100v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 100v100h100v-100h-100zM100 300v100h100v-100h-100z M100 500v100h100v-100h-100zM300 100v100h100v-100h-100zM300 300v100h100v-100h-100zM300 500v100h100v-100h-100zM500 100v100h100v-100h-100zM500 300v100h100v-100h-100zM500 500v100h100v-100h-100zM700 100v100h100v-100h-100zM700 300v100h100v-100h-100zM700 500 v100h100v-100h-100zM900 100v100h100v-100h-100zM900 300v100h100v-100h-100zM900 500v100h100v-100h-100z" />
+<glyph unicode="&#xe110;" d="M0 200v200h259l600 600h241v198l300 -295l-300 -300v197h-159l-600 -600h-341zM0 800h259l122 -122l141 142l-181 180h-341v-200zM678 381l141 142l122 -123h159v198l300 -295l-300 -300v197h-241z" />
+<glyph unicode="&#xe111;" d="M0 400v600q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5z" />
+<glyph unicode="&#xe112;" d="M100 600v200h300v-250q0 -113 6 -145q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5 t-58 109.5t-31.5 116t-15 104t-3 83zM100 900v300h300v-300h-300zM800 900v300h300v-300h-300z" />
+<glyph unicode="&#xe113;" d="M-30 411l227 -227l352 353l353 -353l226 227l-578 579z" />
+<glyph unicode="&#xe114;" d="M70 797l580 -579l578 579l-226 227l-353 -353l-352 353z" />
+<glyph unicode="&#xe115;" d="M-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196zM402 1000l215 -200h381v-400h-198l299 -283l299 283h-200v600h-796z" />
+<glyph unicode="&#xe116;" d="M18 939q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15 t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43z" />
+<glyph unicode="&#xe117;" d="M0 0v800h1200v-800h-1200zM0 900v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-100h-1200z" />
+<glyph unicode="&#xe118;" d="M1 0l300 700h1200l-300 -700h-1200zM1 400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000z" />
+<glyph unicode="&#xe119;" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
+<glyph unicode="&#xe120;" d="M0 600l300 298v-198h600v198l300 -298l-300 -297v197h-600v-197z" />
+<glyph unicode="&#xe121;" d="M0 100v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM800 100h100v100h-100v-100z M1000 100h100v100h-100v-100z" />
+<glyph unicode="&#xe122;" d="M-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5zM99 500v250v5q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351z M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe123;" d="M74 350q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37 t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5zM497 110q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6t-103 6z" />
+<glyph unicode="&#xe124;" d="M21 445l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180l-155 180l-45 -233l-224 78l78 -225l-233 -44l179 -156z" />
+<glyph unicode="&#xe125;" d="M0 200h200v600h-200v-600zM300 275q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400z M400 300v375l150 212l100 213h50v-175l-50 -225h450v-125l-250 -375h-214l-136 100h-100z" />
+<glyph unicode="&#xe126;" d="M0 400v600h200v-600h-200zM300 525v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5h-50q-27 0 -51 20t-38 48l-96 198l-145 196 q-20 26 -20 63zM400 525l150 -212l100 -213h50v175l-50 225h450v125l-250 375h-214l-136 -100h-100v-375z" />
+<glyph unicode="&#xe127;" d="M8 200v600h200v-600h-200zM308 275v525q0 17 14 35.5t28 28.5l14 9l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341 q-7 0 -90 81t-83 94zM408 289l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83l-339 -236v-503z" />
+<glyph unicode="&#xe128;" d="M-101 651q0 72 54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111zM-1 601h222 q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237l-87 -83l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100zM999 201v600h200v-600h-200z" />
+<glyph unicode="&#xe129;" d="M97 719l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53zM172 739l83 86l183 -146 q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294l-89 -100h-503zM400 0v200h600v-200h-600z" />
+<glyph unicode="&#xe130;" d="M1 585q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15zM76 565l237 339h503l89 -100v-294l-340 -130 q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146zM305 1104v200h600v-200h-600z" />
+<glyph unicode="&#xe131;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 500h300l-2 -194l402 294l-402 298v-197h-298v-201z" />
+<glyph unicode="&#xe132;" d="M0 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5zM200 600l400 -294v194h302v201h-300v197z" />
+<glyph unicode="&#xe133;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600h200v-300h200v300h200l-300 400z" />
+<glyph unicode="&#xe134;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600l300 -400l300 400h-200v300h-200v-300h-200z" />
+<glyph unicode="&#xe135;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM254 780q-8 -34 5.5 -93t7.5 -87q0 -9 17 -44t16 -60q12 0 23 -5.5 t23 -15t20 -13.5q20 -10 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55.5t-20 -57.5q12 -21 22.5 -34.5t28 -27t36.5 -17.5q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q101 -2 221 111q31 30 47 48t34 49t21 62q-14 9 -37.5 9.5t-35.5 7.5q-14 7 -49 15t-52 19 q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q8 16 22 22q6 -1 26 -1.5t33.5 -4.5t19.5 -13q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5 t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23 q-19 -3 -37 0q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6q-15 -3 -46 0t-45 -3q-20 -6 -51.5 -25.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79zM518 915q3 12 16 30.5t16 25.5q10 -10 18.5 -10t14 6t14.5 14.5t16 12.5q0 -18 8 -42.5t16.5 -44 t9.5 -23.5q-6 1 -39 5t-53.5 10t-36.5 16z" />
+<glyph unicode="&#xe136;" d="M0 164.5q0 21.5 15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138l145 -232l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5z" />
+<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M0 196v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 596v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5zM0 996v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM600 596h500v100h-500v-100zM800 196h300v100h-300v-100zM900 996h200v100h-200v-100z" />
+<glyph unicode="&#xe138;" d="M100 1100v100h1000v-100h-1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
+<glyph unicode="&#xe139;" d="M0 200v200h1200v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500z M500 1000h200v100h-200v-100z" />
+<glyph unicode="&#xe140;" d="M0 0v400l129 -129l200 200l142 -142l-200 -200l129 -129h-400zM0 800l129 129l200 -200l142 142l-200 200l129 129h-400v-400zM729 329l142 142l200 -200l129 129v-400h-400l129 129zM729 871l200 200l-129 129h400v-400l-129 129l-200 -200z" />
+<glyph unicode="&#xe141;" d="M0 596q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 596q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM291 655 q0 23 15.5 38.5t38.5 15.5t39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39zM400 850q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5zM513 609q0 32 21 56.5t52 29.5l122 126l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5 q22 0 38 -16t16 -39t-16 -39t-38 -16q-16 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5q-37 0 -62.5 25.5t-25.5 61.5zM800 655q0 22 16 38t39 16t38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39z" />
+<glyph unicode="&#xe142;" d="M-40 375q-13 -95 35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36 q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256l7 -7l69 -60l517 511 q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163z" />
+<glyph unicode="&#xe143;" d="M79 784q0 131 99 229.5t230 98.5q144 0 242 -129q103 129 245 129q130 0 227 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-197 -191 -293 -322l-17 -23l-16 23q-43 58 -100 122.5t-92 99.5t-101 100l-84.5 84.5t-68 74t-60 78t-33.5 70.5t-15 78z M250 784q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203l12 12q64 62 97.5 97t64.5 79t31 72q0 71 -48 119.5t-106 48.5q-73 0 -131 -83l-118 -171l-114 174q-51 80 -124 80q-59 0 -108.5 -49.5t-49.5 -118.5z" />
+<glyph unicode="&#xe144;" d="M57 353q0 -94 66 -160l141 -141q66 -66 159 -66q95 0 159 66l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159zM269 706q0 -93 66 -159l141 -141l19 -17l105 105 l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159z" />
+<glyph unicode="&#xe145;" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM300 300h600v700h-600v-700zM496 150q0 -43 30.5 -73.5t73.5 -30.5t73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5 t-73.5 -30.5t-30.5 -73.5z" />
+<glyph unicode="&#xe146;" d="M0 0l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207z" />
+<glyph unicode="&#xe148;" d="M295 433h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5v-307l64 -14 q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5zM466 889q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3v274q-61 -8 -97.5 -37.5t-36.5 -102.5zM700 237 q170 18 170 151q0 64 -44 99.5t-126 60.5v-311z" />
+<glyph unicode="&#xe149;" d="M100 600v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5 t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10 t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221z" />
+<glyph unicode="&#xe150;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM602 900l298 300l298 -300h-198v-900h-200v900h-198z" />
+<glyph unicode="&#xe151;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v200h100v-100h200v-100h-300zM700 400v100h300v-200h-99v-100h-100v100h99v100h-200zM700 700v500h300v-500h-100v100h-100v-100h-100zM801 900h100v200h-100v-200z" />
+<glyph unicode="&#xe152;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v500h300v-500h-100v100h-100v-100h-100zM700 700v200h100v-100h200v-100h-300zM700 1100v100h300v-200h-99v-100h-100v100h99v100h-200zM801 200h100v200h-100v-200z" />
+<glyph unicode="&#xe153;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 100v400h300v-500h-100v100h-200zM800 1100v100h200v-500h-100v400h-100zM901 200h100v200h-100v-200z" />
+<glyph unicode="&#xe154;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 400v100h200v-500h-100v400h-100zM800 800v400h300v-500h-100v100h-200zM901 900h100v200h-100v-200z" />
+<glyph unicode="&#xe155;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h500v-200h-500zM700 400v200h400v-200h-400zM700 700v200h300v-200h-300zM700 1000v200h200v-200h-200z" />
+<glyph unicode="&#xe156;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h200v-200h-200zM700 400v200h300v-200h-300zM700 700v200h400v-200h-400zM700 1000v200h500v-200h-500z" />
+<glyph unicode="&#xe157;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500z" />
+<glyph unicode="&#xe158;" d="M0 400v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-163 0 -281.5 117.5t-118.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM400 300l333 250l-333 250v-500z" />
+<glyph unicode="&#xe159;" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 700l250 -333l250 333h-500z" />
+<glyph unicode="&#xe160;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 400h500l-250 333z" />
+<glyph unicode="&#xe161;" d="M0 400v300h300v200l400 -350l-400 -350v200h-300zM500 0v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400z" />
+<glyph unicode="&#xe162;" d="M216 519q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32l9 -8l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40z" />
+<glyph unicode="&#xe163;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300zM600 400v300h300v200l400 -350l-400 -350v200h-300z " />
+<glyph unicode="&#xe164;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98l-78 73l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5 v-300zM496 709l353 342l-149 149h500v-500l-149 149l-342 -353z" />
+<glyph unicode="&#xe165;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM406 600 q0 80 57 137t137 57t137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137z" />
+<glyph unicode="&#xe166;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 800l445 -500l450 500h-295v400h-300v-400h-300zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe167;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 700h300v-300h300v300h295l-445 500zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe168;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 705l305 -305l596 596l-154 155l-442 -442l-150 151zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe169;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 988l97 -98l212 213l-97 97zM200 401h700v699l-250 -239l-149 149l-212 -212l149 -149zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe170;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM200 612l212 -212l98 97l-213 212zM300 1200l239 -250l-149 -149l212 -212l149 148l248 -237v700h-699zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe171;" d="M23 415l1177 784v-1079l-475 272l-310 -393v416h-392zM494 210l672 938l-672 -712v-226z" />
+<glyph unicode="&#xe172;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe173;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120l-126 -127h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM581 306l123 123l120 -120l353 352l123 -123l-475 -476zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe174;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170l-298 -298h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200zM700 133l170 170l-170 170l127 127l170 -170l170 170l127 -128l-170 -169l170 -170 l-127 -127l-170 170l-170 -170z" />
+<glyph unicode="&#xe175;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300l300 -300l300 300h-200v300h-200v-300h-200zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe176;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200l-298 -298h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300h200v-300h200v300h200l-300 300zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe177;" d="M0 250q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200v-550zM0 900h1200v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 300v200h400v-200h-400z" />
+<glyph unicode="&#xe178;" d="M0 400l300 298v-198h400v-200h-400v-198zM100 800v200h100v-200h-100zM300 800v200h100v-200h-100zM500 800v200h400v198l300 -298l-300 -298v198h-400zM800 300v200h100v-200h-100zM1000 300h100v200h-100v-200z" />
+<glyph unicode="&#xe179;" d="M100 700v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300l50 100l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447zM800 597q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5 t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359z" />
+<glyph unicode="&#xe180;" d="M100 0h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5 t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56z" />
+<glyph unicode="&#xe181;" d="M0 300q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM100 100h400l200 200h105l295 98v-298h-425l-100 -100h-375zM100 300v200h300v-200h-300zM100 600v200h300v-200h-300z M100 1000h400l200 -200v-98l295 98h105v200h-425l-100 100h-375zM700 402v163l400 133v-163z" />
+<glyph unicode="&#xe182;" d="M16.5 974.5q0.5 -21.5 16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118q17 17 20 41.5 t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14l-162 -162q-1 -11 -0.5 -32.5z" />
+<glyph unicode="&#xe183;" d="M0 50v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5t30 -27.5t12 -24l1 -10v-50l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5zM0 712 q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40 t-53.5 -36.5t-31 -27.5l-9 -10v-200z" />
+<glyph unicode="&#xe184;" d="M100 0v100h1100v-100h-1100zM175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250z" />
+<glyph unicode="&#xe185;" d="M100 0h300v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400zM500 0v1000q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300zM900 0v700q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300z" />
+<glyph unicode="&#xe186;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe187;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h100v200h100v-200h100v500h-100v-200h-100v200h-100v-500zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe188;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v100h-200v300h200v100h-300v-500zM600 300h300v100h-200v300h200v100h-300v-500z" />
+<glyph unicode="&#xe189;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 550l300 -150v300zM600 400l300 150l-300 150v-300z" />
+<glyph unicode="&#xe190;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300v500h700v-500h-700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM575 549 q0 -65 27 -107t68 -42h130v300h-130q-38 0 -66.5 -43t-28.5 -108z" />
+<glyph unicode="&#xe191;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe192;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v400h-200v100h-100v-500zM301 400v200h100v-200h-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe193;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 700v100h300v-300h-99v-100h-100v100h99v200h-200zM201 300v100h100v-100h-100zM601 300v100h100v-100h-100z M700 700v100h200v-500h-100v400h-100z" />
+<glyph unicode="&#xe194;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 500v200 l100 100h300v-100h-300v-200h300v-100h-300z" />
+<glyph unicode="&#xe195;" d="M0 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 400v400h300 l100 -100v-100h-100v100h-200v-100h200v-100h-200v-100h-100zM700 400v100h100v-100h-100z" />
+<glyph unicode="&#xe197;" d="M-14 494q0 -80 56.5 -137t135.5 -57h222v300h400v-300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200h200v300h200v-300 h200l-300 -300z" />
+<glyph unicode="&#xe198;" d="M-14 494q0 -80 56.5 -137t135.5 -57h8l414 414l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200l300 300 l300 -300h-200v-300h-200v300h-200z" />
+<glyph unicode="&#xe199;" d="M100 200h400v-155l-75 -45h350l-75 45v155h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170z" />
+<glyph unicode="&#xe200;" d="M121 700q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350l-75 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5 t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5z" />
+</font>
+</defs></svg> 

BIN
cmd/ursrv/static/fonts/glyphicons-halflings-regular.ttf


BIN
cmd/ursrv/static/fonts/glyphicons-halflings-regular.woff


+ 631 - 0
cmd/ursrv/static/index.html

@@ -0,0 +1,631 @@
+<!DOCTYPE html>
+<!--
+Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
+Use of this source code is governed by an MIT-style license that can be
+found in the LICENSE file.
+-->
+<html lang="en">
+<head>
+  <meta charset="utf-8">
+  <meta http-equiv="X-UA-Compatible" content="IE=edge">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <meta name="description" content="">
+  <meta name="author" content="">
+  <link rel="shortcut icon" href="static/assets/img/favicon.png">
+
+  <title>Syncthing Usage Reports</title>
+  <link href="static/bootstrap/css/bootstrap.min.css" rel="stylesheet">
+  <script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
+  <script type="text/javascript" src="static/bootstrap/js/bootstrap.min.js"></script>
+  <script type="text/javascript" src="https://maps.googleapis.com/maps/api/js?libraries=visualization"></script>
+  <style type="text/css">
+    body {
+      margin: 40px;
+      font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+    }
+    tr.main td {
+      font-weight: bold;
+    }
+    tr.child td.first {
+      padding-left: 2em;
+    }
+    .progress-bar {
+      overflow:hidden;
+      white-space:nowrap;
+      text-overflow: ellipsis;
+    }
+  </style>
+  <script type="text/javascript"
+          src="https://www.google.com/jsapi?autoload={
+            'modules':[{
+              'name':'visualization',
+              'version':'1',
+              'packages':['corechart']
+            }]
+          }"></script>
+
+  <script type="text/javascript">
+    google.setOnLoadCallback(drawVersionChart);
+    google.setOnLoadCallback(drawMovementChart);
+    google.setOnLoadCallback(drawBlockStatsChart);
+    google.setOnLoadCallback(drawPerformanceCharts);
+    google.setOnLoadCallback(drawHeatMap);
+
+    function drawVersionChart() {
+      var jsonData = $.ajax({url: "summary.json", dataType:"json", async: false}).responseText;
+      var rows = JSON.parse(jsonData);
+
+      var data = new google.visualization.DataTable();
+      data.addColumn('date', 'Day');
+      for (var i = 1; i < rows[0].length; i++){
+        data.addColumn('number', rows[0][i]);
+      }
+      for (var i = 1; i < rows.length; i++){
+        rows[i][0] = new Date(rows[i][0]);
+        data.addRow(rows[i]);
+      };
+
+      var options = {
+        legend: { position: 'bottom', alignment: 'center' },
+        isStacked: true,
+        colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
+        chartArea: {left: 80, top: 20, width: '1020', height: '300'},
+      };
+
+      var chart = new google.visualization.AreaChart(document.getElementById('versionChart'));
+      chart.draw(data, options);
+    }
+
+    function drawMovementChart() {
+      var jsonData = $.ajax({url: "movement.json", dataType:"json", async: false}).responseText;
+      var rows = JSON.parse(jsonData);
+
+      var data = new google.visualization.DataTable();
+      data.addColumn('date', 'Day');
+      for (var i = 1; i < rows[0].length; i++){
+        data.addColumn('number', rows[0][i]);
+      }
+
+      for (var i = 1; i < rows.length; i++){
+        rows[i][0] = new Date(rows[i][0]);
+        if (rows[i][1] > 500) {
+          rows[i][1] = null;
+        }
+        if (rows[i][2] < -500) {
+          rows[i][2] = null;
+        }
+        data.addRow(rows[i]);
+      };
+
+      var options = {
+        legend: { position: 'bottom', alignment: 'center' },
+        colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
+        chartArea: {left: 80, top: 20, width: '1020', height: '300'},
+      };
+
+      var chart = new google.visualization.AreaChart(document.getElementById('movementChart'));
+      chart.draw(data, options);
+    }
+
+    function formatGibibytes(gibibytes, decimals) {
+      if(gibibytes == 0) return '0 GiB';
+      var k = 1024,
+        dm = decimals || 2,
+        sizes = ['GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'],
+        i = Math.floor(Math.log(gibibytes) / Math.log(k));
+      if (i < 0) {
+        sizes = 'MiB';
+      } else {
+        sizes = sizes[i];
+      }
+      return parseFloat((gibibytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes;
+    }
+
+
+    function drawBlockStatsChart() {
+      var jsonData = $.ajax({url: "blockstats.json", dataType:"json", async: false}).responseText;
+      var rows = JSON.parse(jsonData);
+
+      var data = new google.visualization.DataTable();
+      data.addColumn('date', 'Day');
+      for (var i = 1; i < rows[0].length; i++){
+        data.addColumn('number', rows[0][i]);
+      }
+
+      var totals = [0, 0, 0, 0, 0, 0];
+      for (var i = 1; i < rows.length; i++){
+        rows[i][0] = new Date(rows[i][0]);
+        for (var j = 2; j < rows[i].length; j++) {
+          totals[j-2] += rows[i][j];
+        }
+        data.addRow(rows[i]);
+      };
+
+      var totalTotals = totals.reduce(function(a, b) { return a + b; }, 0);
+
+      if (totalTotals > 0) {
+        var content = "<table class='table'>\n"
+        for (var j = 2; j < rows[0].length; j++) {
+          content += "<tr><td><b>" + rows[0][j].replace(' (GiB)', '') + "</b></td><td>" + formatGibibytes(totals[j-2].toFixed(2)) + " (" + ((100*totals[j-2])/totalTotals).toFixed(2) +"%)</td></tr>\n";
+        }
+        content += "</table>";
+        document.getElementById("data-to-date").innerHTML = content;
+      } else {
+        // No data, hide it.
+        document.getElementById("block-stats").outerHTML  = "";
+        return;
+      }
+
+      var options = {
+        focusTarget: 'category',
+        vAxes: {0: {}, 1: {}},
+        series: {0: {type: 'line', targetAxisIndex:1}},
+        isStacked: true,
+        legend: {position: 'none'},
+        colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
+        chartArea: {left: 80, top: 20, width: '1020', height: '300'},
+      };
+
+      var chart = new google.visualization.AreaChart(document.getElementById('blockStatsChart'));
+      chart.draw(data, options);
+    }
+
+    function drawPerformanceCharts() {
+      var jsonData = $.ajax({url: "/performance.json", dataType:"json", async: false}).responseText;
+      var rows = JSON.parse(jsonData);
+      for (var i = 1; i < rows.length; i++){
+        rows[i][0] = new Date(rows[i][0]);
+      }
+
+      drawChart(rows, 1, 'Total Number of Files', 'totFilesChart', 1e6, 1);
+      drawChart(rows, 2, 'Total Folder Size (GiB)', 'totMiBChart', 1e6, 1024);
+      drawChart(rows, 3, 'Hash Performance (MiB/s)', 'hashPerfChart', 1000, 1);
+      drawChart(rows, 4, 'System RAM Size (GiB)', 'memSizeChart', 1e6, 1024);
+      drawChart(rows, 5, 'Memory Usage (MiB)', 'memUsageChart', 250, 1);
+    }
+
+    function drawChart(rows, index, title, id, cutoff, divisor) {
+      var data = new google.visualization.DataTable();
+      data.addColumn('date', 'Day');
+      data.addColumn('number', title);
+
+      var row;
+      for (var i = 1; i < rows.length; i++){
+          row = [rows[i][0], rows[i][index] / divisor];
+        if (row[1] > cutoff) {
+          row[1] = null;
+        }
+        data.addRow(row);
+      }
+
+      var options = {
+        legend: { position: 'bottom', alignment: 'center' },
+        colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
+        chartArea: {left: 80, top: 20, width: '1020', height: '300'},
+        vAxes: {0: {minValue: 0}},
+      };
+
+      var chart = new google.visualization.LineChart(document.getElementById(id));
+      chart.draw(data, options);
+    }
+
+    var locations = [];
+    {{range $location, $weight := .locations}}
+    locations.push({location: new google.maps.LatLng({{- $location.Latitude -}}, {{- $location.Longitude -}}), weight: {{- $weight -}}});
+    {{- end}}
+
+    function drawHeatMap() {
+      if (locations.length == 0) {
+        return;
+      }
+      var mapBounds = new google.maps.LatLngBounds();
+      var map = new google.maps.Map(document.getElementById('map'), {
+        zoom: 1,
+        mapTypeId: google.maps.MapTypeId.ROADMAP
+      });
+      var heatmap = new google.maps.visualization.HeatmapLayer({
+        data: locations
+      });
+      heatmap.set('radius', 10);
+      heatmap.set('maxIntensity', 20);
+      heatmap.set('gradient', [
+        'rgba(0, 255, 255, 0)',
+        'rgba(0, 255, 255, 1)',
+        'rgba(0, 191, 255, 1)',
+        'rgba(0, 127, 255, 1)',
+        'rgba(0, 63, 255, 1)',
+        'rgba(0, 0, 255, 1)',
+        'rgba(0, 0, 223, 1)',
+        'rgba(0, 0, 191, 1)',
+        'rgba(0, 0, 159, 1)',
+        'rgba(0, 0, 127, 1)',
+        'rgba(63, 0, 91, 1)',
+        'rgba(127, 0, 63, 1)',
+        'rgba(191, 0, 31, 1)',
+        'rgba(255, 0, 0, 1)'
+      ]);
+      heatmap.setMap(map);
+      for (var x = 0; x < locations.length; x++) {
+        mapBounds.extend(locations[x].location);
+      }
+      map.fitBounds(mapBounds);
+      if (locations.length == 1) {
+        map.setZoom(13);
+      }
+    }
+  </script>
+</head>
+
+<body>
+  <div class="container">
+    <div class="row">
+      <div class="col-md-12">
+        <h1>Syncthing Usage Data</h1>
+
+        <h4 id="active-users">Active Users per Day and Version</h4>
+        <p>
+          This is the total number of unique users with reporting enabled, per day. Area color represents the major version.
+        </p>
+        <div class="img-thumbnail" id="versionChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+
+        <h4 id="joining-leaving">Users Joining and Leaving per Day</h4>
+        <p>
+          This is the total number of unique users joining and leaving per day. A user is counted as "joined" on the first day their unique ID is seen, and as "left" on the last day the unique ID was seen before an absence of two weeks or longer. "Bounced" refers to users who joined and left on the same day.
+        </p>
+        <div class="img-thumbnail" id="movementChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+        <p class="text-muted">
+         Reappearance of users causes the "left" data to shrink retroactively.
+        </p>
+        <div id="block-stats">
+          <h4>Data Transfers per Day</h4>
+          <p>
+            This is the total data transferred per day. It also shows how much data was saved (not transferred) by each of the methods syncthing uses.
+          </p>
+          <div class="img-thumbnail" id="blockStatsChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+          <h4 id="totals-to-date">Totals to date</h4>
+          <p id="data-to-date">
+            No data
+          </p>
+        </div>
+
+        <h4 id="metrics">Usage Metrics</h4>
+        <p>
+          This is the aggregated usage report data for the last 24 hours. Data based on <b>{{.nodes}}</b> devices that have reported in.
+        </p>
+
+        {{if .locations}}
+        <div class="img-thumbnail" id="map" style="width: 1130px; height: 400px; padding: 10px;"></div>
+        <p class="text-muted">
+         Heatmap max intensity is capped at 20 reports within a location.
+        </p>
+        <div class="panel panel-default">
+          <div class="panel-heading">
+            <h4 class="panel-title">
+              <a data-toggle="collapse" href="#collapseTwo">Break down per country</a>
+            </h4>
+          </div>
+          <div id="collapseTwo" class="panel-collapse collapse">
+            <div class="panel-body">
+              <div class="row">
+                <div class="col-md-6">
+                  <table class="table table-striped">
+                    <tbody>
+                      {{range .contries | slice 2 1}}
+                      <tr>
+                        <td style="width: 45%">{{.Key}}</td>
+                        <td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
+                        <td style="width: 5%" class="text-right">{{.Count}}</td>
+                        <td>
+                            <div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
+                        </td>
+                      </tr>
+                      {{end}}
+                    </tbody>
+                  </table>
+                </div>
+                <div class="col-md-6">
+                  <table class="table table-striped">
+                    <tbody>
+                      {{range .contries | slice 2 2}}
+                      <tr>
+                        <td style="width: 45%">{{.Key}}</td>
+                        <td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
+                        <td style="width: 5%" class="text-right">{{.Count}}</td>
+                        <td>
+                            <div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
+                        </td>
+                      </tr>
+                      {{end}}
+                    </tbody>
+                  </table>
+                </div>
+              </div>
+            </div>
+          </div>
+        </div>
+        {{end}}
+        <table class="table table-striped">
+          <thead>
+            <tr>
+              <th></th>
+              <th colspan="4" class="text-center">
+                <a href="https://en.wikipedia.org/wiki/Percentile">Percentile</a>
+              </th>
+            </tr>
+            <tr>
+              <th></th>
+              <th class="text-right">5%</th>
+              <th class="text-right">50%</th>
+              <th class="text-right">95%</th>
+              <th class="text-right">100%</th>
+            </tr>
+          </thead>
+          <tbody>
+            {{range .categories}}
+            <tr>
+              <td>{{.Descr}}</td>
+              <td class="text-right">{{index .Values 0 | number .Type | commatize " "}}{{.Unit}}</td>
+              <td class="text-right">{{index .Values 1 | number .Type | commatize " "}}{{.Unit}}</td>
+              <td class="text-right">{{index .Values 2 | number .Type | commatize " "}}{{.Unit}}</td>
+              <td class="text-right">{{index .Values 3 | number .Type | commatize " "}}{{.Unit}}</td>
+            </tr>
+            {{end}}
+          </tbody>
+        </table>
+      </div>
+    </div>
+
+    <div class="row">
+      <div class="col-md-6">
+        <table class="table table-striped">
+          <thead>
+            <tr>
+              <th>Version</th><th class="text-right">Devices</th><th class="text-right">Share</th>
+            </tr>
+          </thead>
+          <tbody>
+            {{range .versions}}
+              {{if gt .Percentage 0.5}}
+                <tr class="main">
+                  <td>{{.Key}}</td>
+                  <td class="text-right">{{.Count}}</td>
+                  <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+                </tr>
+                {{range .Items}}
+                  <tr class="child">
+                    <td class="first">{{.Key}}</td>
+                    <td class="text-right">{{.Count}}</td>
+                    <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+                  </tr>
+                {{end}}
+              {{end}}
+            {{end}}
+          </tbody>
+        </table>
+        <table class="table table-striped">
+          <thead>
+            <tr>
+                <th>Penetration Level</th>
+                <th>Version</th>
+              <th class="text-right">Actual</th>
+            </tr>
+          </thead>
+          <tbody>
+            {{range .versionPenetrations}}
+            <tr>
+                <td>{{.Count}}%</td>
+                <td>&ge; {{.Key}}</td>
+              <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+            </tr>
+            {{end}}
+          </tbody>
+        </table>
+      </div>
+
+      <div class="col-md-6">
+        <table class="table table-striped">
+          <thead>
+            <tr>
+              <th>Platform</th>
+              <th class="text-right">Devices</th>
+              <th class="text-right">Share</th>
+            </tr>
+          </thead>
+          <tbody>
+            {{range .platforms}}
+              <tr class="main">
+                <td>{{.Key}}</td>
+                <td class="text-right">{{.Count}}</td>
+                <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+              </tr>
+              {{range .Items}}
+                <tr class="child">
+                  <td class="first">{{.Key}}</td>
+                  <td class="text-right">{{.Count}}</td>
+                  <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+                </tr>
+              {{end}}
+            {{end}}
+          </tbody>
+        </table>
+      </div>
+
+    </div>
+    <div class="row">
+
+      <div class="col-md-6">
+        <table class="table table-striped">
+          <thead>
+            <tr>
+              <th>Compiler</th>
+              <th class="text-right">Devices</th>
+              <th class="text-right">Share</th>
+            </tr>
+          </thead>
+          <tbody>
+            {{range .compilers}}
+              <tr class="main">
+                <td>{{.Key}}</td>
+                <td class="text-right">{{.Count}}</td>
+                <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+              </tr>
+              {{range .Items}}
+                <tr class="child">
+                  <td class="first">{{.Key}}</td>
+                  <td class="text-right">{{.Count}}</td>
+                  <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+                </tr>
+              {{end}}
+            {{end}}
+          </tbody>
+        </table>
+      </div>
+
+      <div class="col-md-6">
+        <table class="table table-striped">
+          <thead>
+            <tr>
+              <th>Builder</th>
+              <th class="text-right">Devices</th>
+              <th class="text-right">Share</th>
+            </tr>
+          </thead>
+          <tbody>
+            {{range .builders}}
+            <tr>
+              <td>{{.Key}}</td>
+              <td class="text-right">{{.Count}}</td>
+              <td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
+            </tr>
+            {{end}}
+          </tbody>
+        </table>
+      </div>
+
+    </div>
+
+    <div class="row">
+        <div class="col-md-12">
+            <h4 id="features">Feature Usage</h4>
+            <p>
+                The following lists feature usage. Some features are reported once per report, some as a sum of units within a report (e.g. devices with static addresses among all known devices per report).
+                Currently there are <b>{{.versionNodes.v2}}</b> devices reporting for version 2 and <b>{{.versionNodes.v3}}</b> for version 3.
+            </p>
+        </div>
+    </div>
+
+
+    <div class="row">
+    {{$i := counter}}
+    {{range $featureName := .featureOrder}}
+      {{$featureValues := index $.features $featureName }}
+      {{if $i.DrawTwoDivider}}
+        </div>
+        <div class="row">
+      {{end}}
+      {{ $i.Increment }}
+      <div class="col-md-6">
+          <table class="table table-striped">
+              <thead><tr>
+                  <th>{{$featureName}} Features</th><th colspan="2" class="text-center">Usage</th>
+              </tr></thead>
+              <tbody>
+                  {{range $featureValues}}
+                  <tr>
+                      <td style="width: 50%">{{.Key}} ({{.Version}})</td>
+                      <td style="width: 10%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
+                      <td style="width: 40%" {{if lt .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}>
+                          <div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px" {{if ge .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}></div>
+                      </td>
+                  </tr>
+                  {{end}}
+              </tbody>
+          </table>
+      </div>
+    {{end}}
+    </div>
+
+    <div class="row">
+        <div class="col-md-12">
+            <h4 id="features">Feature Group Usage</h4>
+            <p>
+                The following lists feature usage groups, which might include multiple occurrences of a feature use per report.
+            </p>
+        </div>
+    </div>
+
+    <div class="row">
+    {{$i := counter}}
+    {{range $featureName := .featureOrder}}
+      {{$featureValues := index $.featureGroups $featureName }}
+      {{if $i.DrawTwoDivider}}
+        </div>
+        <div class="row">
+      {{end}}
+      {{ $i.Increment }}
+      <div class="col-md-6">
+          <table class="table table-striped">
+              <thead><tr>
+                  <th>{{$featureName}} Group Features</th><th colspan="2" class="text-center">Usage</th>
+              </tr></thead>
+              <tbody>
+                  {{range $featureValues}}
+                  {{$counts := .Counts}}
+                  <tr>
+                      <td style="width: 50%">
+                          <div data-toggle="tooltip" title='{{range $key, $value := .Counts}}{{$key}} ({{$value | proportion $counts | printf "%.02f"}}% - {{$value}})</br>{{end}}'>
+                              {{.Key}} ({{.Version}})
+                          </div>
+                      </td>
+                      <td style="width: 50%">
+                          <div class="progress" role="progressbar" style="width: 100%">
+                          {{$j := counter}}
+                          {{range $key, $value := .Counts}}
+                              {{with $valuePct := $value | proportion $counts}}
+                              <div class="progress-bar {{ $j.Current | progressBarClassByIndex }}" style='width: {{$valuePct | printf "%.02f"}}%' data-toggle="tooltip" title='{{$key}} ({{$valuePct | printf "%.02f"}}% - {{$value}})'>
+                                  {{if ge $valuePct 30.0}}{{$key}}{{end}}
+                              </div>
+                              {{end}}
+                              {{ $j.Increment }}
+                          {{end}}
+                          </div>
+                      </td>
+                  </tr>
+                  {{end}}
+              </tbody>
+          </table>
+      </div>
+    {{end}}
+    </div>
+     <div class="row">
+      <div class="col-md-12">
+        <h1 id="performance-charts">Historical Performance Data</h1>
+        <p>These charts are all the average of the corresponding metric, for the entire population of a given day.</p>
+
+        <h4 id="hash-performance">Hash Performance (MiB/s)</h4>
+        <div class="img-thumbnail" id="hashPerfChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+
+        <h4 id="memory-usage">Memory Usage (MiB)</h4>
+        <div class="img-thumbnail" id="memUsageChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+
+        <h4 id="total-files">Total Number of Files</h4>
+        <div class="img-thumbnail" id="totFilesChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+
+        <h4 id="total-size">Total Folder Size (GiB)</h4>
+        <div class="img-thumbnail" id="totMiBChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+
+        <h4 id="system-ram">System RAM Size (GiB)</h4>
+        <div class="img-thumbnail" id="memSizeChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
+      </div>
+    </div>
+  </div>
+  <hr>
+  <p>
+    This product includes GeoLite2 data created by MaxMind, available from
+    <a href="http://www.maxmind.com">http://www.maxmind.com</a>.
+  </p>
+  <script type="text/javascript">
+    $('[data-toggle="tooltip"]').tooltip({html:true});
+  </script>
+</body>
+</html>

+ 8 - 0
vendor/github.com/lib/pq/LICENSE.md

@@ -0,0 +1,8 @@
+Copyright (c) 2011-2013, 'pq' Contributors
+Portions Copyright (C) 2011 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 756 - 0
vendor/github.com/lib/pq/array.go

@@ -0,0 +1,756 @@
+package pq
+
+import (
+	"bytes"
+	"database/sql"
+	"database/sql/driver"
+	"encoding/hex"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+var typeByteSlice = reflect.TypeOf([]byte{})
+var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+
+// Array returns the optimal driver.Valuer and sql.Scanner for an array or
+// slice of any dimension.
+//
+// For example:
+//  db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
+//
+//  var x []sql.NullInt64
+//  db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x))
+//
+// Scanning multi-dimensional arrays is not supported.  Arrays where the lower
+// bound is not one (such as `[0:0]={1}') are not supported.
+func Array(a interface{}) interface {
+	driver.Valuer
+	sql.Scanner
+} {
+	switch a := a.(type) {
+	case []bool:
+		return (*BoolArray)(&a)
+	case []float64:
+		return (*Float64Array)(&a)
+	case []int64:
+		return (*Int64Array)(&a)
+	case []string:
+		return (*StringArray)(&a)
+
+	case *[]bool:
+		return (*BoolArray)(a)
+	case *[]float64:
+		return (*Float64Array)(a)
+	case *[]int64:
+		return (*Int64Array)(a)
+	case *[]string:
+		return (*StringArray)(a)
+	}
+
+	return GenericArray{a}
+}
+
+// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
+// to override the array delimiter used by GenericArray.
+type ArrayDelimiter interface {
+	// ArrayDelimiter returns the delimiter character(s) for this element's type.
+	ArrayDelimiter() string
+}
+
+// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
+type BoolArray []bool
+
+// Scan implements the sql.Scanner interface.
+func (a *BoolArray) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		return a.scanBytes(src)
+	case string:
+		return a.scanBytes([]byte(src))
+	case nil:
+		*a = nil
+		return nil
+	}
+
+	return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
+}
+
+func (a *BoolArray) scanBytes(src []byte) error {
+	elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
+	if err != nil {
+		return err
+	}
+	if *a != nil && len(elems) == 0 {
+		*a = (*a)[:0]
+	} else {
+		b := make(BoolArray, len(elems))
+		for i, v := range elems {
+			if len(v) != 1 {
+				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
+			}
+			switch v[0] {
+			case 't':
+				b[i] = true
+			case 'f':
+				b[i] = false
+			default:
+				return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
+			}
+		}
+		*a = b
+	}
+	return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a BoolArray) Value() (driver.Value, error) {
+	if a == nil {
+		return nil, nil
+	}
+
+	if n := len(a); n > 0 {
+		// There will be exactly two curly brackets, N bytes of values,
+		// and N-1 bytes of delimiters.
+		b := make([]byte, 1+2*n)
+
+		for i := 0; i < n; i++ {
+			b[2*i] = ','
+			if a[i] {
+				b[1+2*i] = 't'
+			} else {
+				b[1+2*i] = 'f'
+			}
+		}
+
+		b[0] = '{'
+		b[2*n] = '}'
+
+		return string(b), nil
+	}
+
+	return "{}", nil
+}
+
+// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
+type ByteaArray [][]byte
+
+// Scan implements the sql.Scanner interface.
+func (a *ByteaArray) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		return a.scanBytes(src)
+	case string:
+		return a.scanBytes([]byte(src))
+	case nil:
+		*a = nil
+		return nil
+	}
+
+	return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
+}
+
+func (a *ByteaArray) scanBytes(src []byte) error {
+	elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
+	if err != nil {
+		return err
+	}
+	if *a != nil && len(elems) == 0 {
+		*a = (*a)[:0]
+	} else {
+		b := make(ByteaArray, len(elems))
+		for i, v := range elems {
+			b[i], err = parseBytea(v)
+			if err != nil {
+				return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
+			}
+		}
+		*a = b
+	}
+	return nil
+}
+
+// Value implements the driver.Valuer interface. It uses the "hex" format which
+// is only supported on PostgreSQL 9.0 or newer.
+func (a ByteaArray) Value() (driver.Value, error) {
+	if a == nil {
+		return nil, nil
+	}
+
+	if n := len(a); n > 0 {
+		// There will be at least two curly brackets, 2*N bytes of quotes,
+		// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
+		size := 1 + 6*n
+		for _, x := range a {
+			size += hex.EncodedLen(len(x))
+		}
+
+		b := make([]byte, size)
+
+		for i, s := 0, b; i < n; i++ {
+			o := copy(s, `,"\\x`)
+			o += hex.Encode(s[o:], a[i])
+			s[o] = '"'
+			s = s[o+1:]
+		}
+
+		b[0] = '{'
+		b[size-1] = '}'
+
+		return string(b), nil
+	}
+
+	return "{}", nil
+}
+
+// Float64Array represents a one-dimensional array of the PostgreSQL double
+// precision type.
+type Float64Array []float64
+
+// Scan implements the sql.Scanner interface.
+func (a *Float64Array) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		return a.scanBytes(src)
+	case string:
+		return a.scanBytes([]byte(src))
+	case nil:
+		*a = nil
+		return nil
+	}
+
+	return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
+}
+
+func (a *Float64Array) scanBytes(src []byte) error {
+	elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
+	if err != nil {
+		return err
+	}
+	if *a != nil && len(elems) == 0 {
+		*a = (*a)[:0]
+	} else {
+		b := make(Float64Array, len(elems))
+		for i, v := range elems {
+			if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
+				return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
+			}
+		}
+		*a = b
+	}
+	return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a Float64Array) Value() (driver.Value, error) {
+	if a == nil {
+		return nil, nil
+	}
+
+	if n := len(a); n > 0 {
+		// There will be at least two curly brackets, N bytes of values,
+		// and N-1 bytes of delimiters.
+		b := make([]byte, 1, 1+2*n)
+		b[0] = '{'
+
+		b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
+		for i := 1; i < n; i++ {
+			b = append(b, ',')
+			b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
+		}
+
+		return string(append(b, '}')), nil
+	}
+
+	return "{}", nil
+}
+
+// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
+// an array or slice of any dimension.
+type GenericArray struct{ A interface{} }
+
+func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
+	var assign func([]byte, reflect.Value) error
+	var del = ","
+
+	// TODO calculate the assign function for other types
+	// TODO repeat this section on the element type of arrays or slices (multidimensional)
+	{
+		if reflect.PtrTo(rt).Implements(typeSQLScanner) {
+			// dest is always addressable because it is an element of a slice.
+			assign = func(src []byte, dest reflect.Value) (err error) {
+				ss := dest.Addr().Interface().(sql.Scanner)
+				if src == nil {
+					err = ss.Scan(nil)
+				} else {
+					err = ss.Scan(src)
+				}
+				return
+			}
+			goto FoundType
+		}
+
+		assign = func([]byte, reflect.Value) error {
+			return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
+		}
+	}
+
+FoundType:
+
+	if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
+		del = ad.ArrayDelimiter()
+	}
+
+	return rt, assign, del
+}
+
+// Scan implements the sql.Scanner interface.
+func (a GenericArray) Scan(src interface{}) error {
+	dpv := reflect.ValueOf(a.A)
+	switch {
+	case dpv.Kind() != reflect.Ptr:
+		return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
+	case dpv.IsNil():
+		return fmt.Errorf("pq: destination %T is nil", a.A)
+	}
+
+	dv := dpv.Elem()
+	switch dv.Kind() {
+	case reflect.Slice:
+	case reflect.Array:
+	default:
+		return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
+	}
+
+	switch src := src.(type) {
+	case []byte:
+		return a.scanBytes(src, dv)
+	case string:
+		return a.scanBytes([]byte(src), dv)
+	case nil:
+		if dv.Kind() == reflect.Slice {
+			dv.Set(reflect.Zero(dv.Type()))
+			return nil
+		}
+	}
+
+	return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
+}
+
+func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
+	dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
+	dims, elems, err := parseArray(src, []byte(del))
+	if err != nil {
+		return err
+	}
+
+	// TODO allow multidimensional
+
+	if len(dims) > 1 {
+		return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
+			strings.Replace(fmt.Sprint(dims), " ", "][", -1))
+	}
+
+	// Treat a zero-dimensional array like an array with a single dimension of zero.
+	if len(dims) == 0 {
+		dims = append(dims, 0)
+	}
+
+	for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
+		switch rt.Kind() {
+		case reflect.Slice:
+		case reflect.Array:
+			if rt.Len() != dims[i] {
+				return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
+					strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
+			}
+		default:
+			// TODO handle multidimensional
+		}
+	}
+
+	values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
+	for i, e := range elems {
+		if err := assign(e, values.Index(i)); err != nil {
+			return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
+		}
+	}
+
+	// TODO handle multidimensional
+
+	switch dv.Kind() {
+	case reflect.Slice:
+		dv.Set(values.Slice(0, dims[0]))
+	case reflect.Array:
+		for i := 0; i < dims[0]; i++ {
+			dv.Index(i).Set(values.Index(i))
+		}
+	}
+
+	return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a GenericArray) Value() (driver.Value, error) {
+	if a.A == nil {
+		return nil, nil
+	}
+
+	rv := reflect.ValueOf(a.A)
+
+	switch rv.Kind() {
+	case reflect.Slice:
+		if rv.IsNil() {
+			return nil, nil
+		}
+	case reflect.Array:
+	default:
+		return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
+	}
+
+	if n := rv.Len(); n > 0 {
+		// There will be at least two curly brackets, N bytes of values,
+		// and N-1 bytes of delimiters.
+		b := make([]byte, 0, 1+2*n)
+
+		b, _, err := appendArray(b, rv, n)
+		return string(b), err
+	}
+
+	return "{}", nil
+}
+
+// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
+type Int64Array []int64
+
+// Scan implements the sql.Scanner interface.
+func (a *Int64Array) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		return a.scanBytes(src)
+	case string:
+		return a.scanBytes([]byte(src))
+	case nil:
+		*a = nil
+		return nil
+	}
+
+	return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
+}
+
+func (a *Int64Array) scanBytes(src []byte) error {
+	elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
+	if err != nil {
+		return err
+	}
+	if *a != nil && len(elems) == 0 {
+		*a = (*a)[:0]
+	} else {
+		b := make(Int64Array, len(elems))
+		for i, v := range elems {
+			if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
+				return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
+			}
+		}
+		*a = b
+	}
+	return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a Int64Array) Value() (driver.Value, error) {
+	if a == nil {
+		return nil, nil
+	}
+
+	if n := len(a); n > 0 {
+		// There will be at least two curly brackets, N bytes of values,
+		// and N-1 bytes of delimiters.
+		b := make([]byte, 1, 1+2*n)
+		b[0] = '{'
+
+		b = strconv.AppendInt(b, a[0], 10)
+		for i := 1; i < n; i++ {
+			b = append(b, ',')
+			b = strconv.AppendInt(b, a[i], 10)
+		}
+
+		return string(append(b, '}')), nil
+	}
+
+	return "{}", nil
+}
+
+// StringArray represents a one-dimensional array of the PostgreSQL character types.
+type StringArray []string
+
+// Scan implements the sql.Scanner interface.
+func (a *StringArray) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case []byte:
+		return a.scanBytes(src)
+	case string:
+		return a.scanBytes([]byte(src))
+	case nil:
+		*a = nil
+		return nil
+	}
+
+	return fmt.Errorf("pq: cannot convert %T to StringArray", src)
+}
+
+func (a *StringArray) scanBytes(src []byte) error {
+	elems, err := scanLinearArray(src, []byte{','}, "StringArray")
+	if err != nil {
+		return err
+	}
+	if *a != nil && len(elems) == 0 {
+		*a = (*a)[:0]
+	} else {
+		b := make(StringArray, len(elems))
+		for i, v := range elems {
+			if b[i] = string(v); v == nil {
+				return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
+			}
+		}
+		*a = b
+	}
+	return nil
+}
+
+// Value implements the driver.Valuer interface.
+func (a StringArray) Value() (driver.Value, error) {
+	if a == nil {
+		return nil, nil
+	}
+
+	if n := len(a); n > 0 {
+		// There will be at least two curly brackets, 2*N bytes of quotes,
+		// and N-1 bytes of delimiters.
+		b := make([]byte, 1, 1+3*n)
+		b[0] = '{'
+
+		b = appendArrayQuotedBytes(b, []byte(a[0]))
+		for i := 1; i < n; i++ {
+			b = append(b, ',')
+			b = appendArrayQuotedBytes(b, []byte(a[i]))
+		}
+
+		return string(append(b, '}')), nil
+	}
+
+	return "{}", nil
+}
+
+// appendArray appends rv to the buffer, returning the extended buffer and
+// the delimiter used between elements.
+//
+// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
+func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
+	var del string
+	var err error
+
+	b = append(b, '{')
+
+	if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
+		return b, del, err
+	}
+
+	for i := 1; i < n; i++ {
+		b = append(b, del...)
+		if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
+			return b, del, err
+		}
+	}
+
+	return append(b, '}'), del, nil
+}
+
+// appendArrayElement appends rv to the buffer, returning the extended buffer
+// and the delimiter to use before the next element.
+//
+// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
+// using driver.DefaultParameterConverter and the resulting []byte or string
+// is double-quoted.
+//
+// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
+func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
+	if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
+		if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
+			if n := rv.Len(); n > 0 {
+				return appendArray(b, rv, n)
+			}
+
+			return b, "", nil
+		}
+	}
+
+	var del = ","
+	var err error
+	var iv interface{} = rv.Interface()
+
+	if ad, ok := iv.(ArrayDelimiter); ok {
+		del = ad.ArrayDelimiter()
+	}
+
+	if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
+		return b, del, err
+	}
+
+	switch v := iv.(type) {
+	case nil:
+		return append(b, "NULL"...), del, nil
+	case []byte:
+		return appendArrayQuotedBytes(b, v), del, nil
+	case string:
+		return appendArrayQuotedBytes(b, []byte(v)), del, nil
+	}
+
+	b, err = appendValue(b, iv)
+	return b, del, err
+}
+
+func appendArrayQuotedBytes(b, v []byte) []byte {
+	b = append(b, '"')
+	for {
+		i := bytes.IndexAny(v, `"\`)
+		if i < 0 {
+			b = append(b, v...)
+			break
+		}
+		if i > 0 {
+			b = append(b, v[:i]...)
+		}
+		b = append(b, '\\', v[i])
+		v = v[i+1:]
+	}
+	return append(b, '"')
+}
+
+func appendValue(b []byte, v driver.Value) ([]byte, error) {
+	return append(b, encode(nil, v, 0)...), nil
+}
+
+// parseArray extracts the dimensions and elements of an array represented in
+// text format. Only representations emitted by the backend are supported.
+// Notably, whitespace around brackets and delimiters is significant, and NULL
+// is case-sensitive.
+//
+// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
+func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
+	var depth, i int
+
+	if len(src) < 1 || src[0] != '{' {
+		return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
+	}
+
+Open:
+	for i < len(src) {
+		switch src[i] {
+		case '{':
+			depth++
+			i++
+		case '}':
+			elems = make([][]byte, 0)
+			goto Close
+		default:
+			break Open
+		}
+	}
+	dims = make([]int, i)
+
+Element:
+	for i < len(src) {
+		switch src[i] {
+		case '{':
+			if depth == len(dims) {
+				break Element
+			}
+			depth++
+			dims[depth-1] = 0
+			i++
+		case '"':
+			var elem = []byte{}
+			var escape bool
+			for i++; i < len(src); i++ {
+				if escape {
+					elem = append(elem, src[i])
+					escape = false
+				} else {
+					switch src[i] {
+					default:
+						elem = append(elem, src[i])
+					case '\\':
+						escape = true
+					case '"':
+						elems = append(elems, elem)
+						i++
+						break Element
+					}
+				}
+			}
+		default:
+			for start := i; i < len(src); i++ {
+				if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
+					elem := src[start:i]
+					if len(elem) == 0 {
+						return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
+					}
+					if bytes.Equal(elem, []byte("NULL")) {
+						elem = nil
+					}
+					elems = append(elems, elem)
+					break Element
+				}
+			}
+		}
+	}
+
+	for i < len(src) {
+		if bytes.HasPrefix(src[i:], del) && depth > 0 {
+			dims[depth-1]++
+			i += len(del)
+			goto Element
+		} else if src[i] == '}' && depth > 0 {
+			dims[depth-1]++
+			depth--
+			i++
+		} else {
+			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
+		}
+	}
+
+Close:
+	for i < len(src) {
+		if src[i] == '}' && depth > 0 {
+			depth--
+			i++
+		} else {
+			return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
+		}
+	}
+	if depth > 0 {
+		err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
+	}
+	if err == nil {
+		for _, d := range dims {
+			if (len(elems) % d) != 0 {
+				err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
+			}
+		}
+	}
+	return
+}
+
+func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
+	dims, elems, err := parseArray(src, del)
+	if err != nil {
+		return nil, err
+	}
+	if len(dims) > 1 {
+		return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
+	}
+	return elems, err
+}

+ 91 - 0
vendor/github.com/lib/pq/buf.go

@@ -0,0 +1,91 @@
+package pq
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/lib/pq/oid"
+)
+
+type readBuf []byte
+
+func (b *readBuf) int32() (n int) {
+	n = int(int32(binary.BigEndian.Uint32(*b)))
+	*b = (*b)[4:]
+	return
+}
+
+func (b *readBuf) oid() (n oid.Oid) {
+	n = oid.Oid(binary.BigEndian.Uint32(*b))
+	*b = (*b)[4:]
+	return
+}
+
+// N.B: this is actually an unsigned 16-bit integer, unlike int32
+func (b *readBuf) int16() (n int) {
+	n = int(binary.BigEndian.Uint16(*b))
+	*b = (*b)[2:]
+	return
+}
+
+func (b *readBuf) string() string {
+	i := bytes.IndexByte(*b, 0)
+	if i < 0 {
+		errorf("invalid message format; expected string terminator")
+	}
+	s := (*b)[:i]
+	*b = (*b)[i+1:]
+	return string(s)
+}
+
+func (b *readBuf) next(n int) (v []byte) {
+	v = (*b)[:n]
+	*b = (*b)[n:]
+	return
+}
+
+func (b *readBuf) byte() byte {
+	return b.next(1)[0]
+}
+
+type writeBuf struct {
+	buf []byte
+	pos int
+}
+
+func (b *writeBuf) int32(n int) {
+	x := make([]byte, 4)
+	binary.BigEndian.PutUint32(x, uint32(n))
+	b.buf = append(b.buf, x...)
+}
+
+func (b *writeBuf) int16(n int) {
+	x := make([]byte, 2)
+	binary.BigEndian.PutUint16(x, uint16(n))
+	b.buf = append(b.buf, x...)
+}
+
+func (b *writeBuf) string(s string) {
+	b.buf = append(b.buf, (s + "\000")...)
+}
+
+func (b *writeBuf) byte(c byte) {
+	b.buf = append(b.buf, c)
+}
+
+func (b *writeBuf) bytes(v []byte) {
+	b.buf = append(b.buf, v...)
+}
+
+func (b *writeBuf) wrap() []byte {
+	p := b.buf[b.pos:]
+	binary.BigEndian.PutUint32(p, uint32(len(p)))
+	return b.buf
+}
+
+func (b *writeBuf) next(c byte) {
+	p := b.buf[b.pos:]
+	binary.BigEndian.PutUint32(p, uint32(len(p)))
+	b.pos = len(b.buf) + 1
+	b.buf = append(b.buf, c, 0, 0, 0, 0)
+}

+ 1854 - 0
vendor/github.com/lib/pq/conn.go

@@ -0,0 +1,1854 @@
+package pq
+
+import (
+	"bufio"
+	"crypto/md5"
+	"database/sql"
+	"database/sql/driver"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"os/user"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+
+	"github.com/lib/pq/oid"
+)
+
+// Common error types
+var (
+	ErrNotSupported              = errors.New("pq: Unsupported command")
+	ErrInFailedTransaction       = errors.New("pq: Could not complete operation in a failed transaction")
+	ErrSSLNotSupported           = errors.New("pq: SSL is not enabled on the server")
+	ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less")
+	ErrCouldNotDetectUsername    = errors.New("pq: Could not detect default username. Please provide one explicitly")
+
+	errUnexpectedReady = errors.New("unexpected ReadyForQuery")
+	errNoRowsAffected  = errors.New("no RowsAffected available after the empty statement")
+	errNoLastInsertID  = errors.New("no LastInsertId available after the empty statement")
+)
+
+// Driver is the Postgres database driver.
+type Driver struct{}
+
+// Open opens a new connection to the database. name is a connection string.
+// Most users should only use it through database/sql package from the standard
+// library.
+func (d *Driver) Open(name string) (driver.Conn, error) {
+	return Open(name)
+}
+
+func init() {
+	sql.Register("postgres", &Driver{})
+}
+
+type parameterStatus struct {
+	// server version in the same format as server_version_num, or 0 if
+	// unavailable
+	serverVersion int
+
+	// the current location based on the TimeZone value of the session, if
+	// available
+	currentLocation *time.Location
+}
+
+type transactionStatus byte
+
+const (
+	txnStatusIdle                transactionStatus = 'I'
+	txnStatusIdleInTransaction   transactionStatus = 'T'
+	txnStatusInFailedTransaction transactionStatus = 'E'
+)
+
+func (s transactionStatus) String() string {
+	switch s {
+	case txnStatusIdle:
+		return "idle"
+	case txnStatusIdleInTransaction:
+		return "idle in transaction"
+	case txnStatusInFailedTransaction:
+		return "in a failed transaction"
+	default:
+		errorf("unknown transactionStatus %d", s)
+	}
+
+	panic("not reached")
+}
+
+// Dialer is the dialer interface. It can be used to obtain more control over
+// how pq creates network connections.
+type Dialer interface {
+	Dial(network, address string) (net.Conn, error)
+	DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
+}
+
+type defaultDialer struct{}
+
+func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) {
+	return net.Dial(ntw, addr)
+}
+func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
+	return net.DialTimeout(ntw, addr, timeout)
+}
+
+type conn struct {
+	c         net.Conn
+	buf       *bufio.Reader
+	namei     int
+	scratch   [512]byte
+	txnStatus transactionStatus
+	txnFinish func()
+
+	// Save connection arguments to use during CancelRequest.
+	dialer Dialer
+	opts   values
+
+	// Cancellation key data for use with CancelRequest messages.
+	processID int
+	secretKey int
+
+	parameterStatus parameterStatus
+
+	saveMessageType   byte
+	saveMessageBuffer []byte
+
+	// If true, this connection is bad and all public-facing functions should
+	// return ErrBadConn.
+	bad bool
+
+	// If set, this connection should never use the binary format when
+	// receiving query results from prepared statements.  Only provided for
+	// debugging.
+	disablePreparedBinaryResult bool
+
+	// Whether to always send []byte parameters over as binary.  Enables single
+	// round-trip mode for non-prepared Query calls.
+	binaryParameters bool
+
+	// If true this connection is in the middle of a COPY
+	inCopy bool
+}
+
+// Handle driver-side settings in parsed connection string.
+func (cn *conn) handleDriverSettings(o values) (err error) {
+	boolSetting := func(key string, val *bool) error {
+		if value, ok := o[key]; ok {
+			if value == "yes" {
+				*val = true
+			} else if value == "no" {
+				*val = false
+			} else {
+				return fmt.Errorf("unrecognized value %q for %s", value, key)
+			}
+		}
+		return nil
+	}
+
+	err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult)
+	if err != nil {
+		return err
+	}
+	return boolSetting("binary_parameters", &cn.binaryParameters)
+}
+
+func (cn *conn) handlePgpass(o values) {
+	// if a password was supplied, do not process .pgpass
+	if _, ok := o["password"]; ok {
+		return
+	}
+	filename := os.Getenv("PGPASSFILE")
+	if filename == "" {
+		// XXX this code doesn't work on Windows where the default filename is
+		// XXX %APPDATA%\postgresql\pgpass.conf
+		// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+		userHome := os.Getenv("HOME")
+		if userHome == "" {
+			user, err := user.Current()
+			if err != nil {
+				return
+			}
+			userHome = user.HomeDir
+		}
+		filename = filepath.Join(userHome, ".pgpass")
+	}
+	fileinfo, err := os.Stat(filename)
+	if err != nil {
+		return
+	}
+	mode := fileinfo.Mode()
+	if mode&(0x77) != 0 {
+		// XXX should warn about incorrect .pgpass permissions as psql does
+		return
+	}
+	file, err := os.Open(filename)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+	scanner := bufio.NewScanner(io.Reader(file))
+	hostname := o["host"]
+	ntw, _ := network(o)
+	port := o["port"]
+	db := o["dbname"]
+	username := o["user"]
+	// From: https://github.com/tg/pgpass/blob/master/reader.go
+	getFields := func(s string) []string {
+		fs := make([]string, 0, 5)
+		f := make([]rune, 0, len(s))
+
+		var esc bool
+		for _, c := range s {
+			switch {
+			case esc:
+				f = append(f, c)
+				esc = false
+			case c == '\\':
+				esc = true
+			case c == ':':
+				fs = append(fs, string(f))
+				f = f[:0]
+			default:
+				f = append(f, c)
+			}
+		}
+		return append(fs, string(f))
+	}
+	for scanner.Scan() {
+		line := scanner.Text()
+		if len(line) == 0 || line[0] == '#' {
+			continue
+		}
+		split := getFields(line)
+		if len(split) != 5 {
+			continue
+		}
+		if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
+			o["password"] = split[4]
+			return
+		}
+	}
+}
+
+func (cn *conn) writeBuf(b byte) *writeBuf {
+	cn.scratch[0] = b
+	return &writeBuf{
+		buf: cn.scratch[:5],
+		pos: 1,
+	}
+}
+
+// Open opens a new connection to the database. name is a connection string.
+// Most users should only use it through database/sql package from the standard
+// library.
+func Open(name string) (_ driver.Conn, err error) {
+	return DialOpen(defaultDialer{}, name)
+}
+
+// DialOpen opens a new connection to the database using a dialer.
+func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
+	// Handle any panics during connection initialization.  Note that we
+	// specifically do *not* want to use errRecover(), as that would turn any
+	// connection errors into ErrBadConns, hiding the real error message from
+	// the user.
+	defer errRecoverNoErrBadConn(&err)
+
+	o := make(values)
+
+	// A number of defaults are applied here, in this order:
+	//
+	// * Very low precedence defaults applied in every situation
+	// * Environment variables
+	// * Explicitly passed connection information
+	o["host"] = "localhost"
+	o["port"] = "5432"
+	// N.B.: Extra float digits should be set to 3, but that breaks
+	// Postgres 8.4 and older, where the max is 2.
+	o["extra_float_digits"] = "2"
+	for k, v := range parseEnviron(os.Environ()) {
+		o[k] = v
+	}
+
+	if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") {
+		name, err = ParseURL(name)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err := parseOpts(name, o); err != nil {
+		return nil, err
+	}
+
+	// Use the "fallback" application name if necessary
+	if fallback, ok := o["fallback_application_name"]; ok {
+		if _, ok := o["application_name"]; !ok {
+			o["application_name"] = fallback
+		}
+	}
+
+	// We can't work with any client_encoding other than UTF-8 currently.
+	// However, we have historically allowed the user to set it to UTF-8
+	// explicitly, and there's no reason to break such programs, so allow that.
+	// Note that the "options" setting could also set client_encoding, but
+	// parsing its value is not worth it.  Instead, we always explicitly send
+	// client_encoding as a separate run-time parameter, which should override
+	// anything set in options.
+	if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
+		return nil, errors.New("client_encoding must be absent or 'UTF8'")
+	}
+	o["client_encoding"] = "UTF8"
+	// DateStyle needs a similar treatment.
+	if datestyle, ok := o["datestyle"]; ok {
+		if datestyle != "ISO, MDY" {
+			panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v",
+				"ISO, MDY", datestyle))
+		}
+	} else {
+		o["datestyle"] = "ISO, MDY"
+	}
+
+	// If a user is not provided by any other means, the last
+	// resort is to use the current operating system provided user
+	// name.
+	if _, ok := o["user"]; !ok {
+		u, err := userCurrent()
+		if err != nil {
+			return nil, err
+		}
+		o["user"] = u
+	}
+
+	cn := &conn{
+		opts:   o,
+		dialer: d,
+	}
+	err = cn.handleDriverSettings(o)
+	if err != nil {
+		return nil, err
+	}
+	cn.handlePgpass(o)
+
+	cn.c, err = dial(d, o)
+	if err != nil {
+		return nil, err
+	}
+
+	err = cn.ssl(o)
+	if err != nil {
+		return nil, err
+	}
+
+	// cn.startup panics on error. Make sure we don't leak cn.c.
+	panicking := true
+	defer func() {
+		if panicking {
+			cn.c.Close()
+		}
+	}()
+
+	cn.buf = bufio.NewReader(cn.c)
+	cn.startup(o)
+
+	// reset the deadline, in case one was set (see dial)
+	if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
+		err = cn.c.SetDeadline(time.Time{})
+	}
+	panicking = false
+	return cn, err
+}
+
+func dial(d Dialer, o values) (net.Conn, error) {
+	ntw, addr := network(o)
+	// SSL is not necessary or supported over UNIX domain sockets
+	if ntw == "unix" {
+		o["sslmode"] = "disable"
+	}
+
+	// Zero or not specified means wait indefinitely.
+	if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
+		seconds, err := strconv.ParseInt(timeout, 10, 0)
+		if err != nil {
+			return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
+		}
+		duration := time.Duration(seconds) * time.Second
+		// connect_timeout should apply to the entire connection establishment
+		// procedure, so we both use a timeout for the TCP connection
+		// establishment and set a deadline for doing the initial handshake.
+		// The deadline is then reset after startup() is done.
+		deadline := time.Now().Add(duration)
+		conn, err := d.DialTimeout(ntw, addr, duration)
+		if err != nil {
+			return nil, err
+		}
+		err = conn.SetDeadline(deadline)
+		return conn, err
+	}
+	return d.Dial(ntw, addr)
+}
+
+func network(o values) (string, string) {
+	host := o["host"]
+
+	if strings.HasPrefix(host, "/") {
+		sockPath := path.Join(host, ".s.PGSQL."+o["port"])
+		return "unix", sockPath
+	}
+
+	return "tcp", net.JoinHostPort(host, o["port"])
+}
+
+type values map[string]string
+
+// scanner implements a tokenizer for libpq-style option strings.
+type scanner struct {
+	s []rune
+	i int
+}
+
+// newScanner returns a new scanner initialized with the option string s.
+func newScanner(s string) *scanner {
+	return &scanner{[]rune(s), 0}
+}
+
+// Next returns the next rune.
+// It returns 0, false if the end of the text has been reached.
+func (s *scanner) Next() (rune, bool) {
+	if s.i >= len(s.s) {
+		return 0, false
+	}
+	r := s.s[s.i]
+	s.i++
+	return r, true
+}
+
+// SkipSpaces returns the next non-whitespace rune.
+// It returns 0, false if the end of the text has been reached.
+func (s *scanner) SkipSpaces() (rune, bool) {
+	r, ok := s.Next()
+	for unicode.IsSpace(r) && ok {
+		r, ok = s.Next()
+	}
+	return r, ok
+}
+
+// parseOpts parses the options from name and adds them to the values.
+//
+// The parsing code is based on conninfo_parse from libpq's fe-connect.c
+func parseOpts(name string, o values) error {
+	s := newScanner(name)
+
+	for {
+		var (
+			keyRunes, valRunes []rune
+			r                  rune
+			ok                 bool
+		)
+
+		if r, ok = s.SkipSpaces(); !ok {
+			break
+		}
+
+		// Scan the key
+		for !unicode.IsSpace(r) && r != '=' {
+			keyRunes = append(keyRunes, r)
+			if r, ok = s.Next(); !ok {
+				break
+			}
+		}
+
+		// Skip any whitespace if we're not at the = yet
+		if r != '=' {
+			r, ok = s.SkipSpaces()
+		}
+
+		// The current character should be =
+		if r != '=' || !ok {
+			return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes))
+		}
+
+		// Skip any whitespace after the =
+		if r, ok = s.SkipSpaces(); !ok {
+			// If we reach the end here, the last value is just an empty string as per libpq.
+			o[string(keyRunes)] = ""
+			break
+		}
+
+		if r != '\'' {
+			for !unicode.IsSpace(r) {
+				if r == '\\' {
+					if r, ok = s.Next(); !ok {
+						return fmt.Errorf(`missing character after backslash`)
+					}
+				}
+				valRunes = append(valRunes, r)
+
+				if r, ok = s.Next(); !ok {
+					break
+				}
+			}
+		} else {
+		quote:
+			for {
+				if r, ok = s.Next(); !ok {
+					return fmt.Errorf(`unterminated quoted string literal in connection string`)
+				}
+				switch r {
+				case '\'':
+					break quote
+				case '\\':
+					r, _ = s.Next()
+					fallthrough
+				default:
+					valRunes = append(valRunes, r)
+				}
+			}
+		}
+
+		o[string(keyRunes)] = string(valRunes)
+	}
+
+	return nil
+}
+
+func (cn *conn) isInTransaction() bool {
+	return cn.txnStatus == txnStatusIdleInTransaction ||
+		cn.txnStatus == txnStatusInFailedTransaction
+}
+
+func (cn *conn) checkIsInTransaction(intxn bool) {
+	if cn.isInTransaction() != intxn {
+		cn.bad = true
+		errorf("unexpected transaction status %v", cn.txnStatus)
+	}
+}
+
+func (cn *conn) Begin() (_ driver.Tx, err error) {
+	return cn.begin("")
+}
+
+func (cn *conn) begin(mode string) (_ driver.Tx, err error) {
+	if cn.bad {
+		return nil, driver.ErrBadConn
+	}
+	defer cn.errRecover(&err)
+
+	cn.checkIsInTransaction(false)
+	_, commandTag, err := cn.simpleExec("BEGIN" + mode)
+	if err != nil {
+		return nil, err
+	}
+	if commandTag != "BEGIN" {
+		cn.bad = true
+		return nil, fmt.Errorf("unexpected command tag %s", commandTag)
+	}
+	if cn.txnStatus != txnStatusIdleInTransaction {
+		cn.bad = true
+		return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus)
+	}
+	return cn, nil
+}
+
+func (cn *conn) closeTxn() {
+	if finish := cn.txnFinish; finish != nil {
+		finish()
+	}
+}
+
+func (cn *conn) Commit() (err error) {
+	defer cn.closeTxn()
+	if cn.bad {
+		return driver.ErrBadConn
+	}
+	defer cn.errRecover(&err)
+
+	cn.checkIsInTransaction(true)
+	// We don't want the client to think that everything is okay if it tries
+	// to commit a failed transaction.  However, no matter what we return,
+	// database/sql will release this connection back into the free connection
+	// pool so we have to abort the current transaction here.  Note that you
+	// would get the same behaviour if you issued a COMMIT in a failed
+	// transaction, so it's also the least surprising thing to do here.
+	if cn.txnStatus == txnStatusInFailedTransaction {
+		if err := cn.Rollback(); err != nil {
+			return err
+		}
+		return ErrInFailedTransaction
+	}
+
+	_, commandTag, err := cn.simpleExec("COMMIT")
+	if err != nil {
+		if cn.isInTransaction() {
+			cn.bad = true
+		}
+		return err
+	}
+	if commandTag != "COMMIT" {
+		cn.bad = true
+		return fmt.Errorf("unexpected command tag %s", commandTag)
+	}
+	cn.checkIsInTransaction(false)
+	return nil
+}
+
+func (cn *conn) Rollback() (err error) {
+	defer cn.closeTxn()
+	if cn.bad {
+		return driver.ErrBadConn
+	}
+	defer cn.errRecover(&err)
+
+	cn.checkIsInTransaction(true)
+	_, commandTag, err := cn.simpleExec("ROLLBACK")
+	if err != nil {
+		if cn.isInTransaction() {
+			cn.bad = true
+		}
+		return err
+	}
+	if commandTag != "ROLLBACK" {
+		return fmt.Errorf("unexpected command tag %s", commandTag)
+	}
+	cn.checkIsInTransaction(false)
+	return nil
+}
+
+func (cn *conn) gname() string {
+	cn.namei++
+	return strconv.FormatInt(int64(cn.namei), 10)
+}
+
+func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) {
+	b := cn.writeBuf('Q')
+	b.string(q)
+	cn.send(b)
+
+	for {
+		t, r := cn.recv1()
+		switch t {
+		case 'C':
+			res, commandTag = cn.parseComplete(r.string())
+		case 'Z':
+			cn.processReadyForQuery(r)
+			if res == nil && err == nil {
+				err = errUnexpectedReady
+			}
+			// done
+			return
+		case 'E':
+			err = parseError(r)
+		case 'I':
+			res = emptyRows
+		case 'T', 'D':
+			// ignore any results
+		default:
+			cn.bad = true
+			errorf("unknown response for simple query: %q", t)
+		}
+	}
+}
+
// simpleQuery runs q through the simple-query protocol and returns a *rows
// over the result set. Statements that produce no rows still yield a
// closable rows value (done=true). When the first DataRow arrives it is
// saved and control is ceded to rows.Next.
func (cn *conn) simpleQuery(q string) (res *rows, err error) {
	defer cn.errRecover(&err)

	b := cn.writeBuf('Q')
	b.string(q)
	cn.send(b)

	for {
		t, r := cn.recv1()
		switch t {
		case 'C', 'I':
			// We allow queries which don't return any results through Query as
			// well as Exec.  We still have to give database/sql a rows object
			// the user can close, though, to avoid connections from being
			// leaked.  A "rows" with done=true works fine for that purpose.
			if err != nil {
				cn.bad = true
				errorf("unexpected message %q in simple query execution", t)
			}
			if res == nil {
				res = &rows{
					cn: cn,
				}
			}
			// Set the result and tag to the last command complete if there wasn't a
			// query already run. Although queries usually return from here and cede
			// control to Next, a query with zero results does not.
			if t == 'C' && res.colNames == nil {
				res.result, res.tag = cn.parseComplete(r.string())
			}
			res.done = true
		case 'Z':
			cn.processReadyForQuery(r)
			// done
			return
		case 'E':
			res = nil
			err = parseError(r)
		case 'D':
			if res == nil {
				cn.bad = true
				errorf("unexpected DataRow in simple query execution")
			}
			// the query didn't fail; kick off to Next
			cn.saveMessage(t, r)
			return
		case 'T':
			// res might be non-nil here if we received a previous
			// CommandComplete, but that's fine; just overwrite it
			res = &rows{cn: cn}
			res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r)

			// To work around a bug in QueryRow in Go 1.2 and earlier, wait
			// until the first DataRow has been received.
		default:
			cn.bad = true
			errorf("unknown response for simple query: %q", t)
		}
	}
}

// noRows is a driver.Result for statements that never report an insert id
// or an affected-row count.
type noRows struct{}

// emptyRows is the shared result value for empty queries.
var emptyRows noRows

// Compile-time check that noRows satisfies driver.Result.
var _ driver.Result = noRows{}

func (noRows) LastInsertId() (int64, error) {
	return 0, errNoLastInsertID
}

func (noRows) RowsAffected() (int64, error) {
	return 0, errNoRowsAffected
}
+
// Decides which column formats to use for a prepared statement.  The input is
// an array of type oids, one element per result column.  Returns the per-
// column formats plus the pre-serialized wire form of the format list
// (shared static byte slices for the all-text and all-binary cases).
func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) {
	if len(colTyps) == 0 {
		return nil, colFmtDataAllText
	}

	colFmts = make([]format, len(colTyps))
	if forceText {
		return colFmts, colFmtDataAllText
	}

	allBinary := true
	allText := true
	for i, t := range colTyps {
		switch t.OID {
		// This is the list of types to use binary mode for when receiving them
		// through a prepared statement.  If a type appears in this list, it
		// must also be implemented in binaryDecode in encode.go.
		case oid.T_bytea:
			fallthrough
		case oid.T_int8:
			fallthrough
		case oid.T_int4:
			fallthrough
		case oid.T_int2:
			fallthrough
		case oid.T_uuid:
			colFmts[i] = formatBinary
			allText = false

		default:
			allBinary = false
		}
	}

	if allBinary {
		return colFmts, colFmtDataAllBinary
	} else if allText {
		return colFmts, colFmtDataAllText
	} else {
		// Mixed formats: serialize the full int16 array per the Bind message
		// format (count followed by one format code per column).
		colFmtData = make([]byte, 2+len(colFmts)*2)
		binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts)))
		for i, v := range colFmts {
			binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v))
		}
		return colFmts, colFmtData
	}
}

// prepareTo parses and describes query q as server-side prepared statement
// stmtName (empty for the unnamed statement), returning a *stmt populated
// with the statement's parameter and result-column metadata.
func (cn *conn) prepareTo(q, stmtName string) *stmt {
	st := &stmt{cn: cn, name: stmtName}

	// Parse ('P'), Describe statement ('D'/'S'), then Sync ('S') in one batch.
	b := cn.writeBuf('P')
	b.string(st.name)
	b.string(q)
	b.int16(0)

	b.next('D')
	b.byte('S')
	b.string(st.name)

	b.next('S')
	cn.send(b)

	cn.readParseResponse()
	st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse()
	st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult)
	cn.readReadyForQuery()
	return st
}
+
// Prepare implements driver.Conn. COPY statements are routed to the COPY-IN
// machinery; everything else becomes a named server-side prepared statement.
func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) {
	if cn.bad {
		return nil, driver.ErrBadConn
	}
	defer cn.errRecover(&err)

	// Case-insensitive prefix check for COPY statements.
	if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") {
		s, err := cn.prepareCopyIn(q)
		if err == nil {
			cn.inCopy = true
		}
		return s, err
	}
	return cn.prepareTo(q, cn.gname()), nil
}

// Close implements driver.Conn. It sends a Terminate ('X') message on a
// best-effort basis and always closes the underlying network connection.
func (cn *conn) Close() (err error) {
	// Skip cn.bad return here because we always want to close a connection.
	defer cn.errRecover(&err)

	// Ensure that cn.c.Close is always run. Since error handling is done with
	// panics and cn.errRecover, the Close must be in a defer.
	defer func() {
		cerr := cn.c.Close()
		if err == nil {
			err = cerr
		}
	}()

	// Don't go through send(); ListenerConn relies on us not scribbling on the
	// scratch buffer of this connection.
	return cn.sendSimpleMessage('X')
}

// Implement the "Queryer" interface
func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) {
	return cn.query(query, args)
}
+
// query executes a statement, choosing the cheapest protocol path available:
// the simple-query protocol when there are no parameters, a one-shot binary
// extended-protocol exchange when binary_parameters is enabled, and an
// unnamed prepared statement otherwise.
func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) {
	if cn.bad {
		return nil, driver.ErrBadConn
	}
	if cn.inCopy {
		return nil, errCopyInProgress
	}
	defer cn.errRecover(&err)

	// Check to see if we can use the "simpleQuery" interface, which is
	// *much* faster than going through prepare/exec
	if len(args) == 0 {
		return cn.simpleQuery(query)
	}

	if cn.binaryParameters {
		cn.sendBinaryModeQuery(query, args)

		cn.readParseResponse()
		cn.readBindResponse()
		rows := &rows{cn: cn}
		rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse()
		cn.postExecuteWorkaround()
		return rows, nil
	}
	st := cn.prepareTo(query, "")
	st.exec(args)
	return &rows{
		cn:       cn,
		colNames: st.colNames,
		colTyps:  st.colTyps,
		colFmts:  st.colFmts,
	}, nil
}

// Implement the optional "Execer" interface for one-shot queries
func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) {
	if cn.bad {
		return nil, driver.ErrBadConn
	}
	defer cn.errRecover(&err)

	// Check to see if we can use the "simpleExec" interface, which is
	// *much* faster than going through prepare/exec
	if len(args) == 0 {
		// ignore commandTag, our caller doesn't care
		r, _, err := cn.simpleExec(query)
		return r, err
	}

	if cn.binaryParameters {
		cn.sendBinaryModeQuery(query, args)

		cn.readParseResponse()
		cn.readBindResponse()
		cn.readPortalDescribeResponse()
		cn.postExecuteWorkaround()
		res, _, err = cn.readExecuteResponse("Execute")
		return res, err
	}
	// Use the unnamed statement to defer planning until bind
	// time, or else value-based selectivity estimates cannot be
	// used.
	st := cn.prepareTo(query, "")
	r, err := st.Exec(args)
	if err != nil {
		// The panic is converted back into err by the errRecover defer above.
		panic(err)
	}
	return r, err
}
+
// send writes a framed protocol message to the server, panicking on I/O
// error (recovered by errRecover higher up the stack).
func (cn *conn) send(m *writeBuf) {
	_, err := cn.c.Write(m.wrap())
	if err != nil {
		panic(err)
	}
}

// sendStartupPacket writes a startup-style message, which per the protocol
// carries no leading type byte — hence the [1:] slice.
func (cn *conn) sendStartupPacket(m *writeBuf) error {
	_, err := cn.c.Write((m.wrap())[1:])
	return err
}

// Send a message of type typ to the server on the other end of cn.  The
// message should have no payload.  This method does not use the scratch
// buffer.
func (cn *conn) sendSimpleMessage(typ byte) (err error) {
	// Four-byte length (4) follows the type byte: an empty payload.
	_, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'})
	return err
}

// saveMessage memorizes a message and its buffer in the conn struct.
// recvMessage will then return these values on the next call to it.  This
// method is useful in cases where you have to see what the next message is
// going to be (e.g. to see whether it's an error or not) but you can't handle
// the message yourself.
func (cn *conn) saveMessage(typ byte, buf *readBuf) {
	if cn.saveMessageType != 0 {
		cn.bad = true
		errorf("unexpected saveMessageType %d", cn.saveMessageType)
	}
	cn.saveMessageType = typ
	cn.saveMessageBuffer = *buf
}

// recvMessage receives any message from the backend, or returns an error if
// a problem occurred while reading the message.
func (cn *conn) recvMessage(r *readBuf) (byte, error) {
	// workaround for a QueryRow bug, see exec
	if cn.saveMessageType != 0 {
		t := cn.saveMessageType
		*r = cn.saveMessageBuffer
		cn.saveMessageType = 0
		cn.saveMessageBuffer = nil
		return t, nil
	}

	// Read the five-byte header: type byte plus int32 length.
	x := cn.scratch[:5]
	_, err := io.ReadFull(cn.buf, x)
	if err != nil {
		return 0, err
	}

	// read the type and length of the message that follows
	t := x[0]
	n := int(binary.BigEndian.Uint32(x[1:])) - 4
	var y []byte
	if n <= len(cn.scratch) {
		// Small payloads reuse the scratch buffer to avoid an allocation.
		y = cn.scratch[:n]
	} else {
		y = make([]byte, n)
	}
	_, err = io.ReadFull(cn.buf, y)
	if err != nil {
		return 0, err
	}
	*r = y
	return t, nil
}
+
// recv receives a message from the backend, but if an error happened while
// reading the message or the received message was an ErrorResponse, it panics.
// NoticeResponses are ignored.  This function should generally be used only
// during the startup sequence.
func (cn *conn) recv() (t byte, r *readBuf) {
	for {
		var err error
		r = &readBuf{}
		t, err = cn.recvMessage(r)
		if err != nil {
			panic(err)
		}

		switch t {
		case 'E':
			panic(parseError(r))
		case 'N':
			// ignore
		default:
			return
		}
	}
}

// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by
// the caller to avoid an allocation.
func (cn *conn) recv1Buf(r *readBuf) byte {
	for {
		t, err := cn.recvMessage(r)
		if err != nil {
			panic(err)
		}

		switch t {
		case 'A', 'N':
			// Asynchronous notifications and notices are ignored here.
			// ignore
		case 'S':
			cn.processParameterStatus(r)
		default:
			return t
		}
	}
}

// recv1 receives a message from the backend, panicking if an error occurs
// while attempting to read it.  All asynchronous messages are ignored, with
// the exception of ErrorResponse.
func (cn *conn) recv1() (t byte, r *readBuf) {
	r = &readBuf{}
	t = cn.recv1Buf(r)
	return t, r
}

// ssl negotiates an SSL upgrade of the connection according to the sslmode
// options in o. If the server refuses SSL (reply other than 'S') it returns
// ErrSSLNotSupported; on success cn.c is replaced by the TLS-wrapped conn.
func (cn *conn) ssl(o values) error {
	upgrade, err := ssl(o)
	if err != nil {
		return err
	}

	if upgrade == nil {
		// Nothing to do
		return nil
	}

	// SSLRequest message: magic code 80877103, no type byte.
	w := cn.writeBuf(0)
	w.int32(80877103)
	if err = cn.sendStartupPacket(w); err != nil {
		return err
	}

	// The server answers with a single byte: 'S' to proceed with SSL.
	b := cn.scratch[:1]
	_, err = io.ReadFull(cn.c, b)
	if err != nil {
		return err
	}

	if b[0] != 'S' {
		return ErrSSLNotSupported
	}

	cn.c, err = upgrade(cn.c)
	return err
}
+
// isDriverSetting reports whether a connection-string key configures the
// driver itself (and therefore must not be forwarded to the server in the
// startup packet as a run-time parameter).
func isDriverSetting(key string) bool {
	switch key {
	case "host", "port",
		"password",
		"sslmode", "sslcert", "sslkey", "sslrootcert",
		"fallback_application_name",
		"connect_timeout",
		"disable_prepared_binary_result",
		"binary_parameters":
		return true
	}
	return false
}
+
// startup performs the protocol handshake: it sends the StartupMessage with
// all non-driver options as run-time parameters, then consumes backend key
// data ('K'), parameter statuses ('S') and authentication requests ('R')
// until ReadyForQuery ('Z'). Errors surface as panics via recv.
func (cn *conn) startup(o values) {
	w := cn.writeBuf(0)
	// Protocol version 3.0 (196608 == 3 << 16).
	w.int32(196608)
	// Send the backend the name of the database we want to connect to, and the
	// user we want to connect as.  Additionally, we send over any run-time
	// parameters potentially included in the connection string.  If the server
	// doesn't recognize any of them, it will reply with an error.
	for k, v := range o {
		if isDriverSetting(k) {
			// skip options which can't be run-time parameters
			continue
		}
		// The protocol requires us to supply the database name as "database"
		// instead of "dbname".
		if k == "dbname" {
			k = "database"
		}
		w.string(k)
		w.string(v)
	}
	// Empty string terminates the parameter list.
	w.string("")
	if err := cn.sendStartupPacket(w); err != nil {
		panic(err)
	}

	for {
		t, r := cn.recv()
		switch t {
		case 'K':
			cn.processBackendKeyData(r)
		case 'S':
			cn.processParameterStatus(r)
		case 'R':
			cn.auth(r, o)
		case 'Z':
			cn.processReadyForQuery(r)
			return
		default:
			errorf("unknown response for startup: %q", t)
		}
	}
}

// auth answers an AuthenticationRequest. Supported codes: 0 (OK),
// 3 (cleartext password) and 5 (MD5 password with server-provided salt);
// anything else aborts via errorf.
func (cn *conn) auth(r *readBuf, o values) {
	switch code := r.int32(); code {
	case 0:
		// OK
	case 3:
		// Cleartext password request.
		w := cn.writeBuf('p')
		w.string(o["password"])
		cn.send(w)

		t, r := cn.recv()
		if t != 'R' {
			errorf("unexpected password response: %q", t)
		}

		if r.int32() != 0 {
			errorf("unexpected authentication response: %q", t)
		}
	case 5:
		// MD5 request: digest is md5(md5(password + user) + salt), hex-encoded
		// and prefixed with "md5".
		s := string(r.next(4))
		w := cn.writeBuf('p')
		w.string("md5" + md5s(md5s(o["password"]+o["user"])+s))
		cn.send(w)

		t, r := cn.recv()
		if t != 'R' {
			errorf("unexpected password response: %q", t)
		}

		if r.int32() != 0 {
			errorf("unexpected authentication response: %q", t)
		}
	default:
		errorf("unknown authentication response: %d", code)
	}
}
+
// format identifies the wire representation of a value: text or binary.
type format int

const formatText format = 0
const formatBinary format = 1

// One result-column format code with the value 1 (i.e. all binary).
var colFmtDataAllBinary = []byte{0, 1, 0, 1}

// No result-column format codes (i.e. all text).
var colFmtDataAllText = []byte{0, 0}

// stmt is a server-side prepared statement; implements driver.Stmt.
type stmt struct {
	cn         *conn
	name       string // server-side statement name ("" for unnamed)
	colNames   []string
	colFmts    []format
	colFmtData []byte // pre-serialized result-format list for Bind
	colTyps    []fieldDesc
	paramTyps  []oid.Oid
	closed     bool
}

// Close deallocates the statement on the server ('C'/'S' Close message
// followed by Sync) and waits for CloseComplete ('3') and ReadyForQuery.
func (st *stmt) Close() (err error) {
	if st.closed {
		return nil
	}
	if st.cn.bad {
		return driver.ErrBadConn
	}
	defer st.cn.errRecover(&err)

	w := st.cn.writeBuf('C')
	w.byte('S')
	w.string(st.name)
	st.cn.send(w)

	st.cn.send(st.cn.writeBuf('S'))

	t, _ := st.cn.recv1()
	if t != '3' {
		st.cn.bad = true
		errorf("unexpected close response: %q", t)
	}
	st.closed = true

	t, r := st.cn.recv1()
	if t != 'Z' {
		st.cn.bad = true
		errorf("expected ready for query, but got: %q", t)
	}
	st.cn.processReadyForQuery(r)

	return nil
}
+
// Query implements driver.Stmt: it binds and executes the statement and
// returns a rows value carrying the statement's column metadata.
func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) {
	if st.cn.bad {
		return nil, driver.ErrBadConn
	}
	defer st.cn.errRecover(&err)

	st.exec(v)
	return &rows{
		cn:       st.cn,
		colNames: st.colNames,
		colTyps:  st.colTyps,
		colFmts:  st.colFmts,
	}, nil
}

// Exec implements driver.Stmt: it binds and executes the statement and
// drains the response for a driver.Result.
func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) {
	if st.cn.bad {
		return nil, driver.ErrBadConn
	}
	defer st.cn.errRecover(&err)

	st.exec(v)
	res, _, err = st.cn.readExecuteResponse("simple query")
	return res, err
}

// exec sends Bind ('B'), Execute ('E') and Sync ('S') for the statement,
// leaving the connection positioned just after BindComplete (and the
// post-execute workaround message). Protocol errors panic via errorf.
func (st *stmt) exec(v []driver.Value) {
	if len(v) >= 65536 {
		errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v))
	}
	if len(v) != len(st.paramTyps) {
		errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps))
	}

	cn := st.cn
	w := cn.writeBuf('B')
	w.byte(0) // unnamed portal
	w.string(st.name)

	if cn.binaryParameters {
		cn.sendBinaryParameters(w, v)
	} else {
		// All parameters in text format: zero parameter-format codes.
		w.int16(0)
		w.int16(len(v))
		for i, x := range v {
			if x == nil {
				// -1 length encodes SQL NULL.
				w.int32(-1)
			} else {
				b := encode(&cn.parameterStatus, x, st.paramTyps[i])
				w.int32(len(b))
				w.bytes(b)
			}
		}
	}
	// Pre-serialized result-column format codes from decideColumnFormats.
	w.bytes(st.colFmtData)

	w.next('E')
	w.byte(0)
	w.int32(0)

	w.next('S')
	cn.send(w)

	cn.readBindResponse()
	cn.postExecuteWorkaround()

}

// NumInput implements driver.Stmt.
func (st *stmt) NumInput() int {
	return len(st.paramTyps)
}
+
// parseComplete parses the "command tag" from a CommandComplete message, and
// returns the number of rows affected (if applicable) and a string
// identifying only the command that was executed, e.g. "ALTER TABLE".  If the
// command tag could not be parsed, parseComplete panics.
func (cn *conn) parseComplete(commandTag string) (driver.Result, string) {
	commandsWithAffectedRows := []string{
		"SELECT ",
		// INSERT is handled below
		"UPDATE ",
		"DELETE ",
		"FETCH ",
		"MOVE ",
		"COPY ",
	}

	var affectedRows *string
	for _, tag := range commandsWithAffectedRows {
		if strings.HasPrefix(commandTag, tag) {
			t := commandTag[len(tag):]
			affectedRows = &t
			// Strip the trailing space to leave just the command word.
			commandTag = tag[:len(tag)-1]
			break
		}
	}
	// INSERT also includes the oid of the inserted row in its command tag.
	// Oids in user tables are deprecated, and the oid is only returned when
	// exactly one row is inserted, so it's unlikely to be of value to any
	// real-world application and we can ignore it.
	if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") {
		parts := strings.Split(commandTag, " ")
		if len(parts) != 3 {
			cn.bad = true
			errorf("unexpected INSERT command tag %s", commandTag)
		}
		affectedRows = &parts[len(parts)-1]
		commandTag = "INSERT"
	}
	// There should be no affected rows attached to the tag, just return it
	if affectedRows == nil {
		return driver.RowsAffected(0), commandTag
	}
	n, err := strconv.ParseInt(*affectedRows, 10, 64)
	if err != nil {
		cn.bad = true
		errorf("could not parse commandTag: %s", err)
	}
	return driver.RowsAffected(n), commandTag
}

// rows is the driver.Rows implementation, streaming DataRow messages off
// the connection as Next is called.
type rows struct {
	cn       *conn
	finish   func() // cancel-watch cleanup installed by QueryContext, may be nil
	colNames []string
	colTyps  []fieldDesc
	colFmts  []format
	done     bool // true once ReadyForQuery has been consumed
	rb       readBuf
	result   driver.Result
	tag      string
}
+
// Close drains the remaining messages for this result set so the connection
// is left in a reusable state, then runs any cancel-watch finisher.
func (rs *rows) Close() error {
	if finish := rs.finish; finish != nil {
		defer finish()
	}
	// no need to look at cn.bad as Next() will
	for {
		err := rs.Next(nil)
		switch err {
		case nil:
		case io.EOF:
			// rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row
			// description, used with HasNextResultSet). We need to fetch messages until
			// we hit a 'Z', which is done by waiting for done to be set.
			if rs.done {
				return nil
			}
		default:
			return err
		}
	}
}

// Columns implements driver.Rows.
func (rs *rows) Columns() []string {
	return rs.colNames
}

// Result returns the driver.Result captured from the last CommandComplete,
// or emptyRows if none was seen.
func (rs *rows) Result() driver.Result {
	if rs.result == nil {
		return emptyRows
	}
	return rs.result
}

// Tag returns the command tag of the last completed command.
func (rs *rows) Tag() string {
	return rs.tag
}

// Next implements driver.Rows: it decodes the next DataRow into dest, or
// returns io.EOF at the end of a result set ('Z', or 'T' for the start of
// the next result set when multiple statements were sent).
func (rs *rows) Next(dest []driver.Value) (err error) {
	if rs.done {
		return io.EOF
	}

	conn := rs.cn
	if conn.bad {
		return driver.ErrBadConn
	}
	defer conn.errRecover(&err)

	for {
		t := conn.recv1Buf(&rs.rb)
		switch t {
		case 'E':
			// Remember the error; it is returned when 'Z' arrives.
			err = parseError(&rs.rb)
		case 'C', 'I':
			if t == 'C' {
				rs.result, rs.tag = conn.parseComplete(rs.rb.string())
			}
			continue
		case 'Z':
			conn.processReadyForQuery(&rs.rb)
			rs.done = true
			if err != nil {
				return err
			}
			return io.EOF
		case 'D':
			n := rs.rb.int16()
			if err != nil {
				conn.bad = true
				errorf("unexpected DataRow after error %s", err)
			}
			if n < len(dest) {
				dest = dest[:n]
			}
			for i := range dest {
				l := rs.rb.int32()
				if l == -1 {
					// -1 length encodes SQL NULL.
					dest[i] = nil
					continue
				}
				dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i])
			}
			return
		case 'T':
			// New row description: the next result set begins.
			rs.colNames, rs.colFmts, rs.colTyps = parsePortalRowDescribe(&rs.rb)
			return io.EOF
		default:
			errorf("unexpected message after execute: %q", t)
		}
	}
}

// HasNextResultSet implements driver.RowsNextResultSet.
func (rs *rows) HasNextResultSet() bool {
	return !rs.done
}

// NextResultSet implements driver.RowsNextResultSet; the transition happens
// inside Next when it sees a new row description.
func (rs *rows) NextResultSet() error {
	return nil
}
+
// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
// used as part of an SQL statement.  For example:
//
//    tblname := "my_table"
//    data := "my_data"
//    quoted := pq.QuoteIdentifier(tblname)
//    err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
//
// Any double quotes in name will be escaped.  The quoted identifier will be
// case sensitive when used in a query.  If the input string contains a zero
// byte, the result will be truncated immediately before it.
func QuoteIdentifier(name string) string {
	// Truncate at the first NUL byte, mirroring libpq's behavior.
	if i := strings.IndexByte(name, 0); i >= 0 {
		name = name[:i]
	}
	// Double every embedded quote, then wrap the whole thing in quotes.
	return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
}
+
// md5s returns the lowercase hexadecimal MD5 digest of s, as required by
// PostgreSQL's "md5" password authentication exchange.
func md5s(s string) string {
	sum := md5.Sum([]byte(s))
	return fmt.Sprintf("%x", sum[:])
}
+
// sendBinaryParameters appends the parameter-format and parameter-value
// sections of a Bind message to b. []byte arguments are sent in binary
// format; everything else is encoded as text.
func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) {
	// Do one pass over the parameters to see if we're going to send any of
	// them over in binary.  If we are, create a paramFormats array at the
	// same time.
	var paramFormats []int
	for i, x := range args {
		_, ok := x.([]byte)
		if ok {
			if paramFormats == nil {
				paramFormats = make([]int, len(args))
			}
			paramFormats[i] = 1
		}
	}
	if paramFormats == nil {
		// All-text shortcut: zero format codes.
		b.int16(0)
	} else {
		b.int16(len(paramFormats))
		for _, x := range paramFormats {
			b.int16(x)
		}
	}

	b.int16(len(args))
	for _, x := range args {
		if x == nil {
			// -1 length encodes SQL NULL.
			b.int32(-1)
		} else {
			datum := binaryEncode(&cn.parameterStatus, x)
			b.int32(len(datum))
			b.bytes(datum)
		}
	}
}

// sendBinaryModeQuery performs a complete one-shot extended-protocol
// exchange (Parse, Bind, Describe portal, Execute, Sync) using the unnamed
// statement and portal, with parameters from sendBinaryParameters.
func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) {
	if len(args) >= 65536 {
		errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args))
	}

	b := cn.writeBuf('P')
	b.byte(0) // unnamed statement
	b.string(query)
	b.int16(0)

	b.next('B')
	b.int16(0) // unnamed portal and statement
	cn.sendBinaryParameters(b, args)
	b.bytes(colFmtDataAllText)

	b.next('D')
	b.byte('P')
	b.byte(0) // unnamed portal

	b.next('E')
	b.byte(0)
	b.int32(0)

	b.next('S')
	cn.send(b)
}
+
// processParameterStatus records interesting ParameterStatus ('S') values:
// the server version (as major1*10000 + major2*100 + minor) and the session
// time zone. Unknown parameters are ignored.
func (cn *conn) processParameterStatus(r *readBuf) {
	var err error

	param := r.string()
	switch param {
	case "server_version":
		var major1 int
		var major2 int
		var minor int
		// NOTE(review): this pattern expects three dot-separated components
		// ("9.6.3"); PostgreSQL 10+ reports two ("10.1"), which fails the scan
		// and leaves serverVersion at its zero value — confirm against callers.
		_, err = fmt.Sscanf(r.string(), "%d.%d.%d", &major1, &major2, &minor)
		if err == nil {
			cn.parameterStatus.serverVersion = major1*10000 + major2*100 + minor
		}

	case "TimeZone":
		cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string())
		if err != nil {
			// Unknown zone: fall back to nil and let decoders use defaults.
			cn.parameterStatus.currentLocation = nil
		}

	default:
		// ignore
	}
}

// processReadyForQuery records the transaction status byte carried by a
// ReadyForQuery ('Z') message.
func (cn *conn) processReadyForQuery(r *readBuf) {
	cn.txnStatus = transactionStatus(r.byte())
}

// readReadyForQuery consumes exactly one message, which must be
// ReadyForQuery; anything else poisons the connection and panics.
func (cn *conn) readReadyForQuery() {
	t, r := cn.recv1()
	switch t {
	case 'Z':
		cn.processReadyForQuery(r)
		return
	default:
		cn.bad = true
		errorf("unexpected message %q; expected ReadyForQuery", t)
	}
}

// processBackendKeyData stores the process ID and secret key from a
// BackendKeyData ('K') message; used later to issue cancel requests.
func (cn *conn) processBackendKeyData(r *readBuf) {
	cn.processID = r.int32()
	cn.secretKey = r.int32()
}

// readParseResponse consumes the reply to a Parse message: ParseComplete
// ('1') on success; on error ('E') it resynchronizes to ReadyForQuery and
// panics with the parsed error.
func (cn *conn) readParseResponse() {
	t, r := cn.recv1()
	switch t {
	case '1':
		return
	case 'E':
		err := parseError(r)
		cn.readReadyForQuery()
		panic(err)
	default:
		cn.bad = true
		errorf("unexpected Parse response %q", t)
	}
}
+
// readStatementDescribeResponse consumes the reply to a Describe-statement
// message: ParameterDescription ('t') followed by either RowDescription
// ('T') or NoData ('n'). On error it resynchronizes and panics.
func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) {
	for {
		t, r := cn.recv1()
		switch t {
		case 't':
			nparams := r.int16()
			paramTyps = make([]oid.Oid, nparams)
			for i := range paramTyps {
				paramTyps[i] = r.oid()
			}
		case 'n':
			// Statement returns no rows.
			return paramTyps, nil, nil
		case 'T':
			colNames, colTyps = parseStatementRowDescribe(r)
			return paramTyps, colNames, colTyps
		case 'E':
			err := parseError(r)
			cn.readReadyForQuery()
			panic(err)
		default:
			cn.bad = true
			errorf("unexpected Describe statement response %q", t)
		}
	}
}

// readPortalDescribeResponse consumes the reply to a Describe-portal
// message: RowDescription ('T') or NoData ('n'). On error it resynchronizes
// and panics.
func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) {
	t, r := cn.recv1()
	switch t {
	case 'T':
		return parsePortalRowDescribe(r)
	case 'n':
		return nil, nil, nil
	case 'E':
		err := parseError(r)
		cn.readReadyForQuery()
		panic(err)
	default:
		cn.bad = true
		errorf("unexpected Describe response %q", t)
	}
	panic("not reached")
}

// readBindResponse consumes the reply to a Bind message: BindComplete ('2')
// on success; on error it resynchronizes and panics.
func (cn *conn) readBindResponse() {
	t, r := cn.recv1()
	switch t {
	case '2':
		return
	case 'E':
		err := parseError(r)
		cn.readReadyForQuery()
		panic(err)
	default:
		cn.bad = true
		errorf("unexpected Bind response %q", t)
	}
}
+
func (cn *conn) postExecuteWorkaround() {
	// Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores
	// any errors from rows.Next, which masks errors that happened during the
	// execution of the query.  To avoid the problem in common cases, we wait
	// here for one more message from the database.  If it's not an error the
	// query will likely succeed (or perhaps has already, if it's a
	// CommandComplete), so we push the message into the conn struct; recv1
	// will return it as the next message for rows.Next or rows.Close.
	// However, if it's an error, we wait until ReadyForQuery and then return
	// the error to our caller.
	for {
		t, r := cn.recv1()
		switch t {
		case 'E':
			err := parseError(r)
			cn.readReadyForQuery()
			panic(err)
		case 'C', 'D', 'I':
			// the query didn't fail, but we can't process this message
			cn.saveMessage(t, r)
			return
		default:
			cn.bad = true
			errorf("unexpected message during extended query execution: %q", t)
		}
	}
}

// Only for Exec(), since we ignore the returned data
// readExecuteResponse drains messages until ReadyForQuery, capturing the
// CommandComplete result and any error. protocolState names the protocol
// phase for error messages.
func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) {
	for {
		t, r := cn.recv1()
		switch t {
		case 'C':
			if err != nil {
				cn.bad = true
				errorf("unexpected CommandComplete after error %s", err)
			}
			res, commandTag = cn.parseComplete(r.string())
		case 'Z':
			cn.processReadyForQuery(r)
			if res == nil && err == nil {
				// Completion without CommandComplete or error is a protocol
				// violation.
				err = errUnexpectedReady
			}
			return res, commandTag, err
		case 'E':
			err = parseError(r)
		case 'T', 'D', 'I':
			if err != nil {
				cn.bad = true
				errorf("unexpected %q after error %s", t, err)
			}
			if t == 'I' {
				res = emptyRows
			}
			// ignore any results
		default:
			cn.bad = true
			errorf("unknown %s response: %q", protocolState, t)
		}
	}
}
+
// parseStatementRowDescribe decodes a RowDescription message produced by
// describing a statement. Per-field layout: name, table oid + attnum
// (6 bytes, skipped), type OID, type length, type modifier, format code
// (always 0 for a statement, skipped).
func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) {
	n := r.int16()
	colNames = make([]string, n)
	colTyps = make([]fieldDesc, n)
	for i := range colNames {
		colNames[i] = r.string()
		r.next(6)
		colTyps[i].OID = r.oid()
		colTyps[i].Len = r.int16()
		colTyps[i].Mod = r.int32()
		// format code not known when describing a statement; always 0
		r.next(2)
	}
	return
}

// parsePortalRowDescribe decodes a RowDescription message produced by
// describing a portal; unlike the statement form, the trailing format code
// of each field is meaningful and is captured in colFmts.
func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) {
	n := r.int16()
	colNames = make([]string, n)
	colFmts = make([]format, n)
	colTyps = make([]fieldDesc, n)
	for i := range colNames {
		colNames[i] = r.string()
		r.next(6)
		colTyps[i].OID = r.oid()
		colTyps[i].Len = r.int16()
		colTyps[i].Mod = r.int32()
		colFmts[i] = format(r.int16())
	}
	return
}
+
// parseEnviron tries to mimic some of libpq's environment handling
//
// To ease testing, it does not directly reference os.Environ, but is
// designed to accept its output.
//
// Environment-set connection information is intended to have a higher
// precedence than a library default but lower than any explicitly
// passed information (such as in the URL or connection string).
func parseEnviron(env []string) map[string]string {
	// Environment keys (from the PostgreSQL 9.1 manual) that map directly
	// onto a connection-string option.
	optionFor := map[string]string{
		"PGHOST":            "host",
		"PGPORT":            "port",
		"PGDATABASE":        "dbname",
		"PGUSER":            "user",
		"PGPASSWORD":        "password",
		"PGOPTIONS":         "options",
		"PGAPPNAME":         "application_name",
		"PGSSLMODE":         "sslmode",
		"PGSSLCERT":         "sslcert",
		"PGSSLKEY":          "sslkey",
		"PGSSLROOTCERT":     "sslrootcert",
		"PGCONNECT_TIMEOUT": "connect_timeout",
		"PGCLIENTENCODING":  "client_encoding",
		"PGDATESTYLE":       "datestyle",
		"PGTZ":              "timezone",
		"PGGEQO":            "geqo",
	}
	// Well-defined libpq settings that pq does not support; their presence
	// is fatal and they should be unset prior to execution. Options which pq
	// expects to be set to a certain value are allowed, but must be set to
	// that value if present (they can, of course, be absent).
	unsupported := map[string]bool{
		"PGHOSTADDR":    true,
		"PGSERVICE":     true,
		"PGSERVICEFILE": true,
		"PGREALM":       true,
		"PGREQUIRESSL":  true,
		"PGSSLCRL":      true,
		"PGREQUIREPEER": true,
		"PGKRBSRVNAME":  true,
		"PGGSSLIB":      true,
		"PGSYSCONFDIR":  true,
		"PGLOCALEDIR":   true,
	}

	out := make(map[string]string)
	for _, setting := range env {
		kv := strings.SplitN(setting, "=", 2)
		if opt, ok := optionFor[kv[0]]; ok {
			out[opt] = kv[1]
			continue
		}
		if unsupported[kv[0]] {
			panic(fmt.Sprintf("setting %v not supported", kv[0]))
		}
		// Anything else (non-PG variables, unknown keys) is ignored.
	}
	return out
}
+
// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8".
func isUTF8(name string) bool {
	// Recognize all sorts of silly things as "UTF-8", like Postgres does:
	// lowercase the ASCII letters, drop everything that isn't alphanumeric,
	// then compare against the two canonical spellings.
	switch strings.Map(alnumLowerASCII, name) {
	case "utf8", "unicode":
		return true
	}
	return false
}

// alnumLowerASCII maps ASCII uppercase letters to lowercase, passes through
// ASCII lowercase letters and digits, and discards every other rune.
func alnumLowerASCII(ch rune) rune {
	switch {
	case ch >= 'A' && ch <= 'Z':
		return ch - 'A' + 'a'
	case ch >= 'a' && ch <= 'z', ch >= '0' && ch <= '9':
		return ch
	}
	return -1 // discard
}

+ 131 - 0
vendor/github.com/lib/pq/conn_go18.go

@@ -0,0 +1,131 @@
+// +build go1.8
+
+package pq
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
// Implement the "QueryerContext" interface
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
	list := make([]driver.Value, len(args))
	for i, nv := range args {
		list[i] = nv.Value
	}
	// The cancel watcher stays alive for the life of the rows; its finisher
	// is invoked from rows.Close.
	finish := cn.watchCancel(ctx)
	r, err := cn.query(query, list)
	if err != nil {
		if finish != nil {
			finish()
		}
		return nil, err
	}
	r.finish = finish
	return r, nil
}

// Implement the "ExecerContext" interface
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
	list := make([]driver.Value, len(args))
	for i, nv := range args {
		list[i] = nv.Value
	}

	// Exec is synchronous, so the watcher can be stopped as soon as it
	// returns.
	if finish := cn.watchCancel(ctx); finish != nil {
		defer finish()
	}

	return cn.Exec(query, list)
}

// Implement the "ConnBeginTx" interface
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
	var mode string

	// Map database/sql isolation levels onto BEGIN modifiers.
	switch sql.IsolationLevel(opts.Isolation) {
	case sql.LevelDefault:
		// Don't touch mode: use the server's default
	case sql.LevelReadUncommitted:
		mode = " ISOLATION LEVEL READ UNCOMMITTED"
	case sql.LevelReadCommitted:
		mode = " ISOLATION LEVEL READ COMMITTED"
	case sql.LevelRepeatableRead:
		mode = " ISOLATION LEVEL REPEATABLE READ"
	case sql.LevelSerializable:
		mode = " ISOLATION LEVEL SERIALIZABLE"
	default:
		return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
	}

	if opts.ReadOnly {
		mode += " READ ONLY"
	} else {
		mode += " READ WRITE"
	}

	tx, err := cn.begin(mode)
	if err != nil {
		return nil, err
	}
	// Watch for cancellation for the duration of the transaction.
	cn.txnFinish = cn.watchCancel(ctx)
	return tx, nil
}

// watchCancel spawns a goroutine that issues a server-side cancel request
// when ctx is done. It returns a finisher that must be called exactly once
// to stop the watcher (or to wait for a cancel already in flight); it
// returns nil when ctx can never be cancelled.
func (cn *conn) watchCancel(ctx context.Context) func() {
	if done := ctx.Done(); done != nil {
		finished := make(chan struct{})
		go func() {
			select {
			case <-done:
				_ = cn.cancel()
				finished <- struct{}{}
			case <-finished:
			}
		}()
		return func() {
			// Either hand the watcher a stop signal, or consume its
			// completion signal if it already fired.
			select {
			case <-finished:
			case finished <- struct{}{}:
			}
		}
	}
	return nil
}
+
// cancel asks the server to abort the query currently running on cn. Per the
// protocol this requires a brand-new connection, the same SSL negotiation,
// and a CancelRequest message carrying the process ID and secret key saved
// from BackendKeyData at startup.
func (cn *conn) cancel() error {
	c, err := dial(cn.dialer, cn.opts)
	if err != nil {
		return err
	}
	defer c.Close()

	{
		// A throwaway conn wrapper, used only for its ssl/writeBuf helpers.
		can := conn{
			c: c,
		}
		err = can.ssl(cn.opts)
		if err != nil {
			return err
		}

		w := can.writeBuf(0)
		w.int32(80877102) // cancel request code
		w.int32(cn.processID)
		w.int32(cn.secretKey)

		if err := can.sendStartupPacket(w); err != nil {
			return err
		}
	}

	// Read until EOF to ensure that the server received the cancel.
	{
		_, err := io.Copy(ioutil.Discard, c)
		return err
	}
}

+ 43 - 0
vendor/github.com/lib/pq/connector.go

@@ -0,0 +1,43 @@
+// +build go1.10
+
+package pq
+
+import (
+	"context"
+	"database/sql/driver"
+)
+
// Connector represents a fixed configuration for the pq driver with a given
// name. Connector satisfies the database/sql/driver Connector interface and
// can be used to create any number of DB Conn's via the database/sql OpenDB
// function.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
type connector struct {
	name string
}

// Connect returns a connection to the database using the fixed configuration
// of this Connector. Context is not used.
func (c *connector) Connect(_ context.Context) (driver.Conn, error) {
	return (&Driver{}).Open(c.name)
}

// Driver returns the underlying driver of this Connector.
func (c *connector) Driver() driver.Driver {
	return &Driver{}
}

// Compile-time check that connector satisfies driver.Connector.
var _ driver.Connector = &connector{}

// NewConnector returns a connector for the pq driver in a fixed configuration
// with the given name. The returned connector can be used to create any number
// of equivalent Conn's. The returned connector is intended to be used with
// database/sql.OpenDB.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
func NewConnector(name string) (driver.Connector, error) {
	return &connector{name: name}, nil
}

+ 282 - 0
vendor/github.com/lib/pq/copy.go

@@ -0,0 +1,282 @@
+package pq
+
+import (
+	"database/sql/driver"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"sync"
+)
+
+var (
+	errCopyInClosed               = errors.New("pq: copyin statement has already been closed")
+	errBinaryCopyNotSupported     = errors.New("pq: only text format supported for COPY")
+	errCopyToNotSupported         = errors.New("pq: COPY TO is not supported")
+	errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
+	errCopyInProgress             = errors.New("pq: COPY in progress")
+)
+
+// CopyIn creates a COPY FROM statement which can be prepared with
+// Tx.Prepare().  The target table should be visible in search_path.
+func CopyIn(table string, columns ...string) string {
+	stmt := "COPY " + QuoteIdentifier(table) + " ("
+	for i, col := range columns {
+		if i != 0 {
+			stmt += ", "
+		}
+		stmt += QuoteIdentifier(col)
+	}
+	stmt += ") FROM STDIN"
+	return stmt
+}
+
+// CopyInSchema creates a COPY FROM statement which can be prepared with
+// Tx.Prepare().
+func CopyInSchema(schema, table string, columns ...string) string {
+	stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
+	for i, col := range columns {
+		if i != 0 {
+			stmt += ", "
+		}
+		stmt += QuoteIdentifier(col)
+	}
+	stmt += ") FROM STDIN"
+	return stmt
+}
+
+type copyin struct {
+	cn      *conn
+	buffer  []byte
+	rowData chan []byte
+	done    chan bool
+
+	closed bool
+
+	sync.Mutex // guards err
+	err        error
+}
+
+const ciBufferSize = 64 * 1024
+
+// flush buffer before the buffer is filled up and needs reallocation
+const ciBufferFlushSize = 63 * 1024
+
+func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
+	if !cn.isInTransaction() {
+		return nil, errCopyNotSupportedOutsideTxn
+	}
+
+	ci := &copyin{
+		cn:      cn,
+		buffer:  make([]byte, 0, ciBufferSize),
+		rowData: make(chan []byte),
+		done:    make(chan bool, 1),
+	}
+	// add CopyData identifier + 4 bytes for message length
+	ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)
+
+	b := cn.writeBuf('Q')
+	b.string(q)
+	cn.send(b)
+
+awaitCopyInResponse:
+	for {
+		t, r := cn.recv1()
+		switch t {
+		case 'G':
+			if r.byte() != 0 {
+				err = errBinaryCopyNotSupported
+				break awaitCopyInResponse
+			}
+			go ci.resploop()
+			return ci, nil
+		case 'H':
+			err = errCopyToNotSupported
+			break awaitCopyInResponse
+		case 'E':
+			err = parseError(r)
+		case 'Z':
+			if err == nil {
+				ci.setBad()
+				errorf("unexpected ReadyForQuery in response to COPY")
+			}
+			cn.processReadyForQuery(r)
+			return nil, err
+		default:
+			ci.setBad()
+			errorf("unknown response for copy query: %q", t)
+		}
+	}
+
+	// something went wrong, abort COPY before we return
+	b = cn.writeBuf('f')
+	b.string(err.Error())
+	cn.send(b)
+
+	for {
+		t, r := cn.recv1()
+		switch t {
+		case 'c', 'C', 'E':
+		case 'Z':
+			// correctly aborted, we're done
+			cn.processReadyForQuery(r)
+			return nil, err
+		default:
+			ci.setBad()
+			errorf("unknown response for CopyFail: %q", t)
+		}
+	}
+}
+
+func (ci *copyin) flush(buf []byte) {
+	// set message length (without message identifier)
+	binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))
+
+	_, err := ci.cn.c.Write(buf)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func (ci *copyin) resploop() {
+	for {
+		var r readBuf
+		t, err := ci.cn.recvMessage(&r)
+		if err != nil {
+			ci.setBad()
+			ci.setError(err)
+			ci.done <- true
+			return
+		}
+		switch t {
+		case 'C':
+			// complete
+		case 'N':
+			// NoticeResponse
+		case 'Z':
+			ci.cn.processReadyForQuery(&r)
+			ci.done <- true
+			return
+		case 'E':
+			err := parseError(&r)
+			ci.setError(err)
+		default:
+			ci.setBad()
+			ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
+			ci.done <- true
+			return
+		}
+	}
+}
+
+func (ci *copyin) setBad() {
+	ci.Lock()
+	ci.cn.bad = true
+	ci.Unlock()
+}
+
+func (ci *copyin) isBad() bool {
+	ci.Lock()
+	b := ci.cn.bad
+	ci.Unlock()
+	return b
+}
+
+func (ci *copyin) isErrorSet() bool {
+	ci.Lock()
+	isSet := (ci.err != nil)
+	ci.Unlock()
+	return isSet
+}
+
+// setError() sets ci.err if one has not been set already.  Caller must not be
+// holding ci.Mutex.
+func (ci *copyin) setError(err error) {
+	ci.Lock()
+	if ci.err == nil {
+		ci.err = err
+	}
+	ci.Unlock()
+}
+
+func (ci *copyin) NumInput() int {
+	return -1
+}
+
+func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
+	return nil, ErrNotSupported
+}
+
+// Exec inserts values into the COPY stream. The insert is asynchronous
+// and Exec can return errors from previous Exec calls to the same
+// COPY stmt.
+//
+// You need to call Exec(nil) to sync the COPY stream and to get any
+// errors from pending data, since Stmt.Close() doesn't return errors
+// to the user.
+func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
+	if ci.closed {
+		return nil, errCopyInClosed
+	}
+
+	if ci.isBad() {
+		return nil, driver.ErrBadConn
+	}
+	defer ci.cn.errRecover(&err)
+
+	if ci.isErrorSet() {
+		return nil, ci.err
+	}
+
+	if len(v) == 0 {
+		return nil, ci.Close()
+	}
+
+	numValues := len(v)
+	for i, value := range v {
+		ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
+		if i < numValues-1 {
+			ci.buffer = append(ci.buffer, '\t')
+		}
+	}
+
+	ci.buffer = append(ci.buffer, '\n')
+
+	if len(ci.buffer) > ciBufferFlushSize {
+		ci.flush(ci.buffer)
+		// reset buffer, keep bytes for message identifier and length
+		ci.buffer = ci.buffer[:5]
+	}
+
+	return driver.RowsAffected(0), nil
+}
+
+func (ci *copyin) Close() (err error) {
+	if ci.closed { // Don't do anything, we're already closed
+		return nil
+	}
+	ci.closed = true
+
+	if ci.isBad() {
+		return driver.ErrBadConn
+	}
+	defer ci.cn.errRecover(&err)
+
+	if len(ci.buffer) > 0 {
+		ci.flush(ci.buffer)
+	}
+	// Avoid touching the scratch buffer as resploop could be using it.
+	err = ci.cn.sendSimpleMessage('c')
+	if err != nil {
+		return err
+	}
+
+	<-ci.done
+	ci.cn.inCopy = false
+
+	if ci.isErrorSet() {
+		err = ci.err
+		return err
+	}
+	return nil
+}

+ 245 - 0
vendor/github.com/lib/pq/doc.go

@@ -0,0 +1,245 @@
+/*
+Package pq is a pure Go Postgres driver for the database/sql package.
+
+In most cases clients will use the database/sql package instead of
+using this package directly. For example:
+
+	import (
+		"database/sql"
+
+		_ "github.com/lib/pq"
+	)
+
+	func main() {
+		connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
+		db, err := sql.Open("postgres", connStr)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		age := 21
+		rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
+		…
+	}
+
+You can also connect to a database using a URL. For example:
+
+	connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
+	db, err := sql.Open("postgres", connStr)
+
+
+Connection String Parameters
+
+
+Similarly to libpq, when establishing a connection using pq you are expected to
+supply a connection string containing zero or more parameters.
+A subset of the connection parameters supported by libpq are also supported by pq.
+Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
+directly in the connection string.  This is different from libpq, which does not allow
+run-time parameters in the connection string, instead requiring you to supply
+them in the options parameter.
+
+For compatibility with libpq, the following special connection parameters are
+supported:
+
+	* dbname - The name of the database to connect to
+	* user - The user to sign in as
+	* password - The user's password
+	* host - The host to connect to. Values that start with / are for unix
+	  domain sockets. (default is localhost)
+	* port - The port to bind to. (default is 5432)
+	* sslmode - Whether or not to use SSL (default is require, this is not
+	  the default for libpq)
+	* fallback_application_name - An application_name to fall back to if one isn't provided.
+	* connect_timeout - Maximum wait for connection, in seconds. Zero or
+	  not specified means wait indefinitely.
+	* sslcert - Cert file location. The file must contain PEM encoded data.
+	* sslkey - Key file location. The file must contain PEM encoded data.
+	* sslrootcert - The location of the root certificate file. The file
+	  must contain PEM encoded data.
+
+Valid values for sslmode are:
+
+	* disable - No SSL
+	* require - Always SSL (skip verification)
+	* verify-ca - Always SSL (verify that the certificate presented by the
+	  server was signed by a trusted CA)
+	* verify-full - Always SSL (verify that the certification presented by
+	  the server was signed by a trusted CA and the server host name
+	  matches the one in the certificate)
+
+See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
+for more information about connection string parameters.
+
+Use single quotes for values that contain whitespace:
+
+    "user=pqgotest password='with spaces'"
+
+A backslash will escape the next character in values:
+
+    "user=space\ man password='it\'s valid'"
+
+Note that the connection parameter client_encoding (which sets the
+text encoding for the connection) may be set but must be "UTF8",
+matching with the same rules as Postgres. It is an error to provide
+any other value.
+
+In addition to the parameters listed above, any run-time parameter that can be
+set at backend start time can be set in the connection string.  For more
+information, see
+http://www.postgresql.org/docs/current/static/runtime-config.html.
+
+Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
+supported by libpq are also supported by pq.  If any of the environment
+variables not supported by pq are set, pq will panic during connection
+establishment.  Environment variables have a lower precedence than explicitly
+provided connection parameters.
+
+The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
+is supported, but on Windows PGPASSFILE must be specified explicitly.
+
+
+Queries
+
+
+database/sql does not dictate any specific format for parameter
+markers in query strings, and pq uses the Postgres-native ordinal markers,
+as shown above. The same marker can be reused for the same parameter:
+
+	rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
+		OR age BETWEEN $2 AND $2 + 3`, "orange", 64)
+
+pq does not support the LastInsertId() method of the Result type in database/sql.
+To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
+RETURNING clause with a standard Query or QueryRow call:
+
+	var userid int
+	err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
+		VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)
+
+For more details on RETURNING, see the Postgres documentation:
+
+	http://www.postgresql.org/docs/current/static/sql-insert.html
+	http://www.postgresql.org/docs/current/static/sql-update.html
+	http://www.postgresql.org/docs/current/static/sql-delete.html
+
+For additional instructions on querying see the documentation for the database/sql package.
+
+
+Data Types
+
+
+Parameters pass through driver.DefaultParameterConverter before they are handled
+by this package. When the binary_parameters connection option is enabled,
+[]byte values are sent directly to the backend as data in binary format.
+
+This package returns the following types for values from the PostgreSQL backend:
+
+	- integer types smallint, integer, and bigint are returned as int64
+	- floating-point types real and double precision are returned as float64
+	- character types char, varchar, and text are returned as string
+	- temporal types date, time, timetz, timestamp, and timestamptz are
+	  returned as time.Time
+	- the boolean type is returned as bool
+	- the bytea type is returned as []byte
+
+All other types are returned directly from the backend as []byte values in text format.
+
+
+Errors
+
+
+pq may return errors of type *pq.Error which can be interrogated for error details:
+
+        if err, ok := err.(*pq.Error); ok {
+            fmt.Println("pq error:", err.Code.Name())
+        }
+
+See the pq.Error type for details.
+
+
+Bulk imports
+
+You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
+pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
+handle can then be repeatedly "executed" to copy data into the target table.
+After all data has been processed you should call Exec() once with no arguments
+to flush all buffered data. Any call to Exec() might return an error which
+should be handled appropriately, but because of the internal buffering an error
+returned by Exec() might not be related to the data passed in the call that
+failed.
+
+CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
+explicit transaction in pq.
+
+Usage example:
+
+	txn, err := db.Begin()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, user := range users {
+		_, err = stmt.Exec(user.Name, int64(user.Age))
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+
+	_, err = stmt.Exec()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	err = stmt.Close()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	err = txn.Commit()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+
+Notifications
+
+
+PostgreSQL supports a simple publish/subscribe model over database
+connections.  See http://www.postgresql.org/docs/current/static/sql-notify.html
+for more information about the general mechanism.
+
+To start listening for notifications, you first have to open a new connection
+to the database by calling NewListener.  This connection can not be used for
+anything other than LISTEN / NOTIFY.  Calling Listen will open a "notification
+channel"; once a notification channel is open, a notification generated on that
+channel will effect a send on the Listener.Notify channel.  A notification
+channel will remain open until Unlisten is called, though connection loss might
+result in some notifications being lost.  To solve this problem, Listener sends
+a nil pointer over the Notify channel any time the connection is re-established
+following a connection loss.  The application can get information about the
+state of the underlying connection by setting an event callback in the call to
+NewListener.
+
+A single Listener can safely be used from concurrent goroutines, which means
+that there is often no need to create more than one Listener in your
+application.  However, a Listener is always connected to a single database, so
+you will need to create a new Listener instance for every database you want to
+receive notifications in.
+
+The channel name in both Listen and Unlisten is case sensitive, and can contain
+any characters legal in an identifier (see
+http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
+for more information).  Note that the channel name will be truncated to 63
+bytes by the PostgreSQL server.
+
+You can find a complete, working example of Listener usage at
+http://godoc.org/github.com/lib/pq/example/listen.
+
+*/
+package pq

+ 603 - 0
vendor/github.com/lib/pq/encode.go

@@ -0,0 +1,603 @@
+package pq
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/lib/pq/oid"
+)
+
+func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
+	switch v := x.(type) {
+	case []byte:
+		return v
+	default:
+		return encode(parameterStatus, x, oid.T_unknown)
+	}
+}
+
+func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
+	switch v := x.(type) {
+	case int64:
+		return strconv.AppendInt(nil, v, 10)
+	case float64:
+		return strconv.AppendFloat(nil, v, 'f', -1, 64)
+	case []byte:
+		if pgtypOid == oid.T_bytea {
+			return encodeBytea(parameterStatus.serverVersion, v)
+		}
+
+		return v
+	case string:
+		if pgtypOid == oid.T_bytea {
+			return encodeBytea(parameterStatus.serverVersion, []byte(v))
+		}
+
+		return []byte(v)
+	case bool:
+		return strconv.AppendBool(nil, v)
+	case time.Time:
+		return formatTs(v)
+
+	default:
+		errorf("encode: unknown type for %T", v)
+	}
+
+	panic("not reached")
+}
+
+func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
+	switch f {
+	case formatBinary:
+		return binaryDecode(parameterStatus, s, typ)
+	case formatText:
+		return textDecode(parameterStatus, s, typ)
+	default:
+		panic("not reached")
+	}
+}
+
+func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
+	switch typ {
+	case oid.T_bytea:
+		return s
+	case oid.T_int8:
+		return int64(binary.BigEndian.Uint64(s))
+	case oid.T_int4:
+		return int64(int32(binary.BigEndian.Uint32(s)))
+	case oid.T_int2:
+		return int64(int16(binary.BigEndian.Uint16(s)))
+	case oid.T_uuid:
+		b, err := decodeUUIDBinary(s)
+		if err != nil {
+			panic(err)
+		}
+		return b
+
+	default:
+		errorf("don't know how to decode binary parameter of type %d", uint32(typ))
+	}
+
+	panic("not reached")
+}
+
+func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
+	switch typ {
+	case oid.T_char, oid.T_varchar, oid.T_text:
+		return string(s)
+	case oid.T_bytea:
+		b, err := parseBytea(s)
+		if err != nil {
+			errorf("%s", err)
+		}
+		return b
+	case oid.T_timestamptz:
+		return parseTs(parameterStatus.currentLocation, string(s))
+	case oid.T_timestamp, oid.T_date:
+		return parseTs(nil, string(s))
+	case oid.T_time:
+		return mustParse("15:04:05", typ, s)
+	case oid.T_timetz:
+		return mustParse("15:04:05-07", typ, s)
+	case oid.T_bool:
+		return s[0] == 't'
+	case oid.T_int8, oid.T_int4, oid.T_int2:
+		i, err := strconv.ParseInt(string(s), 10, 64)
+		if err != nil {
+			errorf("%s", err)
+		}
+		return i
+	case oid.T_float4, oid.T_float8:
+		bits := 64
+		if typ == oid.T_float4 {
+			bits = 32
+		}
+		f, err := strconv.ParseFloat(string(s), bits)
+		if err != nil {
+			errorf("%s", err)
+		}
+		return f
+	}
+
+	return s
+}
+
+// appendEncodedText encodes item in text format as required by COPY
+// and appends to buf
+func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte {
+	switch v := x.(type) {
+	case int64:
+		return strconv.AppendInt(buf, v, 10)
+	case float64:
+		return strconv.AppendFloat(buf, v, 'f', -1, 64)
+	case []byte:
+		encodedBytea := encodeBytea(parameterStatus.serverVersion, v)
+		return appendEscapedText(buf, string(encodedBytea))
+	case string:
+		return appendEscapedText(buf, v)
+	case bool:
+		return strconv.AppendBool(buf, v)
+	case time.Time:
+		return append(buf, formatTs(v)...)
+	case nil:
+		return append(buf, "\\N"...)
+	default:
+		errorf("encode: unknown type for %T", v)
+	}
+
+	panic("not reached")
+}
+
+func appendEscapedText(buf []byte, text string) []byte {
+	escapeNeeded := false
+	startPos := 0
+	var c byte
+
+	// check if we need to escape
+	for i := 0; i < len(text); i++ {
+		c = text[i]
+		if c == '\\' || c == '\n' || c == '\r' || c == '\t' {
+			escapeNeeded = true
+			startPos = i
+			break
+		}
+	}
+	if !escapeNeeded {
+		return append(buf, text...)
+	}
+
+	// copy till first char to escape, iterate the rest
+	result := append(buf, text[:startPos]...)
+	for i := startPos; i < len(text); i++ {
+		c = text[i]
+		switch c {
+		case '\\':
+			result = append(result, '\\', '\\')
+		case '\n':
+			result = append(result, '\\', 'n')
+		case '\r':
+			result = append(result, '\\', 'r')
+		case '\t':
+			result = append(result, '\\', 't')
+		default:
+			result = append(result, c)
+		}
+	}
+	return result
+}
+
+func mustParse(f string, typ oid.Oid, s []byte) time.Time {
+	str := string(s)
+
+	// check for a 30-minute-offset timezone
+	if (typ == oid.T_timestamptz || typ == oid.T_timetz) &&
+		str[len(str)-3] == ':' {
+		f += ":00"
+	}
+	t, err := time.Parse(f, str)
+	if err != nil {
+		errorf("decode: %s", err)
+	}
+	return t
+}
+
+var errInvalidTimestamp = errors.New("invalid timestamp")
+
+type timestampParser struct {
+	err error
+}
+
+func (p *timestampParser) expect(str string, char byte, pos int) {
+	if p.err != nil {
+		return
+	}
+	if pos+1 > len(str) {
+		p.err = errInvalidTimestamp
+		return
+	}
+	if c := str[pos]; c != char && p.err == nil {
+		p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c)
+	}
+}
+
+func (p *timestampParser) mustAtoi(str string, begin int, end int) int {
+	if p.err != nil {
+		return 0
+	}
+	if begin < 0 || end < 0 || begin > end || end > len(str) {
+		p.err = errInvalidTimestamp
+		return 0
+	}
+	result, err := strconv.Atoi(str[begin:end])
+	if err != nil {
+		if p.err == nil {
+			p.err = fmt.Errorf("expected number; got '%v'", str)
+		}
+		return 0
+	}
+	return result
+}
+
+// The location cache caches the time zones typically used by the client.
+type locationCache struct {
+	cache map[int]*time.Location
+	lock  sync.Mutex
+}
+
+// All connections share the same list of timezones. Benchmarking shows that
+// about 5% speed could be gained by putting the cache in the connection and
+// losing the mutex, at the cost of a small amount of memory and a somewhat
+// significant increase in code complexity.
+var globalLocationCache = newLocationCache()
+
+func newLocationCache() *locationCache {
+	return &locationCache{cache: make(map[int]*time.Location)}
+}
+
+// Returns the cached timezone for the specified offset, creating and caching
+// it if necessary.
+func (c *locationCache) getLocation(offset int) *time.Location {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	location, ok := c.cache[offset]
+	if !ok {
+		location = time.FixedZone("", offset)
+		c.cache[offset] = location
+	}
+
+	return location
+}
+
+var infinityTsEnabled = false
+var infinityTsNegative time.Time
+var infinityTsPositive time.Time
+
+const (
+	infinityTsEnabledAlready        = "pq: infinity timestamp enabled already"
+	infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive"
+)
+
+// EnableInfinityTs controls the handling of Postgres' "-infinity" and
+// "infinity" "timestamp"s.
+//
+// If EnableInfinityTs is not called, "-infinity" and "infinity" will return
+// []byte("-infinity") and []byte("infinity") respectively, and potentially
+// cause error "sql: Scan error on column index 0: unsupported driver -> Scan
+// pair: []uint8 -> *time.Time", when scanning into a time.Time value.
+//
+// Once EnableInfinityTs has been called, all connections created using this
+// driver will decode Postgres' "-infinity" and "infinity" for "timestamp",
+// "timestamp with time zone" and "date" types to the predefined minimum and
+// maximum times, respectively.  When encoding time.Time values, any time which
+// equals or precedes the predefined minimum time will be encoded to
+// "-infinity".  Any values at or past the maximum time will similarly be
+// encoded to "infinity".
+//
+// If EnableInfinityTs is called with negative >= positive, it will panic.
+// Calling EnableInfinityTs after a connection has been established results in
+// undefined behavior.  If EnableInfinityTs is called more than once, it will
+// panic.
+func EnableInfinityTs(negative time.Time, positive time.Time) {
+	if infinityTsEnabled {
+		panic(infinityTsEnabledAlready)
+	}
+	if !negative.Before(positive) {
+		panic(infinityTsNegativeMustBeSmaller)
+	}
+	infinityTsEnabled = true
+	infinityTsNegative = negative
+	infinityTsPositive = positive
+}
+
+/*
+ * Testing might want to toggle infinityTsEnabled
+ */
+func disableInfinityTs() {
+	infinityTsEnabled = false
+}
+
+// This is a time function specific to the Postgres default DateStyle
+// setting ("ISO, MDY"), the only one we currently support. This
+// accounts for the discrepancies between the parsing available with
+// time.Parse and the Postgres date formatting quirks.
+func parseTs(currentLocation *time.Location, str string) interface{} {
+	switch str {
+	case "-infinity":
+		if infinityTsEnabled {
+			return infinityTsNegative
+		}
+		return []byte(str)
+	case "infinity":
+		if infinityTsEnabled {
+			return infinityTsPositive
+		}
+		return []byte(str)
+	}
+	t, err := ParseTimestamp(currentLocation, str)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// ParseTimestamp parses Postgres' text format. It returns a time.Time in
+// currentLocation iff that time's offset agrees with the offset sent from the
+// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the
+// fixed offset provided by the Postgres server.
+func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) {
+	p := timestampParser{}
+
+	monSep := strings.IndexRune(str, '-')
+	// this is Gregorian year, not ISO Year
+	// In Gregorian system, the year 1 BC is followed by AD 1
+	year := p.mustAtoi(str, 0, monSep)
+	daySep := monSep + 3
+	month := p.mustAtoi(str, monSep+1, daySep)
+	p.expect(str, '-', daySep)
+	timeSep := daySep + 3
+	day := p.mustAtoi(str, daySep+1, timeSep)
+
+	minLen := monSep + len("01-01") + 1
+
+	isBC := strings.HasSuffix(str, " BC")
+	if isBC {
+		minLen += 3
+	}
+
+	var hour, minute, second int
+	if len(str) > minLen {
+		p.expect(str, ' ', timeSep)
+		minSep := timeSep + 3
+		p.expect(str, ':', minSep)
+		hour = p.mustAtoi(str, timeSep+1, minSep)
+		secSep := minSep + 3
+		p.expect(str, ':', secSep)
+		minute = p.mustAtoi(str, minSep+1, secSep)
+		secEnd := secSep + 3
+		second = p.mustAtoi(str, secSep+1, secEnd)
+	}
+	remainderIdx := monSep + len("01-01 00:00:00") + 1
+	// Three optional (but ordered) sections follow: the
+	// fractional seconds, the time zone offset, and the BC
+	// designation. We set them up here and adjust the other
+	// offsets if the preceding sections exist.
+
+	nanoSec := 0
+	tzOff := 0
+
+	if remainderIdx < len(str) && str[remainderIdx] == '.' {
+		fracStart := remainderIdx + 1
+		fracOff := strings.IndexAny(str[fracStart:], "-+ ")
+		if fracOff < 0 {
+			fracOff = len(str) - fracStart
+		}
+		fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff)
+		nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff))))
+
+		remainderIdx += fracOff + 1
+	}
+	if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') {
+		// time zone separator is always '-' or '+' (UTC is +00)
+		var tzSign int
+		switch c := str[tzStart]; c {
+		case '-':
+			tzSign = -1
+		case '+':
+			tzSign = +1
+		default:
+			return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c)
+		}
+		tzHours := p.mustAtoi(str, tzStart+1, tzStart+3)
+		remainderIdx += 3
+		var tzMin, tzSec int
+		if remainderIdx < len(str) && str[remainderIdx] == ':' {
+			tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
+			remainderIdx += 3
+		}
+		if remainderIdx < len(str) && str[remainderIdx] == ':' {
+			tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3)
+			remainderIdx += 3
+		}
+		tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec)
+	}
+	var isoYear int
+
+	if isBC {
+		isoYear = 1 - year
+		remainderIdx += 3
+	} else {
+		isoYear = year
+	}
+	if remainderIdx < len(str) {
+		return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:])
+	}
+	t := time.Date(isoYear, time.Month(month), day,
+		hour, minute, second, nanoSec,
+		globalLocationCache.getLocation(tzOff))
+
+	if currentLocation != nil {
+		// Set the location of the returned Time based on the session's
+		// TimeZone value, but only if the local time zone database agrees with
+		// the remote database on the offset.
+		lt := t.In(currentLocation)
+		_, newOff := lt.Zone()
+		if newOff == tzOff {
+			t = lt
+		}
+	}
+
+	return t, p.err
+}
+
+// formatTs formats t into a format postgres understands.
+func formatTs(t time.Time) []byte {
+	if infinityTsEnabled {
+		// t <= -infinity : ! (t > -infinity)
+		if !t.After(infinityTsNegative) {
+			return []byte("-infinity")
+		}
+		// t >= infinity : ! (!t < infinity)
+		if !t.Before(infinityTsPositive) {
+			return []byte("infinity")
+		}
+	}
+	return FormatTimestamp(t)
+}
+
+// FormatTimestamp formats t into Postgres' text format for timestamps.
+func FormatTimestamp(t time.Time) []byte {
+	// Need to send dates before 0001 A.D. with " BC" suffix, instead of the
+	// minus sign preferred by Go.
+	// Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on
+	bc := false
+	if t.Year() <= 0 {
+		// flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11"
+		t = t.AddDate((-t.Year())*2+1, 0, 0)
+		bc = true
+	}
+	b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00"))
+
+	_, offset := t.Zone()
+	offset = offset % 60
+	if offset != 0 {
+		// RFC3339Nano already printed the minus sign
+		if offset < 0 {
+			offset = -offset
+		}
+
+		b = append(b, ':')
+		if offset < 10 {
+			b = append(b, '0')
+		}
+		b = strconv.AppendInt(b, int64(offset), 10)
+	}
+
+	if bc {
+		b = append(b, " BC"...)
+	}
+	return b
+}
+
+// Parse a bytea value received from the server.  Both "hex" and the legacy
+// "escape" format are supported.
+func parseBytea(s []byte) (result []byte, err error) {
+	if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) {
+		// bytea_output = hex
+		s = s[2:] // trim off leading "\\x"
+		result = make([]byte, hex.DecodedLen(len(s)))
+		_, err := hex.Decode(result, s)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// bytea_output = escape
+		for len(s) > 0 {
+			if s[0] == '\\' {
+				// escaped '\\'
+				if len(s) >= 2 && s[1] == '\\' {
+					result = append(result, '\\')
+					s = s[2:]
+					continue
+				}
+
+				// '\\' followed by an octal number
+				if len(s) < 4 {
+					return nil, fmt.Errorf("invalid bytea sequence %v", s)
+				}
+				r, err := strconv.ParseInt(string(s[1:4]), 8, 9)
+				if err != nil {
+					return nil, fmt.Errorf("could not parse bytea value: %s", err.Error())
+				}
+				result = append(result, byte(r))
+				s = s[4:]
+			} else {
+				// We hit an unescaped, raw byte.  Try to read in as many as
+				// possible in one go.
+				i := bytes.IndexByte(s, '\\')
+				if i == -1 {
+					result = append(result, s...)
+					break
+				}
+				result = append(result, s[:i]...)
+				s = s[i:]
+			}
+		}
+	}
+
+	return result, nil
+}
+
+func encodeBytea(serverVersion int, v []byte) (result []byte) {
+	if serverVersion >= 90000 {
+		// Use the hex format if we know that the server supports it
+		result = make([]byte, 2+hex.EncodedLen(len(v)))
+		result[0] = '\\'
+		result[1] = 'x'
+		hex.Encode(result[2:], v)
+	} else {
+		// .. or resort to "escape"
+		for _, b := range v {
+			if b == '\\' {
+				result = append(result, '\\', '\\')
+			} else if b < 0x20 || b > 0x7e {
+				result = append(result, []byte(fmt.Sprintf("\\%03o", b))...)
+			} else {
+				result = append(result, b)
+			}
+		}
+	}
+
+	return result
+}
+
+// NullTime represents a time.Time that may be null. NullTime implements the
+// sql.Scanner interface so it can be used as a scan destination, similar to
+// sql.NullString.
+type NullTime struct {
+	Time  time.Time
+	Valid bool // Valid is true if Time is not NULL
+}
+
+// Scan implements the Scanner interface.
+func (nt *NullTime) Scan(value interface{}) error {
+	nt.Time, nt.Valid = value.(time.Time)
+	return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+	if !nt.Valid {
+		return nil, nil
+	}
+	return nt.Time, nil
+}

+ 515 - 0
vendor/github.com/lib/pq/error.go

@@ -0,0 +1,515 @@
+package pq
+
+import (
+	"database/sql/driver"
+	"fmt"
+	"io"
+	"net"
+	"runtime"
+)
+
+// Error severities
+const (
+	Efatal   = "FATAL"
+	Epanic   = "PANIC"
+	Ewarning = "WARNING"
+	Enotice  = "NOTICE"
+	Edebug   = "DEBUG"
+	Einfo    = "INFO"
+	Elog     = "LOG"
+)
+
+// Error represents an error communicating with the server.
+//
+// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields
+type Error struct {
+	Severity         string
+	Code             ErrorCode
+	Message          string
+	Detail           string
+	Hint             string
+	Position         string
+	InternalPosition string
+	InternalQuery    string
+	Where            string
+	Schema           string
+	Table            string
+	Column           string
+	DataTypeName     string
+	Constraint       string
+	File             string
+	Line             string
+	Routine          string
+}
+
+// ErrorCode is a five-character error code.
+type ErrorCode string
+
+// Name returns a more human friendly rendering of the error code, namely the
+// "condition name".
+//
+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
+// details.
+func (ec ErrorCode) Name() string {
+	return errorCodeNames[ec]
+}
+
+// ErrorClass is only the class part of an error code.
+type ErrorClass string
+
+// Name returns the condition name of an error class.  It is equivalent to the
+// condition name of the "standard" error code (i.e. the one having the last
+// three characters "000").
+func (ec ErrorClass) Name() string {
+	return errorCodeNames[ErrorCode(ec+"000")]
+}
+
+// Class returns the error class, e.g. "28".
+//
+// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for
+// details.
+func (ec ErrorCode) Class() ErrorClass {
+	return ErrorClass(ec[0:2])
+}
+
+// errorCodeNames is a mapping between the five-character error codes and the
+// human readable "condition names". It is derived from the list at
+// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
+var errorCodeNames = map[ErrorCode]string{
+	// Class 00 - Successful Completion
+	"00000": "successful_completion",
+	// Class 01 - Warning
+	"01000": "warning",
+	"0100C": "dynamic_result_sets_returned",
+	"01008": "implicit_zero_bit_padding",
+	"01003": "null_value_eliminated_in_set_function",
+	"01007": "privilege_not_granted",
+	"01006": "privilege_not_revoked",
+	"01004": "string_data_right_truncation",
+	"01P01": "deprecated_feature",
+	// Class 02 - No Data (this is also a warning class per the SQL standard)
+	"02000": "no_data",
+	"02001": "no_additional_dynamic_result_sets_returned",
+	// Class 03 - SQL Statement Not Yet Complete
+	"03000": "sql_statement_not_yet_complete",
+	// Class 08 - Connection Exception
+	"08000": "connection_exception",
+	"08003": "connection_does_not_exist",
+	"08006": "connection_failure",
+	"08001": "sqlclient_unable_to_establish_sqlconnection",
+	"08004": "sqlserver_rejected_establishment_of_sqlconnection",
+	"08007": "transaction_resolution_unknown",
+	"08P01": "protocol_violation",
+	// Class 09 - Triggered Action Exception
+	"09000": "triggered_action_exception",
+	// Class 0A - Feature Not Supported
+	"0A000": "feature_not_supported",
+	// Class 0B - Invalid Transaction Initiation
+	"0B000": "invalid_transaction_initiation",
+	// Class 0F - Locator Exception
+	"0F000": "locator_exception",
+	"0F001": "invalid_locator_specification",
+	// Class 0L - Invalid Grantor
+	"0L000": "invalid_grantor",
+	"0LP01": "invalid_grant_operation",
+	// Class 0P - Invalid Role Specification
+	"0P000": "invalid_role_specification",
+	// Class 0Z - Diagnostics Exception
+	"0Z000": "diagnostics_exception",
+	"0Z002": "stacked_diagnostics_accessed_without_active_handler",
+	// Class 20 - Case Not Found
+	"20000": "case_not_found",
+	// Class 21 - Cardinality Violation
+	"21000": "cardinality_violation",
+	// Class 22 - Data Exception
+	"22000": "data_exception",
+	"2202E": "array_subscript_error",
+	"22021": "character_not_in_repertoire",
+	"22008": "datetime_field_overflow",
+	"22012": "division_by_zero",
+	"22005": "error_in_assignment",
+	"2200B": "escape_character_conflict",
+	"22022": "indicator_overflow",
+	"22015": "interval_field_overflow",
+	"2201E": "invalid_argument_for_logarithm",
+	"22014": "invalid_argument_for_ntile_function",
+	"22016": "invalid_argument_for_nth_value_function",
+	"2201F": "invalid_argument_for_power_function",
+	"2201G": "invalid_argument_for_width_bucket_function",
+	"22018": "invalid_character_value_for_cast",
+	"22007": "invalid_datetime_format",
+	"22019": "invalid_escape_character",
+	"2200D": "invalid_escape_octet",
+	"22025": "invalid_escape_sequence",
+	"22P06": "nonstandard_use_of_escape_character",
+	"22010": "invalid_indicator_parameter_value",
+	"22023": "invalid_parameter_value",
+	"2201B": "invalid_regular_expression",
+	"2201W": "invalid_row_count_in_limit_clause",
+	"2201X": "invalid_row_count_in_result_offset_clause",
+	"22009": "invalid_time_zone_displacement_value",
+	"2200C": "invalid_use_of_escape_character",
+	"2200G": "most_specific_type_mismatch",
+	"22004": "null_value_not_allowed",
+	"22002": "null_value_no_indicator_parameter",
+	"22003": "numeric_value_out_of_range",
+	"2200H": "sequence_generator_limit_exceeded",
+	"22026": "string_data_length_mismatch",
+	"22001": "string_data_right_truncation",
+	"22011": "substring_error",
+	"22027": "trim_error",
+	"22024": "unterminated_c_string",
+	"2200F": "zero_length_character_string",
+	"22P01": "floating_point_exception",
+	"22P02": "invalid_text_representation",
+	"22P03": "invalid_binary_representation",
+	"22P04": "bad_copy_file_format",
+	"22P05": "untranslatable_character",
+	"2200L": "not_an_xml_document",
+	"2200M": "invalid_xml_document",
+	"2200N": "invalid_xml_content",
+	"2200S": "invalid_xml_comment",
+	"2200T": "invalid_xml_processing_instruction",
+	// Class 23 - Integrity Constraint Violation
+	"23000": "integrity_constraint_violation",
+	"23001": "restrict_violation",
+	"23502": "not_null_violation",
+	"23503": "foreign_key_violation",
+	"23505": "unique_violation",
+	"23514": "check_violation",
+	"23P01": "exclusion_violation",
+	// Class 24 - Invalid Cursor State
+	"24000": "invalid_cursor_state",
+	// Class 25 - Invalid Transaction State
+	"25000": "invalid_transaction_state",
+	"25001": "active_sql_transaction",
+	"25002": "branch_transaction_already_active",
+	"25008": "held_cursor_requires_same_isolation_level",
+	"25003": "inappropriate_access_mode_for_branch_transaction",
+	"25004": "inappropriate_isolation_level_for_branch_transaction",
+	"25005": "no_active_sql_transaction_for_branch_transaction",
+	"25006": "read_only_sql_transaction",
+	"25007": "schema_and_data_statement_mixing_not_supported",
+	"25P01": "no_active_sql_transaction",
+	"25P02": "in_failed_sql_transaction",
+	// Class 26 - Invalid SQL Statement Name
+	"26000": "invalid_sql_statement_name",
+	// Class 27 - Triggered Data Change Violation
+	"27000": "triggered_data_change_violation",
+	// Class 28 - Invalid Authorization Specification
+	"28000": "invalid_authorization_specification",
+	"28P01": "invalid_password",
+	// Class 2B - Dependent Privilege Descriptors Still Exist
+	"2B000": "dependent_privilege_descriptors_still_exist",
+	"2BP01": "dependent_objects_still_exist",
+	// Class 2D - Invalid Transaction Termination
+	"2D000": "invalid_transaction_termination",
+	// Class 2F - SQL Routine Exception
+	"2F000": "sql_routine_exception",
+	"2F005": "function_executed_no_return_statement",
+	"2F002": "modifying_sql_data_not_permitted",
+	"2F003": "prohibited_sql_statement_attempted",
+	"2F004": "reading_sql_data_not_permitted",
+	// Class 34 - Invalid Cursor Name
+	"34000": "invalid_cursor_name",
+	// Class 38 - External Routine Exception
+	"38000": "external_routine_exception",
+	"38001": "containing_sql_not_permitted",
+	"38002": "modifying_sql_data_not_permitted",
+	"38003": "prohibited_sql_statement_attempted",
+	"38004": "reading_sql_data_not_permitted",
+	// Class 39 - External Routine Invocation Exception
+	"39000": "external_routine_invocation_exception",
+	"39001": "invalid_sqlstate_returned",
+	"39004": "null_value_not_allowed",
+	"39P01": "trigger_protocol_violated",
+	"39P02": "srf_protocol_violated",
+	// Class 3B - Savepoint Exception
+	"3B000": "savepoint_exception",
+	"3B001": "invalid_savepoint_specification",
+	// Class 3D - Invalid Catalog Name
+	"3D000": "invalid_catalog_name",
+	// Class 3F - Invalid Schema Name
+	"3F000": "invalid_schema_name",
+	// Class 40 - Transaction Rollback
+	"40000": "transaction_rollback",
+	"40002": "transaction_integrity_constraint_violation",
+	"40001": "serialization_failure",
+	"40003": "statement_completion_unknown",
+	"40P01": "deadlock_detected",
+	// Class 42 - Syntax Error or Access Rule Violation
+	"42000": "syntax_error_or_access_rule_violation",
+	"42601": "syntax_error",
+	"42501": "insufficient_privilege",
+	"42846": "cannot_coerce",
+	"42803": "grouping_error",
+	"42P20": "windowing_error",
+	"42P19": "invalid_recursion",
+	"42830": "invalid_foreign_key",
+	"42602": "invalid_name",
+	"42622": "name_too_long",
+	"42939": "reserved_name",
+	"42804": "datatype_mismatch",
+	"42P18": "indeterminate_datatype",
+	"42P21": "collation_mismatch",
+	"42P22": "indeterminate_collation",
+	"42809": "wrong_object_type",
+	"42703": "undefined_column",
+	"42883": "undefined_function",
+	"42P01": "undefined_table",
+	"42P02": "undefined_parameter",
+	"42704": "undefined_object",
+	"42701": "duplicate_column",
+	"42P03": "duplicate_cursor",
+	"42P04": "duplicate_database",
+	"42723": "duplicate_function",
+	"42P05": "duplicate_prepared_statement",
+	"42P06": "duplicate_schema",
+	"42P07": "duplicate_table",
+	"42712": "duplicate_alias",
+	"42710": "duplicate_object",
+	"42702": "ambiguous_column",
+	"42725": "ambiguous_function",
+	"42P08": "ambiguous_parameter",
+	"42P09": "ambiguous_alias",
+	"42P10": "invalid_column_reference",
+	"42611": "invalid_column_definition",
+	"42P11": "invalid_cursor_definition",
+	"42P12": "invalid_database_definition",
+	"42P13": "invalid_function_definition",
+	"42P14": "invalid_prepared_statement_definition",
+	"42P15": "invalid_schema_definition",
+	"42P16": "invalid_table_definition",
+	"42P17": "invalid_object_definition",
+	// Class 44 - WITH CHECK OPTION Violation
+	"44000": "with_check_option_violation",
+	// Class 53 - Insufficient Resources
+	"53000": "insufficient_resources",
+	"53100": "disk_full",
+	"53200": "out_of_memory",
+	"53300": "too_many_connections",
+	"53400": "configuration_limit_exceeded",
+	// Class 54 - Program Limit Exceeded
+	"54000": "program_limit_exceeded",
+	"54001": "statement_too_complex",
+	"54011": "too_many_columns",
+	"54023": "too_many_arguments",
+	// Class 55 - Object Not In Prerequisite State
+	"55000": "object_not_in_prerequisite_state",
+	"55006": "object_in_use",
+	"55P02": "cant_change_runtime_param",
+	"55P03": "lock_not_available",
+	// Class 57 - Operator Intervention
+	"57000": "operator_intervention",
+	"57014": "query_canceled",
+	"57P01": "admin_shutdown",
+	"57P02": "crash_shutdown",
+	"57P03": "cannot_connect_now",
+	"57P04": "database_dropped",
+	// Class 58 - System Error (errors external to PostgreSQL itself)
+	"58000": "system_error",
+	"58030": "io_error",
+	"58P01": "undefined_file",
+	"58P02": "duplicate_file",
+	// Class F0 - Configuration File Error
+	"F0000": "config_file_error",
+	"F0001": "lock_file_exists",
+	// Class HV - Foreign Data Wrapper Error (SQL/MED)
+	"HV000": "fdw_error",
+	"HV005": "fdw_column_name_not_found",
+	"HV002": "fdw_dynamic_parameter_value_needed",
+	"HV010": "fdw_function_sequence_error",
+	"HV021": "fdw_inconsistent_descriptor_information",
+	"HV024": "fdw_invalid_attribute_value",
+	"HV007": "fdw_invalid_column_name",
+	"HV008": "fdw_invalid_column_number",
+	"HV004": "fdw_invalid_data_type",
+	"HV006": "fdw_invalid_data_type_descriptors",
+	"HV091": "fdw_invalid_descriptor_field_identifier",
+	"HV00B": "fdw_invalid_handle",
+	"HV00C": "fdw_invalid_option_index",
+	"HV00D": "fdw_invalid_option_name",
+	"HV090": "fdw_invalid_string_length_or_buffer_length",
+	"HV00A": "fdw_invalid_string_format",
+	"HV009": "fdw_invalid_use_of_null_pointer",
+	"HV014": "fdw_too_many_handles",
+	"HV001": "fdw_out_of_memory",
+	"HV00P": "fdw_no_schemas",
+	"HV00J": "fdw_option_name_not_found",
+	"HV00K": "fdw_reply_handle",
+	"HV00Q": "fdw_schema_not_found",
+	"HV00R": "fdw_table_not_found",
+	"HV00L": "fdw_unable_to_create_execution",
+	"HV00M": "fdw_unable_to_create_reply",
+	"HV00N": "fdw_unable_to_establish_connection",
+	// Class P0 - PL/pgSQL Error
+	"P0000": "plpgsql_error",
+	"P0001": "raise_exception",
+	"P0002": "no_data_found",
+	"P0003": "too_many_rows",
+	// Class XX - Internal Error
+	"XX000": "internal_error",
+	"XX001": "data_corrupted",
+	"XX002": "index_corrupted",
+}
+
+func parseError(r *readBuf) *Error {
+	err := new(Error)
+	for t := r.byte(); t != 0; t = r.byte() {
+		msg := r.string()
+		switch t {
+		case 'S':
+			err.Severity = msg
+		case 'C':
+			err.Code = ErrorCode(msg)
+		case 'M':
+			err.Message = msg
+		case 'D':
+			err.Detail = msg
+		case 'H':
+			err.Hint = msg
+		case 'P':
+			err.Position = msg
+		case 'p':
+			err.InternalPosition = msg
+		case 'q':
+			err.InternalQuery = msg
+		case 'W':
+			err.Where = msg
+		case 's':
+			err.Schema = msg
+		case 't':
+			err.Table = msg
+		case 'c':
+			err.Column = msg
+		case 'd':
+			err.DataTypeName = msg
+		case 'n':
+			err.Constraint = msg
+		case 'F':
+			err.File = msg
+		case 'L':
+			err.Line = msg
+		case 'R':
+			err.Routine = msg
+		}
+	}
+	return err
+}
+
+// Fatal returns true if the Error Severity is fatal.
+func (err *Error) Fatal() bool {
+	return err.Severity == Efatal
+}
+
+// Get implements the legacy PGError interface. New code should use the fields
+// of the Error struct directly.
+func (err *Error) Get(k byte) (v string) {
+	switch k {
+	case 'S':
+		return err.Severity
+	case 'C':
+		return string(err.Code)
+	case 'M':
+		return err.Message
+	case 'D':
+		return err.Detail
+	case 'H':
+		return err.Hint
+	case 'P':
+		return err.Position
+	case 'p':
+		return err.InternalPosition
+	case 'q':
+		return err.InternalQuery
+	case 'W':
+		return err.Where
+	case 's':
+		return err.Schema
+	case 't':
+		return err.Table
+	case 'c':
+		return err.Column
+	case 'd':
+		return err.DataTypeName
+	case 'n':
+		return err.Constraint
+	case 'F':
+		return err.File
+	case 'L':
+		return err.Line
+	case 'R':
+		return err.Routine
+	}
+	return ""
+}
+
+func (err Error) Error() string {
+	return "pq: " + err.Message
+}
+
+// PGError is an interface used by previous versions of pq. It is provided
+// only to support legacy code. New code should use the Error type.
+type PGError interface {
+	Error() string
+	Fatal() bool
+	Get(k byte) (v string)
+}
+
+func errorf(s string, args ...interface{}) {
+	panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
+}
+
+// TODO(ainar-g) Rename to errorf after removing panics.
+func fmterrorf(s string, args ...interface{}) error {
+	return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))
+}
+
+func errRecoverNoErrBadConn(err *error) {
+	e := recover()
+	if e == nil {
+		// Do nothing
+		return
+	}
+	var ok bool
+	*err, ok = e.(error)
+	if !ok {
+		*err = fmt.Errorf("pq: unexpected error: %#v", e)
+	}
+}
+
+func (c *conn) errRecover(err *error) {
+	e := recover()
+	switch v := e.(type) {
+	case nil:
+		// Do nothing
+	case runtime.Error:
+		c.bad = true
+		panic(v)
+	case *Error:
+		if v.Fatal() {
+			*err = driver.ErrBadConn
+		} else {
+			*err = v
+		}
+	case *net.OpError:
+		c.bad = true
+		*err = v
+	case error:
+		if v == io.EOF || v.(error).Error() == "remote error: handshake failure" {
+			*err = driver.ErrBadConn
+		} else {
+			*err = v
+		}
+
+	default:
+		c.bad = true
+		panic(fmt.Sprintf("unknown error: %#v", e))
+	}
+
+	// Any time we return ErrBadConn, we need to remember it since *Tx doesn't
+	// mark the connection bad in database/sql.
+	if *err == driver.ErrBadConn {
+		c.bad = true
+	}
+}

+ 98 - 0
vendor/github.com/lib/pq/example/listen/doc.go

@@ -0,0 +1,98 @@
+/*
+
+Package listen is a self-contained Go program which uses the LISTEN / NOTIFY
+mechanism to avoid polling the database while waiting for more work to arrive.
+
+    //
+    // You can see the program in action by defining a function similar to
+    // the following:
+    //
+    // CREATE OR REPLACE FUNCTION public.get_work()
+    //   RETURNS bigint
+    //   LANGUAGE sql
+    //   AS $$
+    //     SELECT CASE WHEN random() >= 0.2 THEN int8 '1' END
+    //   $$
+    // ;
+
+    package main
+
+    import (
+        "database/sql"
+        "fmt"
+        "time"
+
+        "github.com/lib/pq"
+    )
+
+    func doWork(db *sql.DB, work int64) {
+        // work here
+    }
+
+    func getWork(db *sql.DB) {
+        for {
+            // get work from the database here
+            var work sql.NullInt64
+            err := db.QueryRow("SELECT get_work()").Scan(&work)
+            if err != nil {
+                fmt.Println("call to get_work() failed: ", err)
+                time.Sleep(10 * time.Second)
+                continue
+            }
+            if !work.Valid {
+                // no more work to do
+                fmt.Println("ran out of work")
+                return
+            }
+
+            fmt.Println("starting work on ", work.Int64)
+            go doWork(db, work.Int64)
+        }
+    }
+
+    func waitForNotification(l *pq.Listener) {
+        select {
+            case <-l.Notify:
+                fmt.Println("received notification, new work available")
+            case <-time.After(90 * time.Second):
+                go l.Ping()
+                // Check if there's more work available, just in case it takes
+                // a while for the Listener to notice connection loss and
+                // reconnect.
+                fmt.Println("received no work for 90 seconds, checking for new work")
+        }
+    }
+
+    func main() {
+        var conninfo string = ""
+
+        db, err := sql.Open("postgres", conninfo)
+        if err != nil {
+            panic(err)
+        }
+
+        reportProblem := func(ev pq.ListenerEventType, err error) {
+            if err != nil {
+                fmt.Println(err.Error())
+            }
+        }
+
+        minReconn := 10 * time.Second
+        maxReconn := time.Minute
+        listener := pq.NewListener(conninfo, minReconn, maxReconn, reportProblem)
+        err = listener.Listen("getwork")
+        if err != nil {
+            panic(err)
+        }
+
+        fmt.Println("entering main loop")
+        for {
+            // process all available work before waiting for notifications
+            getWork(db)
+            waitForNotification(listener)
+        }
+    }
+
+
+*/
+package listen

+ 118 - 0
vendor/github.com/lib/pq/hstore/hstore.go

@@ -0,0 +1,118 @@
+package hstore
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"strings"
+)
+
+// Hstore is a wrapper for transferring Hstore values back and forth easily.
+type Hstore struct {
+	Map map[string]sql.NullString
+}
+
+// escapes and quotes hstore keys/values
+// s should be a sql.NullString or string
+func hQuote(s interface{}) string {
+	var str string
+	switch v := s.(type) {
+	case sql.NullString:
+		if !v.Valid {
+			return "NULL"
+		}
+		str = v.String
+	case string:
+		str = v
+	default:
+		panic("not a string or sql.NullString")
+	}
+
+	str = strings.Replace(str, "\\", "\\\\", -1)
+	return `"` + strings.Replace(str, "\"", "\\\"", -1) + `"`
+}
+
+// Scan implements the Scanner interface.
+//
+// Note h.Map is reallocated before the scan to clear existing values. If the
+// hstore column's database value is NULL, then h.Map is set to nil instead.
+func (h *Hstore) Scan(value interface{}) error {
+	if value == nil {
+		h.Map = nil
+		return nil
+	}
+	h.Map = make(map[string]sql.NullString)
+	var b byte
+	pair := [][]byte{{}, {}}
+	pi := 0
+	inQuote := false
+	didQuote := false
+	sawSlash := false
+	bindex := 0
+	for bindex, b = range value.([]byte) {
+		if sawSlash {
+			pair[pi] = append(pair[pi], b)
+			sawSlash = false
+			continue
+		}
+
+		switch b {
+		case '\\':
+			sawSlash = true
+			continue
+		case '"':
+			inQuote = !inQuote
+			if !didQuote {
+				didQuote = true
+			}
+			continue
+		default:
+			if !inQuote {
+				switch b {
+				case ' ', '\t', '\n', '\r':
+					continue
+				case '=':
+					continue
+				case '>':
+					pi = 1
+					didQuote = false
+					continue
+				case ',':
+					s := string(pair[1])
+					if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
+						h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
+					} else {
+						h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
+					}
+					pair[0] = []byte{}
+					pair[1] = []byte{}
+					pi = 0
+					continue
+				}
+			}
+		}
+		pair[pi] = append(pair[pi], b)
+	}
+	if bindex > 0 {
+		s := string(pair[1])
+		if !didQuote && len(s) == 4 && strings.ToLower(s) == "null" {
+			h.Map[string(pair[0])] = sql.NullString{String: "", Valid: false}
+		} else {
+			h.Map[string(pair[0])] = sql.NullString{String: string(pair[1]), Valid: true}
+		}
+	}
+	return nil
+}
+
+// Value implements the driver Valuer interface. Note if h.Map is nil, the
+// database column value will be set to NULL.
+func (h Hstore) Value() (driver.Value, error) {
+	if h.Map == nil {
+		return nil, nil
+	}
+	parts := []string{}
+	for key, val := range h.Map {
+		thispart := hQuote(key) + "=>" + hQuote(val)
+		parts = append(parts, thispart)
+	}
+	return []byte(strings.Join(parts, ",")), nil
+}

+ 797 - 0
vendor/github.com/lib/pq/notify.go

@@ -0,0 +1,797 @@
+package pq
+
+// Package pq is a pure Go Postgres driver for the database/sql package.
+// This module contains support for Postgres LISTEN/NOTIFY.
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Notification represents a single notification from the database.
+type Notification struct {
+	// Process ID (PID) of the notifying postgres backend.
+	BePid int
+	// Name of the channel the notification was sent on.
+	Channel string
+	// Payload, or the empty string if unspecified.
+	Extra string
+}
+
+func recvNotification(r *readBuf) *Notification {
+	bePid := r.int32()
+	channel := r.string()
+	extra := r.string()
+
+	return &Notification{bePid, channel, extra}
+}
+
+const (
+	connStateIdle int32 = iota
+	connStateExpectResponse
+	connStateExpectReadyForQuery
+)
+
+type message struct {
+	typ byte
+	err error
+}
+
+var errListenerConnClosed = errors.New("pq: ListenerConn has been closed")
+
+// ListenerConn is a low-level interface for waiting for notifications.  You
+// should use Listener instead.
+type ListenerConn struct {
+	// guards cn and err
+	connectionLock sync.Mutex
+	cn             *conn
+	err            error
+
+	connState int32
+
+	// the sending goroutine will be holding this lock
+	senderLock sync.Mutex
+
+	notificationChan chan<- *Notification
+
+	replyChan chan message
+}
+
+// NewListenerConn creates a new ListenerConn. Use NewListener instead.
+func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) {
+	return newDialListenerConn(defaultDialer{}, name, notificationChan)
+}
+
+func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) {
+	cn, err := DialOpen(d, name)
+	if err != nil {
+		return nil, err
+	}
+
+	l := &ListenerConn{
+		cn:               cn.(*conn),
+		notificationChan: c,
+		connState:        connStateIdle,
+		replyChan:        make(chan message, 2),
+	}
+
+	go l.listenerConnMain()
+
+	return l, nil
+}
+
+// We can only allow one goroutine at a time to be running a query on the
+// connection for various reasons, so the goroutine sending on the connection
+// must be holding senderLock.
+//
+// Returns an error if an unrecoverable error has occurred and the ListenerConn
+// should be abandoned.
+func (l *ListenerConn) acquireSenderLock() error {
+	// we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery
+	l.senderLock.Lock()
+
+	l.connectionLock.Lock()
+	err := l.err
+	l.connectionLock.Unlock()
+	if err != nil {
+		l.senderLock.Unlock()
+		return err
+	}
+	return nil
+}
+
+func (l *ListenerConn) releaseSenderLock() {
+	l.senderLock.Unlock()
+}
+
+// setState advances the protocol state to newState.  Returns false if moving
+// to that state from the current state is not allowed.
+func (l *ListenerConn) setState(newState int32) bool {
+	var expectedState int32
+
+	switch newState {
+	case connStateIdle:
+		expectedState = connStateExpectReadyForQuery
+	case connStateExpectResponse:
+		expectedState = connStateIdle
+	case connStateExpectReadyForQuery:
+		expectedState = connStateExpectResponse
+	default:
+		panic(fmt.Sprintf("unexpected listenerConnState %d", newState))
+	}
+
+	return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState)
+}
+
+// Main logic is here: receive messages from the postgres backend, forward
+// notifications and query replies and keep the internal state in sync with the
+// protocol state.  Returns when the connection has been lost, is about to go
+// away or should be discarded because we couldn't agree on the state with the
+// server backend.
+func (l *ListenerConn) listenerConnLoop() (err error) {
+	defer errRecoverNoErrBadConn(&err)
+
+	r := &readBuf{}
+	for {
+		t, err := l.cn.recvMessage(r)
+		if err != nil {
+			return err
+		}
+
+		switch t {
+		case 'A':
+			// recvNotification copies all the data so we don't need to worry
+			// about the scratch buffer being overwritten.
+			l.notificationChan <- recvNotification(r)
+
+		case 'T', 'D':
+			// only used by tests; ignore
+
+		case 'E':
+			// We might receive an ErrorResponse even when not in a query; it
+			// is expected that the server will close the connection after
+			// that, but we should make sure that the error we display is the
+			// one from the stray ErrorResponse, not io.ErrUnexpectedEOF.
+			if !l.setState(connStateExpectReadyForQuery) {
+				return parseError(r)
+			}
+			l.replyChan <- message{t, parseError(r)}
+
+		case 'C', 'I':
+			if !l.setState(connStateExpectReadyForQuery) {
+				// protocol out of sync
+				return fmt.Errorf("unexpected CommandComplete")
+			}
+			// ExecSimpleQuery doesn't need to know about this message
+
+		case 'Z':
+			if !l.setState(connStateIdle) {
+				// protocol out of sync
+				return fmt.Errorf("unexpected ReadyForQuery")
+			}
+			l.replyChan <- message{t, nil}
+
+		case 'N', 'S':
+			// ignore
+		default:
+			return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t)
+		}
+	}
+}
+
+// This is the main routine for the goroutine receiving on the database
+// connection.  Most of the main logic is in listenerConnLoop.
+func (l *ListenerConn) listenerConnMain() {
+	err := l.listenerConnLoop()
+
+	// listenerConnLoop terminated; we're done, but we still have to clean up.
+	// Make sure nobody tries to start any new queries by making sure the err
+	// pointer is set.  It is important that we do not overwrite its value; a
+	// connection could be closed by either this goroutine or one sending on
+	// the connection -- whoever closes the connection is assumed to have the
+	// more meaningful error message (as the other one will probably get
+	// net.errClosed), so that goroutine sets the error we expose while the
+	// other error is discarded.  If the connection is lost while two
+	// goroutines are operating on the socket, it probably doesn't matter which
+	// error we expose so we don't try to do anything more complex.
+	l.connectionLock.Lock()
+	if l.err == nil {
+		l.err = err
+	}
+	l.cn.Close()
+	l.connectionLock.Unlock()
+
+	// There might be a query in-flight; make sure nobody's waiting for a
+	// response to it, since there's not going to be one.
+	close(l.replyChan)
+
+	// let the listener know we're done
+	close(l.notificationChan)
+
+	// this ListenerConn is done
+}
+
+// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) Listen(channel string) (bool, error) {
+	return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
+}
+
+// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) Unlisten(channel string) (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
+}
+
+// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) UnlistenAll() (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN *")
+}
+
+// Ping the remote server to make sure it's alive.  Non-nil error means the
+// connection has failed and should be abandoned.
+func (l *ListenerConn) Ping() error {
+	sent, err := l.ExecSimpleQuery("")
+	if !sent {
+		return err
+	}
+	if err != nil {
+		// shouldn't happen
+		panic(err)
+	}
+	return nil
+}
+
+// Attempt to send a query on the connection.  Returns an error if sending the
+// query failed, and the caller should initiate closure of this connection.
+// The caller must be holding senderLock (see acquireSenderLock and
+// releaseSenderLock).
+func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
+	defer errRecoverNoErrBadConn(&err)
+
+	// must set connection state before sending the query
+	if !l.setState(connStateExpectResponse) {
+		panic("two queries running at the same time")
+	}
+
+	// Can't use l.cn.writeBuf here because it uses the scratch buffer which
+	// might get overwritten by listenerConnLoop.
+	b := &writeBuf{
+		buf: []byte("Q\x00\x00\x00\x00"),
+		pos: 1,
+	}
+	b.string(q)
+	l.cn.send(b)
+
+	return nil
+}
+
+// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
+// parameters) on the connection. The possible return values are:
+//   1) "executed" is true; the query was executed to completion on the
+//      database server.  If the query failed, err will be set to the error
+//      returned by the database, otherwise err will be nil.
+//   2) If "executed" is false, the query could not be executed on the remote
+//      server.  err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+	if err = l.acquireSenderLock(); err != nil {
+		return false, err
+	}
+	defer l.releaseSenderLock()
+
+	err = l.sendSimpleQuery(q)
+	if err != nil {
+		// We can't know what state the protocol is in, so we need to abandon
+		// this connection.
+		l.connectionLock.Lock()
+		// Set the error pointer if it hasn't been set already; see
+		// listenerConnMain.
+		if l.err == nil {
+			l.err = err
+		}
+		l.connectionLock.Unlock()
+		l.cn.c.Close()
+		return false, err
+	}
+
+	// now we just wait for a reply..
+	for {
+		m, ok := <-l.replyChan
+		if !ok {
+			// We lost the connection to server, don't bother waiting for a
+			// a response.  err should have been set already.
+			l.connectionLock.Lock()
+			err := l.err
+			l.connectionLock.Unlock()
+			return false, err
+		}
+		switch m.typ {
+		case 'Z':
+			// sanity check
+			if m.err != nil {
+				panic("m.err != nil")
+			}
+			// done; err might or might not be set
+			return true, err
+
+		case 'E':
+			// sanity check
+			if m.err == nil {
+				panic("m.err == nil")
+			}
+			// server responded with an error; ReadyForQuery to follow
+			err = m.err
+
+		default:
+			return false, fmt.Errorf("unknown response for simple query: %q", m.typ)
+		}
+	}
+}
+
+// Close closes the connection.
+func (l *ListenerConn) Close() error {
+	l.connectionLock.Lock()
+	if l.err != nil {
+		l.connectionLock.Unlock()
+		return errListenerConnClosed
+	}
+	l.err = errListenerConnClosed
+	l.connectionLock.Unlock()
+	// We can't send anything on the connection without holding senderLock.
+	// Simply close the net.Conn to wake up everyone operating on it.
+	return l.cn.c.Close()
+}
+
+// Err returns the reason the connection was closed. It is not safe to call
+// this function until l.Notify has been closed.
+func (l *ListenerConn) Err() error {
+	return l.err
+}
+
+var errListenerClosed = errors.New("pq: Listener has been closed")
+
+// ErrChannelAlreadyOpen is returned from Listen when a channel is already
+// open.
+var ErrChannelAlreadyOpen = errors.New("pq: channel is already open")
+
+// ErrChannelNotOpen is returned from Unlisten when a channel is not open.
+var ErrChannelNotOpen = errors.New("pq: channel is not open")
+
+// ListenerEventType is an enumeration of listener event types.
+type ListenerEventType int
+
+const (
+	// ListenerEventConnected is emitted only when the database connection
+	// has been initially established. The err argument of the callback
+	// will always be nil.
+	ListenerEventConnected ListenerEventType = iota
+
+	// ListenerEventDisconnected is emitted after a database connection has
+	// been lost, either because of an error or because Close has been
+	// called. The err argument will be set to the reason the database
+	// connection was lost.
+	ListenerEventDisconnected
+
+	// ListenerEventReconnected is emitted after a database connection has
+	// been re-established after connection loss. The err argument of the
+	// callback will always be nil. After this event has been emitted, a
+	// nil pq.Notification is sent on the Listener.Notify channel.
+	ListenerEventReconnected
+
+	// ListenerEventConnectionAttemptFailed is emitted after a connection
+	// to the database was attempted, but failed. The err argument will be
+	// set to an error describing why the connection attempt did not
+	// succeed.
+	ListenerEventConnectionAttemptFailed
+)
+
+// EventCallbackType is the event callback type. See also ListenerEventType
+// constants' documentation.
+type EventCallbackType func(event ListenerEventType, err error)
+
+// Listener provides an interface for listening to notifications from a
+// PostgreSQL database.  For general usage information, see section
+// "Notifications".
+//
+// Listener can safely be used from concurrently running goroutines.
+type Listener struct {
+	// Channel for receiving notifications from the database.  In some cases a
+	// nil value will be sent.  See section "Notifications" above.
+	Notify chan *Notification
+
+	name                 string
+	minReconnectInterval time.Duration
+	maxReconnectInterval time.Duration
+	dialer               Dialer
+	eventCallback        EventCallbackType
+
+	lock                 sync.Mutex
+	isClosed             bool
+	reconnectCond        *sync.Cond
+	cn                   *ListenerConn
+	connNotificationChan <-chan *Notification
+	channels             map[string]struct{}
+}
+
+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
+//
+// name should be set to a connection string to be used to establish the
+// database connection (see section "Connection String Parameters" above).
+//
+// minReconnectInterval controls the duration to wait before trying to
+// re-establish the database connection after connection loss.  After each
+// consecutive failure this interval is doubled, until maxReconnectInterval is
+// reached.  Successfully completing the connection establishment procedure
+// resets the interval back to minReconnectInterval.
+//
+// The last parameter eventCallback can be set to a function which will be
+// called by the Listener when the state of the underlying database connection
+// changes.  This callback will be called by the goroutine which dispatches the
+// notifications over the Notify channel, so you should try to avoid doing
+// potentially time-consuming operations from the callback.
+func NewListener(name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+	return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
+}
+
+// NewDialListener is like NewListener but it takes a Dialer.
+func NewDialListener(d Dialer,
+	name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+
+	l := &Listener{
+		name:                 name,
+		minReconnectInterval: minReconnectInterval,
+		maxReconnectInterval: maxReconnectInterval,
+		dialer:               d,
+		eventCallback:        eventCallback,
+
+		channels: make(map[string]struct{}),
+
+		Notify: make(chan *Notification, 32),
+	}
+	l.reconnectCond = sync.NewCond(&l.lock)
+
+	go l.listenerMain()
+
+	return l
+}
+
+// NotificationChannel returns the notification channel for this listener.
+// This is the same channel as Notify, and will not be recreated during the
+// life time of the Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+	return l.Notify
+}
+
+// Listen starts listening for notifications on a channel.  Calls to this
+// function will block until an acknowledgement has been received from the
+// server.  Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection can not be re-established.
+//
+// Listen will only fail in three conditions:
+//   1) The channel is already open.  The returned error will be
+//      ErrChannelAlreadyOpen.
+//   2) The query was executed on the remote server, but PostgreSQL returned an
+//      error message in response to the query.  The returned error will be a
+//      pq.Error containing the information the server supplied.
+//   3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Listen(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// The server allows you to issue a LISTEN on a channel which is already
+	// open, but it seems useful to be able to detect this case to spot for
+	// mistakes in application logic.  If the application genuinely doesn't
+	// care, it can check the exported error and ignore it.
+	_, exists := l.channels[channel]
+	if exists {
+		return ErrChannelAlreadyOpen
+	}
+
+	if l.cn != nil {
+		// If gotResponse is true but error is set, the query was executed on
+		// the remote server, but resulted in an error.  This should be
+		// relatively rare, so it's fine if we just pass the error to our
+		// caller.  However, if gotResponse is false, we could not complete the
+		// query on the remote server and our underlying connection is about
+		// to go away, so we only add relname to l.channels, and wait for
+		// resync() to take care of the rest.
+		gotResponse, err := l.cn.Listen(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	l.channels[channel] = struct{}{}
+	for l.cn == nil {
+		l.reconnectCond.Wait()
+		// we let go of the mutex for a while
+		if l.isClosed {
+			return errListenerClosed
+		}
+	}
+
+	return nil
+}
+
+// Unlisten removes a channel from the Listener's channel list.  Returns
+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
+// Returns immediately with no error if there is no connection.  Note that you
+// might still get notifications for this channel even after Unlisten has
+// returned.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Unlisten(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// Similarly to LISTEN, this is not an error in Postgres, but it seems
+	// useful to distinguish from the normal conditions.
+	_, exists := l.channels[channel]
+	if !exists {
+		return ErrChannelNotOpen
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.Unlisten(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	delete(l.channels, channel)
+	return nil
+}
+
+// UnlistenAll removes all channels from the Listener's channel list.  Returns
+// immediately with no error if there is no connection.  Note that you might
+// still get notifications for any of the deleted channels even after
+// UnlistenAll has returned.
+func (l *Listener) UnlistenAll() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.UnlistenAll()
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	l.channels = make(map[string]struct{})
+	return nil
+}
+
+// Ping the remote server to make sure it's alive.  Non-nil return value means
+// that there is no active connection.
+func (l *Listener) Ping() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+	if l.cn == nil {
+		return errors.New("no connection")
+	}
+
+	return l.cn.Ping()
+}
+
+// Clean up after losing the server connection.  Returns l.cn.Err(), which
+// should have the reason the connection was lost.
+func (l *Listener) disconnectCleanup() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	// sanity check; can't look at Err() until the channel has been closed
+	select {
+	case _, ok := <-l.connNotificationChan:
+		if ok {
+			panic("connNotificationChan not closed")
+		}
+	default:
+		panic("connNotificationChan not closed")
+	}
+
+	err := l.cn.Err()
+	l.cn.Close()
+	l.cn = nil
+	return err
+}
+
+// Synchronize the list of channels we want to be listening on with the server
+// after the connection has been established.
+func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error {
+	doneChan := make(chan error)
+	go func(notificationChan <-chan *Notification) {
+		for channel := range l.channels {
+			// If we got a response, return that error to our caller as it's
+			// going to be more descriptive than cn.Err().
+			gotResponse, err := cn.Listen(channel)
+			if gotResponse && err != nil {
+				doneChan <- err
+				return
+			}
+
+			// If we couldn't reach the server, wait for notificationChan to
+			// close and then return the error message from the connection, as
+			// per ListenerConn's interface.
+			if err != nil {
+				for range notificationChan {
+				}
+				doneChan <- cn.Err()
+				return
+			}
+		}
+		doneChan <- nil
+	}(notificationChan)
+
+	// Ignore notifications while synchronization is going on to avoid
+	// deadlocks.  We have to send a nil notification over Notify anyway as
+	// we can't possibly know which notifications (if any) were lost while
+	// the connection was down, so there's no reason to try and process
+	// these messages at all.
+	for {
+		select {
+		case _, ok := <-notificationChan:
+			if !ok {
+				notificationChan = nil
+			}
+
+		case err := <-doneChan:
+			return err
+		}
+	}
+}
+
+// caller should NOT be holding l.lock
+func (l *Listener) closed() bool {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	return l.isClosed
+}
+
+func (l *Listener) connect() error {
+	notificationChan := make(chan *Notification, 32)
+	cn, err := newDialListenerConn(l.dialer, l.name, notificationChan)
+	if err != nil {
+		return err
+	}
+
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	err = l.resync(cn, notificationChan)
+	if err != nil {
+		cn.Close()
+		return err
+	}
+
+	l.cn = cn
+	l.connNotificationChan = notificationChan
+	l.reconnectCond.Broadcast()
+
+	return nil
+}
+
+// Close disconnects the Listener from the database and shuts it down.
+// Subsequent calls to its methods will return an error.  Close returns an
+// error if the connection has already been closed.
+func (l *Listener) Close() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	if l.cn != nil {
+		l.cn.Close()
+	}
+	l.isClosed = true
+
+	// Unblock calls to Listen()
+	l.reconnectCond.Broadcast()
+
+	return nil
+}
+
+func (l *Listener) emitEvent(event ListenerEventType, err error) {
+	if l.eventCallback != nil {
+		l.eventCallback(event, err)
+	}
+}
+
+// Main logic here: maintain a connection to the server when possible, wait
+// for notifications and emit events.
+func (l *Listener) listenerConnLoop() {
+	var nextReconnect time.Time
+
+	reconnectInterval := l.minReconnectInterval
+	for {
+		for {
+			err := l.connect()
+			if err == nil {
+				break
+			}
+
+			if l.closed() {
+				return
+			}
+			l.emitEvent(ListenerEventConnectionAttemptFailed, err)
+
+			time.Sleep(reconnectInterval)
+			reconnectInterval *= 2
+			if reconnectInterval > l.maxReconnectInterval {
+				reconnectInterval = l.maxReconnectInterval
+			}
+		}
+
+		if nextReconnect.IsZero() {
+			l.emitEvent(ListenerEventConnected, nil)
+		} else {
+			l.emitEvent(ListenerEventReconnected, nil)
+			l.Notify <- nil
+		}
+
+		reconnectInterval = l.minReconnectInterval
+		nextReconnect = time.Now().Add(reconnectInterval)
+
+		for {
+			notification, ok := <-l.connNotificationChan
+			if !ok {
+				// lost connection, loop again
+				break
+			}
+			l.Notify <- notification
+		}
+
+		err := l.disconnectCleanup()
+		if l.closed() {
+			return
+		}
+		l.emitEvent(ListenerEventDisconnected, err)
+
+		time.Sleep(time.Until(nextReconnect))
+	}
+}
+
+func (l *Listener) listenerMain() {
+	l.listenerConnLoop()
+	close(l.Notify)
+}

+ 6 - 0
vendor/github.com/lib/pq/oid/doc.go

@@ -0,0 +1,6 @@
+// Package oid contains OID constants
+// as defined by the Postgres server.
+package oid
+
+// Oid is a Postgres Object ID.
+type Oid uint32

+ 93 - 0
vendor/github.com/lib/pq/oid/gen.go

@@ -0,0 +1,93 @@
+// +build ignore
+
+// Generate the table of OID values
+// Run with 'go run gen.go'.
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"strings"
+
+	_ "github.com/lib/pq"
+)
+
+// OID represent a postgres Object Identifier Type.
+type OID struct {
+	ID   int
+	Type string
+}
+
+// Name returns an upper case version of the oid type.
+func (o OID) Name() string {
+	return strings.ToUpper(o.Type)
+}
+
+func main() {
+	datname := os.Getenv("PGDATABASE")
+	sslmode := os.Getenv("PGSSLMODE")
+
+	if datname == "" {
+		os.Setenv("PGDATABASE", "pqgotest")
+	}
+
+	if sslmode == "" {
+		os.Setenv("PGSSLMODE", "disable")
+	}
+
+	db, err := sql.Open("postgres", "")
+	if err != nil {
+		log.Fatal(err)
+	}
+	rows, err := db.Query(`
+		SELECT typname, oid
+		FROM pg_type WHERE oid < 10000
+		ORDER BY oid;
+	`)
+	if err != nil {
+		log.Fatal(err)
+	}
+	oids := make([]*OID, 0)
+	for rows.Next() {
+		var oid OID
+		if err = rows.Scan(&oid.Type, &oid.ID); err != nil {
+			log.Fatal(err)
+		}
+		oids = append(oids, &oid)
+	}
+	if err = rows.Err(); err != nil {
+		log.Fatal(err)
+	}
+	cmd := exec.Command("gofmt")
+	cmd.Stderr = os.Stderr
+	w, err := cmd.StdinPipe()
+	if err != nil {
+		log.Fatal(err)
+	}
+	f, err := os.Create("types.go")
+	if err != nil {
+		log.Fatal(err)
+	}
+	cmd.Stdout = f
+	err = cmd.Start()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Fprintln(w, "// Code generated by gen.go. DO NOT EDIT.")
+	fmt.Fprintln(w, "\npackage oid")
+	fmt.Fprintln(w, "const (")
+	for _, oid := range oids {
+		fmt.Fprintf(w, "T_%s Oid = %d\n", oid.Type, oid.ID)
+	}
+	fmt.Fprintln(w, ")")
+	fmt.Fprintln(w, "var TypeName = map[Oid]string{")
+	for _, oid := range oids {
+		fmt.Fprintf(w, "T_%s: \"%s\",\n", oid.Type, oid.Name())
+	}
+	fmt.Fprintln(w, "}")
+	w.Close()
+	cmd.Wait()
+}

+ 343 - 0
vendor/github.com/lib/pq/oid/types.go

@@ -0,0 +1,343 @@
+// Code generated by gen.go. DO NOT EDIT.
+
+package oid
+
+const (
+	T_bool             Oid = 16
+	T_bytea            Oid = 17
+	T_char             Oid = 18
+	T_name             Oid = 19
+	T_int8             Oid = 20
+	T_int2             Oid = 21
+	T_int2vector       Oid = 22
+	T_int4             Oid = 23
+	T_regproc          Oid = 24
+	T_text             Oid = 25
+	T_oid              Oid = 26
+	T_tid              Oid = 27
+	T_xid              Oid = 28
+	T_cid              Oid = 29
+	T_oidvector        Oid = 30
+	T_pg_ddl_command   Oid = 32
+	T_pg_type          Oid = 71
+	T_pg_attribute     Oid = 75
+	T_pg_proc          Oid = 81
+	T_pg_class         Oid = 83
+	T_json             Oid = 114
+	T_xml              Oid = 142
+	T__xml             Oid = 143
+	T_pg_node_tree     Oid = 194
+	T__json            Oid = 199
+	T_smgr             Oid = 210
+	T_index_am_handler Oid = 325
+	T_point            Oid = 600
+	T_lseg             Oid = 601
+	T_path             Oid = 602
+	T_box              Oid = 603
+	T_polygon          Oid = 604
+	T_line             Oid = 628
+	T__line            Oid = 629
+	T_cidr             Oid = 650
+	T__cidr            Oid = 651
+	T_float4           Oid = 700
+	T_float8           Oid = 701
+	T_abstime          Oid = 702
+	T_reltime          Oid = 703
+	T_tinterval        Oid = 704
+	T_unknown          Oid = 705
+	T_circle           Oid = 718
+	T__circle          Oid = 719
+	T_money            Oid = 790
+	T__money           Oid = 791
+	T_macaddr          Oid = 829
+	T_inet             Oid = 869
+	T__bool            Oid = 1000
+	T__bytea           Oid = 1001
+	T__char            Oid = 1002
+	T__name            Oid = 1003
+	T__int2            Oid = 1005
+	T__int2vector      Oid = 1006
+	T__int4            Oid = 1007
+	T__regproc         Oid = 1008
+	T__text            Oid = 1009
+	T__tid             Oid = 1010
+	T__xid             Oid = 1011
+	T__cid             Oid = 1012
+	T__oidvector       Oid = 1013
+	T__bpchar          Oid = 1014
+	T__varchar         Oid = 1015
+	T__int8            Oid = 1016
+	T__point           Oid = 1017
+	T__lseg            Oid = 1018
+	T__path            Oid = 1019
+	T__box             Oid = 1020
+	T__float4          Oid = 1021
+	T__float8          Oid = 1022
+	T__abstime         Oid = 1023
+	T__reltime         Oid = 1024
+	T__tinterval       Oid = 1025
+	T__polygon         Oid = 1027
+	T__oid             Oid = 1028
+	T_aclitem          Oid = 1033
+	T__aclitem         Oid = 1034
+	T__macaddr         Oid = 1040
+	T__inet            Oid = 1041
+	T_bpchar           Oid = 1042
+	T_varchar          Oid = 1043
+	T_date             Oid = 1082
+	T_time             Oid = 1083
+	T_timestamp        Oid = 1114
+	T__timestamp       Oid = 1115
+	T__date            Oid = 1182
+	T__time            Oid = 1183
+	T_timestamptz      Oid = 1184
+	T__timestamptz     Oid = 1185
+	T_interval         Oid = 1186
+	T__interval        Oid = 1187
+	T__numeric         Oid = 1231
+	T_pg_database      Oid = 1248
+	T__cstring         Oid = 1263
+	T_timetz           Oid = 1266
+	T__timetz          Oid = 1270
+	T_bit              Oid = 1560
+	T__bit             Oid = 1561
+	T_varbit           Oid = 1562
+	T__varbit          Oid = 1563
+	T_numeric          Oid = 1700
+	T_refcursor        Oid = 1790
+	T__refcursor       Oid = 2201
+	T_regprocedure     Oid = 2202
+	T_regoper          Oid = 2203
+	T_regoperator      Oid = 2204
+	T_regclass         Oid = 2205
+	T_regtype          Oid = 2206
+	T__regprocedure    Oid = 2207
+	T__regoper         Oid = 2208
+	T__regoperator     Oid = 2209
+	T__regclass        Oid = 2210
+	T__regtype         Oid = 2211
+	T_record           Oid = 2249
+	T_cstring          Oid = 2275
+	T_any              Oid = 2276
+	T_anyarray         Oid = 2277
+	T_void             Oid = 2278
+	T_trigger          Oid = 2279
+	T_language_handler Oid = 2280
+	T_internal         Oid = 2281
+	T_opaque           Oid = 2282
+	T_anyelement       Oid = 2283
+	T__record          Oid = 2287
+	T_anynonarray      Oid = 2776
+	T_pg_authid        Oid = 2842
+	T_pg_auth_members  Oid = 2843
+	T__txid_snapshot   Oid = 2949
+	T_uuid             Oid = 2950
+	T__uuid            Oid = 2951
+	T_txid_snapshot    Oid = 2970
+	T_fdw_handler      Oid = 3115
+	T_pg_lsn           Oid = 3220
+	T__pg_lsn          Oid = 3221
+	T_tsm_handler      Oid = 3310
+	T_anyenum          Oid = 3500
+	T_tsvector         Oid = 3614
+	T_tsquery          Oid = 3615
+	T_gtsvector        Oid = 3642
+	T__tsvector        Oid = 3643
+	T__gtsvector       Oid = 3644
+	T__tsquery         Oid = 3645
+	T_regconfig        Oid = 3734
+	T__regconfig       Oid = 3735
+	T_regdictionary    Oid = 3769
+	T__regdictionary   Oid = 3770
+	T_jsonb            Oid = 3802
+	T__jsonb           Oid = 3807
+	T_anyrange         Oid = 3831
+	T_event_trigger    Oid = 3838
+	T_int4range        Oid = 3904
+	T__int4range       Oid = 3905
+	T_numrange         Oid = 3906
+	T__numrange        Oid = 3907
+	T_tsrange          Oid = 3908
+	T__tsrange         Oid = 3909
+	T_tstzrange        Oid = 3910
+	T__tstzrange       Oid = 3911
+	T_daterange        Oid = 3912
+	T__daterange       Oid = 3913
+	T_int8range        Oid = 3926
+	T__int8range       Oid = 3927
+	T_pg_shseclabel    Oid = 4066
+	T_regnamespace     Oid = 4089
+	T__regnamespace    Oid = 4090
+	T_regrole          Oid = 4096
+	T__regrole         Oid = 4097
+)
+
+var TypeName = map[Oid]string{
+	T_bool:             "BOOL",
+	T_bytea:            "BYTEA",
+	T_char:             "CHAR",
+	T_name:             "NAME",
+	T_int8:             "INT8",
+	T_int2:             "INT2",
+	T_int2vector:       "INT2VECTOR",
+	T_int4:             "INT4",
+	T_regproc:          "REGPROC",
+	T_text:             "TEXT",
+	T_oid:              "OID",
+	T_tid:              "TID",
+	T_xid:              "XID",
+	T_cid:              "CID",
+	T_oidvector:        "OIDVECTOR",
+	T_pg_ddl_command:   "PG_DDL_COMMAND",
+	T_pg_type:          "PG_TYPE",
+	T_pg_attribute:     "PG_ATTRIBUTE",
+	T_pg_proc:          "PG_PROC",
+	T_pg_class:         "PG_CLASS",
+	T_json:             "JSON",
+	T_xml:              "XML",
+	T__xml:             "_XML",
+	T_pg_node_tree:     "PG_NODE_TREE",
+	T__json:            "_JSON",
+	T_smgr:             "SMGR",
+	T_index_am_handler: "INDEX_AM_HANDLER",
+	T_point:            "POINT",
+	T_lseg:             "LSEG",
+	T_path:             "PATH",
+	T_box:              "BOX",
+	T_polygon:          "POLYGON",
+	T_line:             "LINE",
+	T__line:            "_LINE",
+	T_cidr:             "CIDR",
+	T__cidr:            "_CIDR",
+	T_float4:           "FLOAT4",
+	T_float8:           "FLOAT8",
+	T_abstime:          "ABSTIME",
+	T_reltime:          "RELTIME",
+	T_tinterval:        "TINTERVAL",
+	T_unknown:          "UNKNOWN",
+	T_circle:           "CIRCLE",
+	T__circle:          "_CIRCLE",
+	T_money:            "MONEY",
+	T__money:           "_MONEY",
+	T_macaddr:          "MACADDR",
+	T_inet:             "INET",
+	T__bool:            "_BOOL",
+	T__bytea:           "_BYTEA",
+	T__char:            "_CHAR",
+	T__name:            "_NAME",
+	T__int2:            "_INT2",
+	T__int2vector:      "_INT2VECTOR",
+	T__int4:            "_INT4",
+	T__regproc:         "_REGPROC",
+	T__text:            "_TEXT",
+	T__tid:             "_TID",
+	T__xid:             "_XID",
+	T__cid:             "_CID",
+	T__oidvector:       "_OIDVECTOR",
+	T__bpchar:          "_BPCHAR",
+	T__varchar:         "_VARCHAR",
+	T__int8:            "_INT8",
+	T__point:           "_POINT",
+	T__lseg:            "_LSEG",
+	T__path:            "_PATH",
+	T__box:             "_BOX",
+	T__float4:          "_FLOAT4",
+	T__float8:          "_FLOAT8",
+	T__abstime:         "_ABSTIME",
+	T__reltime:         "_RELTIME",
+	T__tinterval:       "_TINTERVAL",
+	T__polygon:         "_POLYGON",
+	T__oid:             "_OID",
+	T_aclitem:          "ACLITEM",
+	T__aclitem:         "_ACLITEM",
+	T__macaddr:         "_MACADDR",
+	T__inet:            "_INET",
+	T_bpchar:           "BPCHAR",
+	T_varchar:          "VARCHAR",
+	T_date:             "DATE",
+	T_time:             "TIME",
+	T_timestamp:        "TIMESTAMP",
+	T__timestamp:       "_TIMESTAMP",
+	T__date:            "_DATE",
+	T__time:            "_TIME",
+	T_timestamptz:      "TIMESTAMPTZ",
+	T__timestamptz:     "_TIMESTAMPTZ",
+	T_interval:         "INTERVAL",
+	T__interval:        "_INTERVAL",
+	T__numeric:         "_NUMERIC",
+	T_pg_database:      "PG_DATABASE",
+	T__cstring:         "_CSTRING",
+	T_timetz:           "TIMETZ",
+	T__timetz:          "_TIMETZ",
+	T_bit:              "BIT",
+	T__bit:             "_BIT",
+	T_varbit:           "VARBIT",
+	T__varbit:          "_VARBIT",
+	T_numeric:          "NUMERIC",
+	T_refcursor:        "REFCURSOR",
+	T__refcursor:       "_REFCURSOR",
+	T_regprocedure:     "REGPROCEDURE",
+	T_regoper:          "REGOPER",
+	T_regoperator:      "REGOPERATOR",
+	T_regclass:         "REGCLASS",
+	T_regtype:          "REGTYPE",
+	T__regprocedure:    "_REGPROCEDURE",
+	T__regoper:         "_REGOPER",
+	T__regoperator:     "_REGOPERATOR",
+	T__regclass:        "_REGCLASS",
+	T__regtype:         "_REGTYPE",
+	T_record:           "RECORD",
+	T_cstring:          "CSTRING",
+	T_any:              "ANY",
+	T_anyarray:         "ANYARRAY",
+	T_void:             "VOID",
+	T_trigger:          "TRIGGER",
+	T_language_handler: "LANGUAGE_HANDLER",
+	T_internal:         "INTERNAL",
+	T_opaque:           "OPAQUE",
+	T_anyelement:       "ANYELEMENT",
+	T__record:          "_RECORD",
+	T_anynonarray:      "ANYNONARRAY",
+	T_pg_authid:        "PG_AUTHID",
+	T_pg_auth_members:  "PG_AUTH_MEMBERS",
+	T__txid_snapshot:   "_TXID_SNAPSHOT",
+	T_uuid:             "UUID",
+	T__uuid:            "_UUID",
+	T_txid_snapshot:    "TXID_SNAPSHOT",
+	T_fdw_handler:      "FDW_HANDLER",
+	T_pg_lsn:           "PG_LSN",
+	T__pg_lsn:          "_PG_LSN",
+	T_tsm_handler:      "TSM_HANDLER",
+	T_anyenum:          "ANYENUM",
+	T_tsvector:         "TSVECTOR",
+	T_tsquery:          "TSQUERY",
+	T_gtsvector:        "GTSVECTOR",
+	T__tsvector:        "_TSVECTOR",
+	T__gtsvector:       "_GTSVECTOR",
+	T__tsquery:         "_TSQUERY",
+	T_regconfig:        "REGCONFIG",
+	T__regconfig:       "_REGCONFIG",
+	T_regdictionary:    "REGDICTIONARY",
+	T__regdictionary:   "_REGDICTIONARY",
+	T_jsonb:            "JSONB",
+	T__jsonb:           "_JSONB",
+	T_anyrange:         "ANYRANGE",
+	T_event_trigger:    "EVENT_TRIGGER",
+	T_int4range:        "INT4RANGE",
+	T__int4range:       "_INT4RANGE",
+	T_numrange:         "NUMRANGE",
+	T__numrange:        "_NUMRANGE",
+	T_tsrange:          "TSRANGE",
+	T__tsrange:         "_TSRANGE",
+	T_tstzrange:        "TSTZRANGE",
+	T__tstzrange:       "_TSTZRANGE",
+	T_daterange:        "DATERANGE",
+	T__daterange:       "_DATERANGE",
+	T_int8range:        "INT8RANGE",
+	T__int8range:       "_INT8RANGE",
+	T_pg_shseclabel:    "PG_SHSECLABEL",
+	T_regnamespace:     "REGNAMESPACE",
+	T__regnamespace:    "_REGNAMESPACE",
+	T_regrole:          "REGROLE",
+	T__regrole:         "_REGROLE",
+}

+ 93 - 0
vendor/github.com/lib/pq/rows.go

@@ -0,0 +1,93 @@
+package pq
+
+import (
+	"math"
+	"reflect"
+	"time"
+
+	"github.com/lib/pq/oid"
+)
+
+const headerSize = 4
+
+type fieldDesc struct {
+	// The object ID of the data type.
+	OID oid.Oid
+	// The data type size (see pg_type.typlen).
+	// Note that negative values denote variable-width types.
+	Len int
+	// The type modifier (see pg_attribute.atttypmod).
+	// The meaning of the modifier is type-specific.
+	Mod int
+}
+
+func (fd fieldDesc) Type() reflect.Type {
+	switch fd.OID {
+	case oid.T_int8:
+		return reflect.TypeOf(int64(0))
+	case oid.T_int4:
+		return reflect.TypeOf(int32(0))
+	case oid.T_int2:
+		return reflect.TypeOf(int16(0))
+	case oid.T_varchar, oid.T_text:
+		return reflect.TypeOf("")
+	case oid.T_bool:
+		return reflect.TypeOf(false)
+	case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz:
+		return reflect.TypeOf(time.Time{})
+	case oid.T_bytea:
+		return reflect.TypeOf([]byte(nil))
+	default:
+		return reflect.TypeOf(new(interface{})).Elem()
+	}
+}
+
+func (fd fieldDesc) Name() string {
+	return oid.TypeName[fd.OID]
+}
+
+func (fd fieldDesc) Length() (length int64, ok bool) {
+	switch fd.OID {
+	case oid.T_text, oid.T_bytea:
+		return math.MaxInt64, true
+	case oid.T_varchar, oid.T_bpchar:
+		return int64(fd.Mod - headerSize), true
+	default:
+		return 0, false
+	}
+}
+
+func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) {
+	switch fd.OID {
+	case oid.T_numeric, oid.T__numeric:
+		mod := fd.Mod - headerSize
+		precision = int64((mod >> 16) & 0xffff)
+		scale = int64(mod & 0xffff)
+		return precision, scale, true
+	default:
+		return 0, 0, false
+	}
+}
+
+// ColumnTypeScanType returns the value type that can be used to scan types into.
+func (rs *rows) ColumnTypeScanType(index int) reflect.Type {
+	return rs.colTyps[index].Type()
+}
+
+// ColumnTypeDatabaseTypeName return the database system type name.
+func (rs *rows) ColumnTypeDatabaseTypeName(index int) string {
+	return rs.colTyps[index].Name()
+}
+
+// ColumnTypeLength returns the length of the column type if the column is a
+// variable length type. If the column is not a variable length type ok
+// should return false.
+func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) {
+	return rs.colTyps[index].Length()
+}
+
+// ColumnTypePrecisionScale should return the precision and scale for decimal
+// types. If not applicable, ok should be false.
+func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
+	return rs.colTyps[index].PrecisionScale()
+}

+ 169 - 0
vendor/github.com/lib/pq/ssl.go

@@ -0,0 +1,169 @@
+package pq
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/user"
+	"path/filepath"
+)
+
+// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
+// related settings. The function is nil when no upgrade should take place.
+func ssl(o values) (func(net.Conn) (net.Conn, error), error) {
+	// verifyCaOnly selects manual post-handshake CA verification (see
+	// sslVerifyCertificateAuthority) in place of crypto/tls's built-in
+	// verification, which would also verify the server's host name.
+	verifyCaOnly := false
+	tlsConf := tls.Config{}
+	switch mode := o["sslmode"]; mode {
+	// "require" is the default.
+	case "", "require":
+		// We must skip TLS's own verification since it requires full
+		// verification since Go 1.3.
+		tlsConf.InsecureSkipVerify = true
+
+		// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
+		//
+		// Note: For backwards compatibility with earlier versions of
+		// PostgreSQL, if a root CA file exists, the behavior of
+		// sslmode=require will be the same as that of verify-ca, meaning the
+		// server certificate is validated against the CA. Relying on this
+		// behavior is discouraged, and applications that need certificate
+		// validation should always use verify-ca or verify-full.
+		if sslrootcert, ok := o["sslrootcert"]; ok {
+			if _, err := os.Stat(sslrootcert); err == nil {
+				verifyCaOnly = true
+			} else {
+				// The root CA file is unusable; drop the setting so
+				// sslCertificateAuthority below does not try to load it.
+				delete(o, "sslrootcert")
+			}
+		}
+	case "verify-ca":
+		// We must skip TLS's own verification since it requires full
+		// verification since Go 1.3.
+		tlsConf.InsecureSkipVerify = true
+		verifyCaOnly = true
+	case "verify-full":
+		// Full verification: crypto/tls checks both the CA chain and
+		// that the certificate matches the configured host name.
+		tlsConf.ServerName = o["host"]
+	case "disable":
+		return nil, nil
+	default:
+		return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
+	}
+
+	err := sslClientCertificates(&tlsConf, o)
+	if err != nil {
+		return nil, err
+	}
+	err = sslCertificateAuthority(&tlsConf, o)
+	if err != nil {
+		return nil, err
+	}
+	sslRenegotiation(&tlsConf)
+
+	// The returned upgrade function wraps the raw connection in TLS and,
+	// when only CA verification was requested, performs that check by
+	// hand after the handshake.
+	return func(conn net.Conn) (net.Conn, error) {
+		client := tls.Client(conn, &tlsConf)
+		if verifyCaOnly {
+			err := sslVerifyCertificateAuthority(client, &tlsConf)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return client, nil
+	}, nil
+}
+
+// sslClientCertificates adds the certificate specified in the "sslcert" and
+// "sslkey" settings, or if they aren't set, from the .postgresql directory
+// in the user's home directory. The configured files must exist and have
+// the correct permissions.
+func sslClientCertificates(tlsConf *tls.Config, o values) error {
+	// user.Current() might fail when cross-compiling. We have to ignore the
+	// error and continue without home directory defaults, since we wouldn't
+	// know from where to load them.
+	user, _ := user.Current()
+
+	// In libpq, the client certificate is only loaded if the setting is not blank.
+	//
+	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
+	sslcert := o["sslcert"]
+	if len(sslcert) == 0 && user != nil {
+		sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
+	}
+	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
+	if len(sslcert) == 0 {
+		return nil
+	}
+	// A missing certificate file is not an error: no client certificate
+	// is presented. Any other Stat failure is reported.
+	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
+	if _, err := os.Stat(sslcert); os.IsNotExist(err) {
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// In libpq, the ssl key is only loaded if the setting is not blank.
+	//
+	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
+	sslkey := o["sslkey"]
+	if len(sslkey) == 0 && user != nil {
+		sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
+	}
+
+	if len(sslkey) > 0 {
+		// Refuse keys that are readable by group/others (non-Windows).
+		if err := sslKeyPermissions(sslkey); err != nil {
+			return err
+		}
+	}
+
+	// NOTE(review): if sslcert resolved but sslkey is still empty here
+	// (no home directory), LoadX509KeyPair is called with an empty key
+	// path and presumably fails — confirm this matches libpq behavior.
+	cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
+	if err != nil {
+		return err
+	}
+
+	tlsConf.Certificates = []tls.Certificate{cert}
+	return nil
+}
+
+// sslCertificateAuthority adds the RootCA specified in the "sslrootcert"
+// setting to tlsConf. When set, the file must exist and contain at least
+// one PEM-encoded certificate; otherwise the system roots remain in use
+// (tlsConf.RootCAs stays nil).
+func sslCertificateAuthority(tlsConf *tls.Config, o values) error {
+	// In libpq, the root certificate is only loaded if the setting is not blank.
+	//
+	// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
+	if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
+		tlsConf.RootCAs = x509.NewCertPool()
+
+		cert, err := ioutil.ReadFile(sslrootcert)
+		if err != nil {
+			return err
+		}
+
+		// AppendCertsFromPEM returns false when no certificate could be
+		// parsed out of the file contents.
+		if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
+			return fmterrorf("couldn't parse pem in sslrootcert")
+		}
+	}
+
+	return nil
+}
+
+// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
+// verifies the presented certificate against the CA, i.e. the one specified in
+// sslrootcert or the system CA if sslrootcert was not specified.
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error {
+	err := client.Handshake()
+	if err != nil {
+		return err
+	}
+	certs := client.ConnectionState().PeerCertificates
+	opts := x509.VerifyOptions{
+		DNSName:       client.ConnectionState().ServerName,
+		Intermediates: x509.NewCertPool(),
+		Roots:         tlsConf.RootCAs,
+	}
+	// certs[0] is the server's leaf certificate; the rest of the chain
+	// is fed to the verifier as intermediates.
+	for i, cert := range certs {
+		if i == 0 {
+			continue
+		}
+		opts.Intermediates.AddCert(cert)
+	}
+	// NOTE(review): assumes a successful handshake always yields at
+	// least one peer certificate (certs[0]) — confirm, else this panics.
+	_, err = certs[0].Verify(opts)
+	return err
+}

+ 14 - 0
vendor/github.com/lib/pq/ssl_go1.7.go

@@ -0,0 +1,14 @@
+// +build go1.7
+
+package pq
+
+import "crypto/tls"
+
+// sslRenegotiation accepts renegotiation requests initiated by the backend.
+//
+// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
+// the default configuration of older versions has it enabled. Redshift
+// also initiates renegotiations and cannot be reconfigured.
+//
+// This variant is compiled on Go 1.7+ (see the build tag above), where
+// crypto/tls exposes the Renegotiation knob.
+func sslRenegotiation(conf *tls.Config) {
+	conf.Renegotiation = tls.RenegotiateFreelyAsClient
+}

+ 20 - 0
vendor/github.com/lib/pq/ssl_permissions.go

@@ -0,0 +1,20 @@
+// +build !windows
+
+package pq
+
+import "os"
+
+// sslKeyPermissions checks the permissions on user-supplied ssl key files.
+// The key file should have very little access.
+//
+// libpq does not check key file permissions on Windows.
+func sslKeyPermissions(sslkey string) error {
+	info, err := os.Stat(sslkey)
+	if err != nil {
+		return err
+	}
+	// Reject the key if any group or world permission bit (mask 0077)
+	// is set; only owner access is acceptable.
+	if info.Mode().Perm()&0077 != 0 {
+		return ErrSSLKeyHasWorldPermissions
+	}
+	return nil
+}

+ 8 - 0
vendor/github.com/lib/pq/ssl_renegotiation.go

@@ -0,0 +1,8 @@
+// +build !go1.7
+
+package pq
+
+import "crypto/tls"
+
+// sslRenegotiation is a no-op stub for pre-1.7 toolchains (see the
+// build tag above): renegotiation is not supported by crypto/tls until
+// Go 1.7, so there is no configuration knob to set.
+func sslRenegotiation(*tls.Config) {}

+ 9 - 0
vendor/github.com/lib/pq/ssl_windows.go

@@ -0,0 +1,9 @@
+// +build windows
+
+package pq
+
+// sslKeyPermissions checks the permissions on user-supplied ssl key files.
+// The key file should have very little access.
+//
+// libpq does not check key file permissions on Windows, so this Windows
+// variant always succeeds.
+func sslKeyPermissions(string) error { return nil }

+ 76 - 0
vendor/github.com/lib/pq/url.go

@@ -0,0 +1,76 @@
+package pq
+
+import (
+	"fmt"
+	"net"
+	nurl "net/url"
+	"sort"
+	"strings"
+)
+
+// ParseURL no longer needs to be used by clients of this library since supplying a URL as a
+// connection string to sql.Open() is now supported:
+//
+//	sql.Open("postgres", "postgres://bob:[email protected]:5432/mydb?sslmode=verify-full")
+//
+// It remains exported here for backwards-compatibility.
+//
+// ParseURL converts a url to a connection string for driver.Open.
+// Example:
+//
+//	"postgres://bob:[email protected]:5432/mydb?sslmode=verify-full"
+//
+// converts to:
+//
+//	"user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
+//
+// A minimal example:
+//
+//	"postgres://"
+//
+// This will be blank, causing driver.Open to use all of the defaults
+func ParseURL(url string) (string, error) {
+	u, err := nurl.Parse(url)
+	if err != nil {
+		return "", err
+	}
+
+	if u.Scheme != "postgres" && u.Scheme != "postgresql" {
+		return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
+	}
+
+	var kvs []string
+	// Backslash-escape spaces, single quotes and backslashes so each
+	// value survives the keyword=value connection-string syntax.
+	escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
+	// accrue appends k=v, silently dropping empty values so defaults
+	// apply downstream.
+	accrue := func(k, v string) {
+		if v != "" {
+			kvs = append(kvs, k+"="+escaper.Replace(v))
+		}
+	}
+
+	if u.User != nil {
+		v := u.User.Username()
+		accrue("user", v)
+
+		v, _ = u.User.Password()
+		accrue("password", v)
+	}
+
+	// SplitHostPort fails when no port is present; in that case use the
+	// whole host string as-is and emit no port.
+	if host, port, err := net.SplitHostPort(u.Host); err != nil {
+		accrue("host", u.Host)
+	} else {
+		accrue("host", host)
+		accrue("port", port)
+	}
+
+	if u.Path != "" {
+		// Path is guarded non-empty above, so [1:] only strips the
+		// leading slash.
+		accrue("dbname", u.Path[1:])
+	}
+
+	q := u.Query()
+	for k := range q {
+		accrue(k, q.Get(k))
+	}
+
+	sort.Strings(kvs) // Makes testing easier (not a performance concern)
+	return strings.Join(kvs, " "), nil
+}

+ 24 - 0
vendor/github.com/lib/pq/user_posix.go

@@ -0,0 +1,24 @@
+// Package pq is a pure Go Postgres driver for the database/sql package.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris rumprun
+
+package pq
+
+import (
+	"os"
+	"os/user"
+)
+
+// userCurrent returns the name of the current OS user. It prefers
+// user.Current(), falls back to the USER environment variable (e.g.
+// when user lookup is unavailable in a cross-compiled binary), and
+// returns ErrCouldNotDetectUsername when neither source yields a name.
+func userCurrent() (string, error) {
+	u, err := user.Current()
+	if err == nil {
+		return u.Username, nil
+	}
+
+	name := os.Getenv("USER")
+	if name != "" {
+		return name, nil
+	}
+
+	return "", ErrCouldNotDetectUsername
+}

+ 27 - 0
vendor/github.com/lib/pq/user_windows.go

@@ -0,0 +1,27 @@
+// Package pq is a pure Go Postgres driver for the database/sql package.
+package pq
+
+import (
+	"path/filepath"
+	"syscall"
+)
+
+// userCurrent performs Windows user name lookup identically to libpq.
+//
+// The PostgreSQL code makes use of the legacy Win32 function
+// GetUserName, and that function has not been imported into stock Go.
+// GetUserNameEx is available though, the difference being that a
+// wider range of names are available.  To get the output to be the
+// same as GetUserName, only the base (or last) component of the
+// result is returned.
+func userCurrent() (string, error) {
+	// 128 UTF-16 code units of buffer; the size passed to the syscall
+	// excludes room for the trailing NUL.
+	pw_name := make([]uint16, 128)
+	pwname_size := uint32(len(pw_name)) - 1
+	err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size)
+	if err != nil {
+		return "", ErrCouldNotDetectUsername
+	}
+	s := syscall.UTF16ToString(pw_name)
+	// NameSamCompatible yields DOMAIN\username; keep only the final
+	// path component, i.e. the bare user name.
+	u := filepath.Base(s)
+	return u, nil
+}

+ 23 - 0
vendor/github.com/lib/pq/uuid.go

@@ -0,0 +1,23 @@
+package pq
+
+import (
+	"encoding/hex"
+	"fmt"
+)
+
+// decodeUUIDBinary interprets the binary format of a uuid, returning it in
+// text format. The 16 input bytes become the canonical 36-byte
+// 8-4-4-4-12 hex representation (32 hex digits plus 4 dashes).
+func decodeUUIDBinary(src []byte) ([]byte, error) {
+	if len(src) != 16 {
+		return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src))
+	}
+
+	dst := make([]byte, 36)
+	// Place the dashes first, then hex-encode each group around them.
+	dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-'
+	hex.Encode(dst[0:], src[0:4])
+	hex.Encode(dst[9:], src[4:6])
+	hex.Encode(dst[14:], src[6:8])
+	hex.Encode(dst[19:], src[8:10])
+	hex.Encode(dst[24:], src[10:16])
+
+	return dst, nil
+}

+ 8 - 0
vendor/manifest

@@ -206,6 +206,14 @@
 			"branch": "master",
 			"notests": true
 		},
+		{
+			"importpath": "github.com/lib/pq",
+			"repository": "https://github.com/lib/pq",
+			"vcs": "git",
+			"revision": "4ded0e9383f75c197b3a2aaa6d590ac52df6fd79",
+			"branch": "master",
+			"notests": true
+		},
 		{
 			"importpath": "github.com/magefile/mage/mg",
 			"repository": "https://github.com/magefile/mage",

Vissa filer visades inte eftersom för många filer har ändrats