1
0
Эх сурвалжийг харах

The Great Rewrite (fixes #36, #61, #94, #101)

Rewrite of the file model and pulling mechanism. Needs lots of cleanup
and bugfixes, now...
Jakob Borg 12 жил өмнө
parent
commit
f87b1520e8
47 өөрчлөгдсөн 2131 нэмэгдсэн , 1896 устгасан
  1. 2 0
      .gitignore
  2. 0 0
      auto/gui.files.go
  3. 9 5
      build.sh
  4. 41 6
      cid/cid.go
  5. 27 0
      cid/cid_test.go
  6. 1 0
      cmd/.gitignore
  7. 72 0
      cmd/stcli/logger.go
  8. 137 0
      cmd/stcli/main.go
  9. 71 0
      cmd/stcli/tls.go
  10. 94 0
      cmd/syncthing/blockqueue.go
  11. 0 1
      cmd/syncthing/config.go
  12. 0 2
      cmd/syncthing/config_test.go
  13. 0 173
      cmd/syncthing/filemonitor.go
  14. 0 241
      cmd/syncthing/filequeue.go
  15. 0 297
      cmd/syncthing/filequeue_test.go
  16. 24 77
      cmd/syncthing/main.go
  17. 112 554
      cmd/syncthing/model.go
  18. 27 336
      cmd/syncthing/model_test.go
  19. 0 11
      cmd/syncthing/normalize.go
  20. 0 11
      cmd/syncthing/normalize_darwin.go
  21. 477 0
      cmd/syncthing/puller.go
  22. BIN
      cmd/syncthing/syncthing
  23. 4 9
      cmd/syncthing/tempname.go
  24. 4 4
      cmd/syncthing/tls.go
  25. 15 1
      cmd/syncthing/util.go
  26. 12 0
      files/debug.go
  27. 324 0
      files/set.go
  28. 321 0
      files/set_test.go
  29. 0 1
      gui/app.js
  30. 1 1
      gui/index.html
  31. 3 3
      integration/genfiles.go
  32. 3 3
      integration/h1/config.xml
  33. 3 3
      integration/h2/config.xml
  34. 3 3
      integration/h3/config.xml
  35. 0 8
      integration/test.sh
  36. 24 0
      lamport/clock.go
  37. 43 20
      protocol/PROTOCOL.md
  38. 1 1
      protocol/message_types.go
  39. 2 2
      protocol/message_xdr.go
  40. 34 0
      protocol/nativemodel_darwin.go
  41. 25 0
      protocol/nativemodel_unix.go
  42. 34 0
      protocol/nativemodel_windows.go
  43. 68 44
      protocol/protocol.go
  44. 61 61
      protocol/protocol_test.go
  45. 35 0
      protocol/wireformat.go
  46. 2 2
      scanner/file.go
  47. 15 16
      scanner/walk.go

+ 2 - 0
.gitignore

@@ -1,5 +1,7 @@
 syncthing
 syncthing
 syncthing.exe
 syncthing.exe
+stcli
+stcli.exe
 *.tar.gz
 *.tar.gz
 *.zip
 *.zip
 *.asc
 *.asc

Файлын зөрүү хэтэрхий том тул дарагдсан байна
+ 0 - 0
auto/gui.files.go


+ 9 - 5
build.sh

@@ -3,7 +3,7 @@
 export COPYFILE_DISABLE=true
 export COPYFILE_DISABLE=true
 
 
 distFiles=(README.md LICENSE) # apart from the binary itself
 distFiles=(README.md LICENSE) # apart from the binary itself
-version=$(git describe --always)
+version=$(git describe --always --dirty)
 
 
 build() {
 build() {
 	if command -v godep >/dev/null ; then
 	if command -v godep >/dev/null ; then
@@ -15,6 +15,7 @@ build() {
 		godep=
 		godep=
 	fi
 	fi
 	${godep} go build -ldflags "-w -X main.Version $version" ./cmd/syncthing
 	${godep} go build -ldflags "-w -X main.Version $version" ./cmd/syncthing
+	${godep} go build -ldflags "-w -X main.Version $version" ./cmd/stcli
 }
 }
 
 
 prepare() {
 prepare() {
@@ -26,9 +27,12 @@ test() {
 }
 }
 
 
 sign() {
 sign() {
-	id=BCE524C7
-	if gpg --list-keys "$id" >/dev/null 2>&1 ; then
-		gpg -ab -u "$id" "$1"
+	if git describe --exact-match 2>/dev/null >/dev/null ; then
+		# HEAD is a tag
+		id=BCE524C7
+		if gpg --list-keys "$id" >/dev/null 2>&1 ; then
+			gpg -ab -u "$id" "$1"
+		fi
 	fi
 	fi
 }
 }
 
 
@@ -79,7 +83,7 @@ case "$1" in
 		test || exit 1
 		test || exit 1
 
 
 		export GOARM=7
 		export GOARM=7
-		for os in darwin-amd64 linux-amd64 linux-arm freebsd-amd64 ; do
+		for os in darwin-amd64 linux-amd64 linux-arm freebsd-amd64 windows-amd64 ; do
 			export GOOS=${os%-*}
 			export GOOS=${os%-*}
 			export GOARCH=${os#*-}
 			export GOARCH=${os#*-}
 
 

+ 41 - 6
cid/cid.go

@@ -1,18 +1,30 @@
 // Package cid provides a manager for mappings between node ID:s and connection ID:s.
 // Package cid provides a manager for mappings between node ID:s and connection ID:s.
 package cid
 package cid
 
 
+import "sync"
+
 type Map struct {
 type Map struct {
-	toCid  map[string]int
+	sync.Mutex
+	toCid  map[string]uint
 	toName []string
 	toName []string
 }
 }
 
 
+var (
+	LocalName      = "<local>"
+	LocalID   uint = 0
+)
+
 func NewMap() *Map {
 func NewMap() *Map {
 	return &Map{
 	return &Map{
-		toCid: make(map[string]int),
+		toCid:  map[string]uint{"<local>": 0},
+		toName: []string{"<local>"},
 	}
 	}
 }
 }
 
 
-func (m *Map) Get(name string) int {
+func (m *Map) Get(name string) uint {
+	m.Lock()
+	defer m.Unlock()
+
 	cid, ok := m.toCid[name]
 	cid, ok := m.toCid[name]
 	if ok {
 	if ok {
 		return cid
 		return cid
@@ -22,22 +34,45 @@ func (m *Map) Get(name string) int {
 	for i, n := range m.toName {
 	for i, n := range m.toName {
 		if n == "" {
 		if n == "" {
 			m.toName[i] = name
 			m.toName[i] = name
-			m.toCid[name] = i
-			return i
+			m.toCid[name] = uint(i)
+			return uint(i)
 		}
 		}
 	}
 	}
 
 
 	// Add it to the end since we didn't find a free slot
 	// Add it to the end since we didn't find a free slot
 	m.toName = append(m.toName, name)
 	m.toName = append(m.toName, name)
-	cid = len(m.toName) - 1
+	cid = uint(len(m.toName) - 1)
 	m.toCid[name] = cid
 	m.toCid[name] = cid
 	return cid
 	return cid
 }
 }
 
 
+func (m *Map) Name(cid uint) string {
+	m.Lock()
+	defer m.Unlock()
+
+	return m.toName[cid]
+}
+
+func (m *Map) Names() []string {
+	m.Lock()
+
+	var names []string
+	for _, name := range m.toName {
+		if name != "" {
+			names = append(names, name)
+		}
+	}
+
+	m.Unlock()
+	return names
+}
+
 func (m *Map) Clear(name string) {
 func (m *Map) Clear(name string) {
+	m.Lock()
 	cid, ok := m.toCid[name]
 	cid, ok := m.toCid[name]
 	if ok {
 	if ok {
 		m.toName[cid] = ""
 		m.toName[cid] = ""
 		delete(m.toCid, name)
 		delete(m.toCid, name)
 	}
 	}
+	m.Unlock()
 }
 }

+ 27 - 0
cid/cid_test.go

@@ -0,0 +1,27 @@
+package cid
+
+import "testing"
+
+func TestGet(t *testing.T) {
+	m := NewMap()
+
+	if i := m.Get("foo"); i != 1 {
+		t.Errorf("Unexpected id %d != 1", i)
+	}
+	if i := m.Get("bar"); i != 2 {
+		t.Errorf("Unexpected id %d != 2", i)
+	}
+	if i := m.Get("foo"); i != 1 {
+		t.Errorf("Unexpected id %d != 1", i)
+	}
+	if i := m.Get("bar"); i != 2 {
+		t.Errorf("Unexpected id %d != 2", i)
+	}
+
+	if LocalID != 0 {
+		t.Error("LocalID should be 0")
+	}
+	if i := m.Get(LocalName); i != LocalID {
+		t.Errorf("Unexpected id %d != %c", i, LocalID)
+	}
+}

+ 1 - 0
cmd/.gitignore

@@ -1 +1,2 @@
 !syncthing
 !syncthing
+!stcli

+ 72 - 0
cmd/stcli/logger.go

@@ -0,0 +1,72 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+)
+
+var logger *log.Logger
+
+func init() {
+	log.SetOutput(os.Stderr)
+	logger = log.New(os.Stderr, "", log.Flags())
+}
+
+func debugln(vals ...interface{}) {
+	s := fmt.Sprintln(vals...)
+	logger.Output(2, "DEBUG: "+s)
+}
+
+func debugf(format string, vals ...interface{}) {
+	s := fmt.Sprintf(format, vals...)
+	logger.Output(2, "DEBUG: "+s)
+}
+
+func infoln(vals ...interface{}) {
+	s := fmt.Sprintln(vals...)
+	logger.Output(2, "INFO: "+s)
+}
+
+func infof(format string, vals ...interface{}) {
+	s := fmt.Sprintf(format, vals...)
+	logger.Output(2, "INFO: "+s)
+}
+
+func okln(vals ...interface{}) {
+	s := fmt.Sprintln(vals...)
+	logger.Output(2, "OK: "+s)
+}
+
+func okf(format string, vals ...interface{}) {
+	s := fmt.Sprintf(format, vals...)
+	logger.Output(2, "OK: "+s)
+}
+
+func warnln(vals ...interface{}) {
+	s := fmt.Sprintln(vals...)
+	logger.Output(2, "WARNING: "+s)
+}
+
+func warnf(format string, vals ...interface{}) {
+	s := fmt.Sprintf(format, vals...)
+	logger.Output(2, "WARNING: "+s)
+}
+
+func fatalln(vals ...interface{}) {
+	s := fmt.Sprintln(vals...)
+	logger.Output(2, "FATAL: "+s)
+	os.Exit(3)
+}
+
+func fatalf(format string, vals ...interface{}) {
+	s := fmt.Sprintf(format, vals...)
+	logger.Output(2, "FATAL: "+s)
+	os.Exit(3)
+}
+
+func fatalErr(err error) {
+	if err != nil {
+		fatalf(err.Error())
+	}
+}

+ 137 - 0
cmd/stcli/main.go

@@ -0,0 +1,137 @@
+package main
+
+import (
+	"crypto/tls"
+	"flag"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+
+	"github.com/calmh/syncthing/protocol"
+)
+
+var (
+	exit    bool
+	cmd     string
+	confDir string
+	target  string
+	get     string
+	pc      protocol.Connection
+)
+
+func main() {
+	log.SetFlags(0)
+	log.SetOutput(os.Stdout)
+
+	flag.StringVar(&cmd, "cmd", "idx", "Command")
+	flag.StringVar(&confDir, "home", ".", "Certificates directory")
+	flag.StringVar(&target, "target", "127.0.0.1:22000", "Target node")
+	flag.StringVar(&get, "get", "", "Get file")
+	flag.BoolVar(&exit, "exit", false, "Exit after command")
+	flag.Parse()
+
+	connect(target)
+
+	select {}
+}
+
+func connect(target string) {
+	cert, err := loadCert(confDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	myID := string(certID(cert.Certificate[0]))
+
+	tlsCfg := &tls.Config{
+		Certificates:           []tls.Certificate{cert},
+		NextProtos:             []string{"bep/1.0"},
+		ServerName:             myID,
+		ClientAuth:             tls.RequestClientCert,
+		SessionTicketsDisabled: true,
+		InsecureSkipVerify:     true,
+		MinVersion:             tls.VersionTLS12,
+	}
+
+	conn, err := tls.Dial("tcp", target, tlsCfg)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	remoteID := certID(conn.ConnectionState().PeerCertificates[0].Raw)
+
+	pc = protocol.NewConnection(remoteID, conn, conn, Model{}, nil)
+
+	select {}
+}
+
+type Model struct {
+}
+
+func prtIndex(files []protocol.FileInfo) {
+	for _, f := range files {
+		log.Printf("%q (v:%d mod:%d flags:0%o nblocks:%d)", f.Name, f.Version, f.Modified, f.Flags, len(f.Blocks))
+		for _, b := range f.Blocks {
+			log.Printf("    %6d %x", b.Size, b.Hash)
+		}
+	}
+}
+
+func (m Model) Index(nodeID string, files []protocol.FileInfo) {
+	log.Printf("Received index")
+	if cmd == "idx" {
+		prtIndex(files)
+		if get != "" {
+			for _, f := range files {
+				if f.Name == get {
+					go getFile(f)
+					break
+				}
+			}
+		} else if exit {
+			os.Exit(0)
+		}
+	}
+}
+
+func getFile(f protocol.FileInfo) {
+	fn := filepath.Base(f.Name)
+	fd, err := os.Create(fn)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	var offset int64
+	for _, b := range f.Blocks {
+		log.Printf("Request %q %d - %d", f.Name, offset, offset+int64(b.Size))
+		bs, err := pc.Request("default", f.Name, offset, int(b.Size))
+		log.Printf(" - got %d bytes", len(bs))
+		if err != nil {
+			log.Fatal(err)
+		}
+		offset += int64(b.Size)
+		fd.Write(bs)
+	}
+
+	fd.Close()
+}
+
+func (m Model) IndexUpdate(nodeID string, files []protocol.FileInfo) {
+	log.Println("Received index update")
+	if cmd == "idx" {
+		prtIndex(files)
+		if exit {
+			os.Exit(0)
+		}
+	}
+}
+
+func (m Model) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) {
+	log.Println("Received request")
+	return nil, io.EOF
+}
+
+func (m Model) Close(nodeID string, err error) {
+	log.Println("Received close")
+}

+ 71 - 0
cmd/stcli/tls.go

@@ -0,0 +1,71 @@
+package main
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/base32"
+	"encoding/pem"
+	"math/big"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+const (
+	tlsRSABits = 3072
+	tlsName    = "syncthing"
+)
+
+func loadCert(dir string) (tls.Certificate, error) {
+	return tls.LoadX509KeyPair(filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem"))
+}
+
+func certID(bs []byte) string {
+	hf := sha256.New()
+	hf.Write(bs)
+	id := hf.Sum(nil)
+	return strings.Trim(base32.StdEncoding.EncodeToString(id), "=")
+}
+
+func newCertificate(dir string) {
+	infoln("Generating RSA certificate and key...")
+
+	priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits)
+	fatalErr(err)
+
+	notBefore := time.Now()
+	notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
+
+	template := x509.Certificate{
+		SerialNumber: new(big.Int).SetInt64(0),
+		Subject: pkix.Name{
+			CommonName: tlsName,
+		},
+		NotBefore: notBefore,
+		NotAfter:  notAfter,
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
+		BasicConstraintsValid: true,
+	}
+
+	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
+	fatalErr(err)
+
+	certOut, err := os.Create(filepath.Join(dir, "cert.pem"))
+	fatalErr(err)
+	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+	certOut.Close()
+	okln("Created RSA certificate file")
+
+	keyOut, err := os.OpenFile(filepath.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	fatalErr(err)
+	pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
+	keyOut.Close()
+	okln("Created RSA key file")
+}

+ 94 - 0
cmd/syncthing/blockqueue.go

@@ -0,0 +1,94 @@
+package main
+
+import "github.com/calmh/syncthing/scanner"
+
+type bqAdd struct {
+	file scanner.File
+	have []scanner.Block
+	need []scanner.Block
+}
+
+type bqBlock struct {
+	file  scanner.File
+	block scanner.Block   // get this block from the network
+	copy  []scanner.Block // copy these blocks from the old version of the file
+	last  bool
+}
+
+type blockQueue struct {
+	inbox  chan bqAdd
+	outbox chan bqBlock
+
+	queued []bqBlock
+}
+
+func newBlockQueue() *blockQueue {
+	q := &blockQueue{
+		inbox:  make(chan bqAdd),
+		outbox: make(chan bqBlock),
+	}
+	go q.run()
+	return q
+}
+
+func (q *blockQueue) addBlock(a bqAdd) {
+	// If we already have it queued, return
+	for _, b := range q.queued {
+		if b.file.Name == a.file.Name {
+			return
+		}
+	}
+	if len(a.have) > 0 {
+		// First queue a copy operation
+		q.queued = append(q.queued, bqBlock{
+			file: a.file,
+			copy: a.have,
+		})
+	}
+	// Queue the needed blocks individually
+	l := len(a.need)
+	for i, b := range a.need {
+		q.queued = append(q.queued, bqBlock{
+			file:  a.file,
+			block: b,
+			last:  i == l-1,
+		})
+	}
+
+	if l == 0 {
+		// If we didn't have anything to fetch, queue an empty block with the "last" flag set to close the file.
+		q.queued = append(q.queued, bqBlock{
+			file: a.file,
+			last: true,
+		})
+	}
+}
+
+func (q *blockQueue) run() {
+	for {
+		if len(q.queued) == 0 {
+			q.addBlock(<-q.inbox)
+		} else {
+			next := q.queued[0]
+			select {
+			case a := <-q.inbox:
+				q.addBlock(a)
+			case q.outbox <- next:
+				q.queued = q.queued[1:]
+			}
+		}
+	}
+}
+
+func (q *blockQueue) put(a bqAdd) {
+	q.inbox <- a
+}
+
+func (q *blockQueue) get() bqBlock {
+	return <-q.outbox
+}
+
+func (q *blockQueue) empty() bool {
+	// There is a race condition here. We're only mostly sure the queue is empty if the expression below is true.
+	return len(q.queued) == 0 && len(q.inbox) == 0 && len(q.outbox) == 0
+}

+ 0 - 1
cmd/syncthing/config.go

@@ -32,7 +32,6 @@ type NodeConfiguration struct {
 type OptionsConfiguration struct {
 type OptionsConfiguration struct {
 	ListenAddress      []string `xml:"listenAddress" default:":22000" ini:"listen-address"`
 	ListenAddress      []string `xml:"listenAddress" default:":22000" ini:"listen-address"`
 	ReadOnly           bool     `xml:"readOnly" ini:"read-only"`
 	ReadOnly           bool     `xml:"readOnly" ini:"read-only"`
-	AllowDelete        bool     `xml:"allowDelete" default:"true" ini:"allow-delete"`
 	FollowSymlinks     bool     `xml:"followSymlinks" default:"true" ini:"follow-symlinks"`
 	FollowSymlinks     bool     `xml:"followSymlinks" default:"true" ini:"follow-symlinks"`
 	GUIEnabled         bool     `xml:"guiEnabled" default:"true" ini:"gui-enabled"`
 	GUIEnabled         bool     `xml:"guiEnabled" default:"true" ini:"gui-enabled"`
 	GUIAddress         string   `xml:"guiAddress" default:"127.0.0.1:8080" ini:"gui-address"`
 	GUIAddress         string   `xml:"guiAddress" default:"127.0.0.1:8080" ini:"gui-address"`

+ 0 - 2
cmd/syncthing/config_test.go

@@ -11,7 +11,6 @@ func TestDefaultValues(t *testing.T) {
 	expected := OptionsConfiguration{
 	expected := OptionsConfiguration{
 		ListenAddress:      []string{":22000"},
 		ListenAddress:      []string{":22000"},
 		ReadOnly:           false,
 		ReadOnly:           false,
-		AllowDelete:        true,
 		FollowSymlinks:     true,
 		FollowSymlinks:     true,
 		GUIEnabled:         true,
 		GUIEnabled:         true,
 		GUIAddress:         "127.0.0.1:8080",
 		GUIAddress:         "127.0.0.1:8080",
@@ -90,7 +89,6 @@ func TestOverriddenValues(t *testing.T) {
 	expected := OptionsConfiguration{
 	expected := OptionsConfiguration{
 		ListenAddress:      []string{":23000"},
 		ListenAddress:      []string{":23000"},
 		ReadOnly:           true,
 		ReadOnly:           true,
-		AllowDelete:        false,
 		FollowSymlinks:     false,
 		FollowSymlinks:     false,
 		GUIEnabled:         false,
 		GUIEnabled:         false,
 		GUIAddress:         "125.2.2.2:8080",
 		GUIAddress:         "125.2.2.2:8080",

+ 0 - 173
cmd/syncthing/filemonitor.go

@@ -1,173 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"os"
-	"path"
-	"sync"
-	"time"
-
-	"github.com/calmh/syncthing/buffers"
-	"github.com/calmh/syncthing/scanner"
-)
-
-type fileMonitor struct {
-	name        string // in-repo name
-	path        string // full path
-	writeDone   sync.WaitGroup
-	model       *Model
-	global      scanner.File
-	localBlocks []scanner.Block
-	copyError   error
-	writeError  error
-}
-
-func (m *fileMonitor) FileBegins(cc <-chan content) error {
-	if debugPull {
-		dlog.Println("file begins:", m.name)
-	}
-
-	tmp := defTempNamer.TempName(m.path)
-
-	dir := path.Dir(tmp)
-	_, err := os.Stat(dir)
-	if err != nil && os.IsNotExist(err) {
-		err = os.MkdirAll(dir, 0777)
-		if err != nil {
-			return err
-		}
-	}
-
-	outFile, err := os.Create(tmp)
-	if err != nil {
-		return err
-	}
-
-	m.writeDone.Add(1)
-
-	var writeWg sync.WaitGroup
-	if len(m.localBlocks) > 0 {
-		writeWg.Add(1)
-		inFile, err := os.Open(m.path)
-		if err != nil {
-			return err
-		}
-
-		// Copy local blocks, close infile when done
-		go m.copyLocalBlocks(inFile, outFile, &writeWg)
-	}
-
-	// Write remote blocks,
-	writeWg.Add(1)
-	go m.copyRemoteBlocks(cc, outFile, &writeWg)
-
-	// Wait for both writing routines, then close the outfile
-	go func() {
-		writeWg.Wait()
-		outFile.Close()
-		m.writeDone.Done()
-	}()
-
-	return nil
-}
-
-func (m *fileMonitor) copyLocalBlocks(inFile, outFile *os.File, writeWg *sync.WaitGroup) {
-	defer inFile.Close()
-	defer writeWg.Done()
-
-	var buf = buffers.Get(BlockSize)
-	defer buffers.Put(buf)
-
-	for _, lb := range m.localBlocks {
-		buf = buf[:lb.Size]
-		_, err := inFile.ReadAt(buf, lb.Offset)
-		if err != nil {
-			m.copyError = err
-			return
-		}
-		_, err = outFile.WriteAt(buf, lb.Offset)
-		if err != nil {
-			m.copyError = err
-			return
-		}
-	}
-}
-
-func (m *fileMonitor) copyRemoteBlocks(cc <-chan content, outFile *os.File, writeWg *sync.WaitGroup) {
-	defer writeWg.Done()
-
-	for content := range cc {
-		_, err := outFile.WriteAt(content.data, content.offset)
-		buffers.Put(content.data)
-		if err != nil {
-			m.writeError = err
-			return
-		}
-	}
-}
-
-func (m *fileMonitor) FileDone() error {
-	if debugPull {
-		dlog.Println("file done:", m.name)
-	}
-
-	m.writeDone.Wait()
-
-	tmp := defTempNamer.TempName(m.path)
-	defer os.Remove(tmp)
-
-	if m.copyError != nil {
-		return m.copyError
-	}
-	if m.writeError != nil {
-		return m.writeError
-	}
-
-	err := hashCheck(tmp, m.global.Blocks)
-	if err != nil {
-		return err
-	}
-
-	err = os.Chtimes(tmp, time.Unix(m.global.Modified, 0), time.Unix(m.global.Modified, 0))
-	if err != nil {
-		return err
-	}
-
-	err = os.Chmod(tmp, os.FileMode(m.global.Flags&0777))
-	if err != nil {
-		return err
-	}
-
-	err = os.Rename(tmp, m.path)
-	if err != nil {
-		return err
-	}
-
-	m.model.updateLocal(m.global)
-	return nil
-}
-
-func hashCheck(name string, correct []scanner.Block) error {
-	rf, err := os.Open(name)
-	if err != nil {
-		return err
-	}
-	defer rf.Close()
-
-	current, err := scanner.Blocks(rf, BlockSize)
-	if err != nil {
-		return err
-	}
-	if len(current) != len(correct) {
-		return errors.New("incorrect number of blocks")
-	}
-	for i := range current {
-		if bytes.Compare(current[i].Hash, correct[i].Hash) != 0 {
-			return fmt.Errorf("hash mismatch: %x != %x", current[i], correct[i])
-		}
-	}
-
-	return nil
-}

+ 0 - 241
cmd/syncthing/filequeue.go

@@ -1,241 +0,0 @@
-package main
-
-import (
-	"log"
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/calmh/syncthing/scanner"
-)
-
-type Monitor interface {
-	FileBegins(<-chan content) error
-	FileDone() error
-}
-
-type FileQueue struct {
-	files        queuedFileList
-	sorted       bool
-	fmut         sync.Mutex // protects files and sorted
-	availability map[string][]string
-	amut         sync.Mutex // protects availability
-	queued       map[string]bool
-}
-
-type queuedFile struct {
-	name         string
-	blocks       []scanner.Block
-	activeBlocks []bool
-	given        int
-	remaining    int
-	channel      chan content
-	nodes        []string
-	nodesChecked time.Time
-	monitor      Monitor
-}
-
-type content struct {
-	offset int64
-	data   []byte
-}
-
-type queuedFileList []queuedFile
-
-func (l queuedFileList) Len() int { return len(l) }
-
-func (l queuedFileList) Swap(a, b int) { l[a], l[b] = l[b], l[a] }
-
-func (l queuedFileList) Less(a, b int) bool {
-	// Sort by most blocks already given out, then alphabetically
-	if l[a].given != l[b].given {
-		return l[a].given > l[b].given
-	}
-	return l[a].name < l[b].name
-}
-
-type queuedBlock struct {
-	name  string
-	block scanner.Block
-	index int
-}
-
-func NewFileQueue() *FileQueue {
-	return &FileQueue{
-		availability: make(map[string][]string),
-		queued:       make(map[string]bool),
-	}
-}
-
-func (q *FileQueue) Add(name string, blocks []scanner.Block, monitor Monitor) {
-	q.fmut.Lock()
-	defer q.fmut.Unlock()
-
-	if q.queued[name] {
-		return
-	}
-
-	q.files = append(q.files, queuedFile{
-		name:         name,
-		blocks:       blocks,
-		activeBlocks: make([]bool, len(blocks)),
-		remaining:    len(blocks),
-		channel:      make(chan content),
-		monitor:      monitor,
-	})
-	q.queued[name] = true
-	q.sorted = false
-}
-
-func (q *FileQueue) Len() int {
-	q.fmut.Lock()
-	defer q.fmut.Unlock()
-
-	return len(q.files)
-}
-
-func (q *FileQueue) Get(nodeID string) (queuedBlock, bool) {
-	q.fmut.Lock()
-	defer q.fmut.Unlock()
-
-	if !q.sorted {
-		sort.Sort(q.files)
-		q.sorted = true
-	}
-
-	for i := range q.files {
-		qf := &q.files[i]
-
-		q.amut.Lock()
-		av := q.availability[qf.name]
-		q.amut.Unlock()
-
-		if len(av) == 0 {
-			// Noone has the file we want; abort.
-			if qf.remaining != len(qf.blocks) {
-				// We have already started on this file; close it down
-				close(qf.channel)
-				if mon := qf.monitor; mon != nil {
-					mon.FileDone()
-				}
-			}
-			delete(q.queued, qf.name)
-			q.deleteAt(i)
-			return queuedBlock{}, false
-		}
-
-		for _, ni := range av {
-			// Find and return the next block in the queue
-			if ni == nodeID {
-				for j, b := range qf.blocks {
-					if !qf.activeBlocks[j] {
-						qf.activeBlocks[j] = true
-						qf.given++
-						return queuedBlock{
-							name:  qf.name,
-							block: b,
-							index: j,
-						}, true
-					}
-				}
-				break
-			}
-		}
-	}
-
-	// We found nothing to do
-	return queuedBlock{}, false
-}
-
-func (q *FileQueue) Done(file string, offset int64, data []byte) {
-	q.fmut.Lock()
-	defer q.fmut.Unlock()
-
-	c := content{
-		offset: offset,
-		data:   data,
-	}
-	for i := range q.files {
-		qf := &q.files[i]
-
-		if qf.name == file {
-			if qf.monitor != nil && qf.remaining == len(qf.blocks) {
-				err := qf.monitor.FileBegins(qf.channel)
-				if err != nil {
-					log.Printf("WARNING: %s: %v (not synced)", qf.name, err)
-					delete(q.queued, qf.name)
-					q.deleteAt(i)
-					return
-				}
-			}
-
-			qf.channel <- c
-			qf.remaining--
-
-			if qf.remaining == 0 {
-				close(qf.channel)
-				if qf.monitor != nil {
-					err := qf.monitor.FileDone()
-					if err != nil {
-						log.Printf("WARNING: %s: %v", qf.name, err)
-					}
-				}
-				delete(q.queued, qf.name)
-				q.deleteAt(i)
-			}
-			return
-		}
-	}
-
-	// We found nothing, might have errored out already
-}
-
-func (q *FileQueue) QueuedFiles() (files []string) {
-	q.fmut.Lock()
-	defer q.fmut.Unlock()
-
-	for _, qf := range q.files {
-		files = append(files, qf.name)
-	}
-	return
-}
-
-func (q *FileQueue) deleteAt(i int) {
-	q.files = append(q.files[:i], q.files[i+1:]...)
-}
-
-func (q *FileQueue) deleteFile(n string) {
-	for i, file := range q.files {
-		if n == file.name {
-			q.deleteAt(i)
-			delete(q.queued, file.name)
-			return
-		}
-	}
-}
-
-func (q *FileQueue) SetAvailable(file string, nodes []string) {
-	q.amut.Lock()
-	defer q.amut.Unlock()
-
-	q.availability[file] = nodes
-}
-
-func (q *FileQueue) RemoveAvailable(toRemove string) {
-	q.fmut.Lock()
-	q.amut.Lock()
-	defer q.amut.Unlock()
-	defer q.fmut.Unlock()
-
-	for file, nodes := range q.availability {
-		for i, node := range nodes {
-			if node == toRemove {
-				q.availability[file] = nodes[:i+copy(nodes[i:], nodes[i+1:])]
-				if len(q.availability[file]) == 0 {
-					q.deleteFile(file)
-				}
-			}
-			break
-		}
-	}
-}

+ 0 - 297
cmd/syncthing/filequeue_test.go

@@ -1,297 +0,0 @@
-package main
-
-import (
-	"reflect"
-	"sync"
-	"sync/atomic"
-	"testing"
-
-	"github.com/calmh/syncthing/scanner"
-)
-
-func TestFileQueueAdd(t *testing.T) {
-	q := NewFileQueue()
-	q.Add("foo", nil, nil)
-}
-
-func TestFileQueueAddSorting(t *testing.T) {
-	q := NewFileQueue()
-	q.SetAvailable("zzz", []string{"nodeID"})
-	q.SetAvailable("aaa", []string{"nodeID"})
-
-	q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
-	q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
-	b, _ := q.Get("nodeID")
-	if b.name != "aaa" {
-		t.Errorf("Incorrectly sorted get: %+v", b)
-	}
-
-	q = NewFileQueue()
-	q.SetAvailable("zzz", []string{"nodeID"})
-	q.SetAvailable("aaa", []string{"nodeID"})
-
-	q.Add("zzz", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
-	b, _ = q.Get("nodeID") // Start on zzzz
-	if b.name != "zzz" {
-		t.Errorf("Incorrectly sorted get: %+v", b)
-	}
-	q.Add("aaa", []scanner.Block{{Offset: 0, Size: 128}, {Offset: 128, Size: 128}}, nil)
-	b, _ = q.Get("nodeID")
-	if b.name != "zzz" {
-		// Continue rather than starting a new file
-		t.Errorf("Incorrectly sorted get: %+v", b)
-	}
-}
-
-func TestFileQueueLen(t *testing.T) {
-	q := NewFileQueue()
-	q.Add("foo", nil, nil)
-	q.Add("bar", nil, nil)
-
-	if l := q.Len(); l != 2 {
-		t.Errorf("Incorrect len %d != 2 after adds", l)
-	}
-}
-
-func TestFileQueueGet(t *testing.T) {
-	q := NewFileQueue()
-	q.SetAvailable("foo", []string{"nodeID"})
-	q.SetAvailable("bar", []string{"nodeID"})
-
-	q.Add("foo", []scanner.Block{
-		{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
-		{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
-		{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
-	}, nil)
-	q.Add("bar", []scanner.Block{
-		{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
-		{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
-	}, nil)
-
-	// First get should return the first block of the first file
-
-	expected := queuedBlock{
-		name: "bar",
-		block: scanner.Block{
-			Offset: 0,
-			Size:   128,
-			Hash:   []byte("some bar hash bytes"),
-		},
-	}
-	actual, ok := q.Get("nodeID")
-
-	if !ok {
-		t.Error("Unexpected non-OK Get()")
-	}
-	if !reflect.DeepEqual(expected, actual) {
-		t.Errorf("Incorrect block returned (first)\n  E: %+v\n  A: %+v", expected, actual)
-	}
-
-	// Second get should return the next block of the first file
-
-	expected = queuedBlock{
-		name: "bar",
-		block: scanner.Block{
-			Offset: 128,
-			Size:   128,
-			Hash:   []byte("some other bar hash bytes"),
-		},
-		index: 1,
-	}
-	actual, ok = q.Get("nodeID")
-
-	if !ok {
-		t.Error("Unexpected non-OK Get()")
-	}
-	if !reflect.DeepEqual(expected, actual) {
-		t.Errorf("Incorrect block returned (second)\n  E: %+v\n  A: %+v", expected, actual)
-	}
-
-	// Third get should return the first block of the second file
-
-	expected = queuedBlock{
-		name: "foo",
-		block: scanner.Block{
-			Offset: 0,
-			Size:   128,
-			Hash:   []byte("some foo hash bytes"),
-		},
-	}
-	actual, ok = q.Get("nodeID")
-
-	if !ok {
-		t.Error("Unexpected non-OK Get()")
-	}
-	if !reflect.DeepEqual(expected, actual) {
-		t.Errorf("Incorrect block returned (third)\n  E: %+v\n  A: %+v", expected, actual)
-	}
-}
-
-/*
-func TestFileQueueDone(t *testing.T) {
-	ch := make(chan content)
-	var recv sync.WaitGroup
-	recv.Add(1)
-	go func() {
-		content := <-ch
-		if bytes.Compare(content.data, []byte("first block bytes")) != 0 {
-			t.Error("Incorrect data in first content block")
-		}
-
-		content = <-ch
-		if bytes.Compare(content.data, []byte("second block bytes")) != 0 {
-			t.Error("Incorrect data in second content block")
-		}
-
-		_, ok := <-ch
-		if ok {
-			t.Error("Content channel not closed")
-		}
-
-		recv.Done()
-	}()
-
-	q := FileQueue{resolver: fakeResolver{}}
-	q.Add("foo", []scanner.Block{
-		{Offset: 0, Length: 128, Hash: []byte("some foo hash bytes")},
-		{Offset: 128, Length: 128, Hash: []byte("some other foo hash bytes")},
-	}, ch)
-
-	b0, _ := q.Get("nodeID")
-	b1, _ := q.Get("nodeID")
-
-	q.Done(b0.name, b0.block.Offset, []byte("first block bytes"))
-	q.Done(b1.name, b1.block.Offset, []byte("second block bytes"))
-
-	recv.Wait()
-
-	// Queue should now have one file less
-
-	if l := q.Len(); l != 0 {
-		t.Error("Queue not empty")
-	}
-
-	_, ok := q.Get("nodeID")
-	if ok {
-		t.Error("Unexpected OK Get()")
-	}
-}
-*/
-
-func TestFileQueueGetNodeIDs(t *testing.T) {
-	q := NewFileQueue()
-	q.SetAvailable("a-foo", []string{"nodeID", "a"})
-	q.SetAvailable("b-bar", []string{"nodeID", "b"})
-
-	q.Add("a-foo", []scanner.Block{
-		{Offset: 0, Size: 128, Hash: []byte("some foo hash bytes")},
-		{Offset: 128, Size: 128, Hash: []byte("some other foo hash bytes")},
-		{Offset: 256, Size: 128, Hash: []byte("more foo hash bytes")},
-	}, nil)
-	q.Add("b-bar", []scanner.Block{
-		{Offset: 0, Size: 128, Hash: []byte("some bar hash bytes")},
-		{Offset: 128, Size: 128, Hash: []byte("some other bar hash bytes")},
-	}, nil)
-
-	expected := queuedBlock{
-		name: "b-bar",
-		block: scanner.Block{
-			Offset: 0,
-			Size:   128,
-			Hash:   []byte("some bar hash bytes"),
-		},
-	}
-	actual, ok := q.Get("b")
-	if !ok {
-		t.Error("Unexpected non-OK Get()")
-	}
-	if !reflect.DeepEqual(expected, actual) {
-		t.Errorf("Incorrect block returned\n  E: %+v\n  A: %+v", expected, actual)
-	}
-
-	expected = queuedBlock{
-		name: "a-foo",
-		block: scanner.Block{
-			Offset: 0,
-			Size:   128,
-			Hash:   []byte("some foo hash bytes"),
-		},
-	}
-	actual, ok = q.Get("a")
-	if !ok {
-		t.Error("Unexpected non-OK Get()")
-	}
-	if !reflect.DeepEqual(expected, actual) {
-		t.Errorf("Incorrect block returned\n  E: %+v\n  A: %+v", expected, actual)
-	}
-
-	expected = queuedBlock{
-		name: "a-foo",
-		block: scanner.Block{
-			Offset: 128,
-			Size:   128,
-			Hash:   []byte("some other foo hash bytes"),
-		},
-		index: 1,
-	}
-	actual, ok = q.Get("nodeID")
-	if !ok {
-		t.Error("Unexpected non-OK Get()")
-	}
-	if !reflect.DeepEqual(expected, actual) {
-		t.Errorf("Incorrect block returned\n  E: %+v\n  A: %+v", expected, actual)
-	}
-}
-
-func TestFileQueueThreadHandling(t *testing.T) {
-	// This should pass with go test -race
-
-	const n = 100
-	var total int
-	var blocks []scanner.Block
-	for i := 1; i <= n; i++ {
-		blocks = append(blocks, scanner.Block{Offset: int64(i), Size: 1})
-		total += i
-	}
-
-	q := NewFileQueue()
-	q.Add("foo", blocks, nil)
-	q.SetAvailable("foo", []string{"nodeID"})
-
-	var start = make(chan bool)
-	var gotTot uint32
-	var wg sync.WaitGroup
-	wg.Add(n)
-	for i := 1; i <= n; i++ {
-		go func() {
-			<-start
-			b, _ := q.Get("nodeID")
-			atomic.AddUint32(&gotTot, uint32(b.block.Offset))
-			wg.Done()
-		}()
-	}
-
-	close(start)
-	wg.Wait()
-	if int(gotTot) != total {
-		t.Errorf("Total mismatch; %d != %d", gotTot, total)
-	}
-}
-
-func TestDeleteAt(t *testing.T) {
-	q := FileQueue{}
-
-	for i := 0; i < 4; i++ {
-		q.files = queuedFileList{{name: "a"}, {name: "b"}, {name: "c"}, {name: "d"}}
-		q.deleteAt(i)
-		if l := len(q.files); l != 3 {
-			t.Fatalf("deleteAt(%d) failed; %d != 3", i, l)
-		}
-	}
-
-	q.files = queuedFileList{{name: "a"}}
-	q.deleteAt(0)
-	if l := len(q.files); l != 0 {
-		t.Fatalf("deleteAt(only) failed; %d != 0", l)
-	}
-}

+ 24 - 77
cmd/syncthing/main.go

@@ -11,7 +11,7 @@ import (
 	_ "net/http/pprof"
 	_ "net/http/pprof"
 	"os"
 	"os"
 	"os/exec"
 	"os/exec"
-	"path"
+	"path/filepath"
 	"runtime"
 	"runtime"
 	"runtime/debug"
 	"runtime/debug"
 	"strings"
 	"strings"
@@ -51,12 +51,15 @@ const (
 
 
  STTRACE      A comma separated string of facilities to trace. The valid
  STTRACE      A comma separated string of facilities to trace. The valid
               facility strings:
               facility strings:
-              - "scanner"  (the file change scanner)
               - "discover" (the node discovery package)
               - "discover" (the node discovery package)
-              - "net"      (connecting and disconnecting, network messages)
+              - "files"    (file set store)
               - "idx"      (index sending and receiving)
               - "idx"      (index sending and receiving)
+              - "mc"       (multicast beacon)
               - "need"     (file need calculations)
               - "need"     (file need calculations)
-              - "pull"     (file pull activity)`
+              - "net"      (connecting and disconnecting, network messages)
+              - "pull"     (file pull activity)
+              - "scanner"  (the file change scanner)
+              `
 )
 )
 
 
 func main() {
 func main() {
@@ -105,7 +108,7 @@ func main() {
 
 
 	// Prepare to be able to save configuration
 	// Prepare to be able to save configuration
 
 
-	cfgFile := path.Join(confDir, "config.xml")
+	cfgFile := filepath.Join(confDir, "config.xml")
 	go saveConfigLoop(cfgFile)
 	go saveConfigLoop(cfgFile)
 
 
 	// Load the configuration file, if it exists.
 	// Load the configuration file, if it exists.
@@ -121,13 +124,13 @@ func main() {
 		cf.Close()
 		cf.Close()
 	} else {
 	} else {
 		// No config.xml, let's try the old syncthing.ini
 		// No config.xml, let's try the old syncthing.ini
-		iniFile := path.Join(confDir, "syncthing.ini")
+		iniFile := filepath.Join(confDir, "syncthing.ini")
 		cf, err := os.Open(iniFile)
 		cf, err := os.Open(iniFile)
 		if err == nil {
 		if err == nil {
 			infoln("Migrating syncthing.ini to config.xml")
 			infoln("Migrating syncthing.ini to config.xml")
 			iniCfg := ini.Parse(cf)
 			iniCfg := ini.Parse(cf)
 			cf.Close()
 			cf.Close()
-			os.Rename(iniFile, path.Join(confDir, "migrated_syncthing.ini"))
+			Rename(iniFile, filepath.Join(confDir, "migrated_syncthing.ini"))
 
 
 			cfg, _ = readConfigXML(nil)
 			cfg, _ = readConfigXML(nil)
 			cfg.Repositories = []RepositoryConfiguration{
 			cfg.Repositories = []RepositoryConfiguration{
@@ -152,7 +155,7 @@ func main() {
 		cfg, err = readConfigXML(nil)
 		cfg, err = readConfigXML(nil)
 		cfg.Repositories = []RepositoryConfiguration{
 		cfg.Repositories = []RepositoryConfiguration{
 			{
 			{
-				Directory: path.Join(getHomeDir(), "Sync"),
+				Directory: filepath.Join(getHomeDir(), "Sync"),
 				Nodes: []NodeConfiguration{
 				Nodes: []NodeConfiguration{
 					{NodeID: myID, Addresses: []string{"dynamic"}},
 					{NodeID: myID, Addresses: []string{"dynamic"}},
 				},
 				},
@@ -259,35 +262,16 @@ func main() {
 
 
 	// Routine to pull blocks from other nodes to synchronize the local
 	// Routine to pull blocks from other nodes to synchronize the local
 	// repository. Does not run when we are in read only (publish only) mode.
 	// repository. Does not run when we are in read only (publish only) mode.
-	if !cfg.Options.ReadOnly {
+	if cfg.Options.ReadOnly {
+		if verbose {
+			okln("Ready to synchronize (read only; no external updates accepted)")
+		}
+		m.StartRO()
+	} else {
 		if verbose {
 		if verbose {
-			if cfg.Options.AllowDelete {
-				infoln("Deletes from peer nodes are allowed")
-			} else {
-				infoln("Deletes from peer nodes will be ignored")
-			}
 			okln("Ready to synchronize (read-write)")
 			okln("Ready to synchronize (read-write)")
 		}
 		}
-		m.StartRW(cfg.Options.AllowDelete, cfg.Options.ParallelRequests)
-	} else if verbose {
-		okln("Ready to synchronize (read only; no external updates accepted)")
-	}
-
-	// Periodically scan the repository and update the local
-	// XXX: Should use some fsnotify mechanism.
-	go func() {
-		td := time.Duration(cfg.Options.RescanIntervalS) * time.Second
-		for {
-			time.Sleep(td)
-			if m.LocalAge() > (td / 2).Seconds() {
-				updateLocalModel(m, w)
-			}
-		}
-	}()
-
-	if verbose {
-		// Periodically print statistics
-		go printStatsLoop(m)
+		m.StartRW(cfg.Options.ParallelRequests)
 	}
 	}
 
 
 	select {}
 	select {}
@@ -344,14 +328,7 @@ func saveConfigLoop(cfgFile string) {
 			continue
 			continue
 		}
 		}
 
 
-		if runtime.GOOS == "windows" {
-			err := os.Remove(cfgFile)
-			if err != nil && !os.IsNotExist(err) {
-				warnln(err)
-			}
-		}
-
-		err = os.Rename(cfgFile+".tmp", cfgFile)
+		err = Rename(cfgFile+".tmp", cfgFile)
 		if err != nil {
 		if err != nil {
 			warnln(err)
 			warnln(err)
 		}
 		}
@@ -362,37 +339,6 @@ func saveConfig() {
 	saveConfigCh <- struct{}{}
 	saveConfigCh <- struct{}{}
 }
 }
 
 
-func printStatsLoop(m *Model) {
-	var lastUpdated int64
-	var lastStats = make(map[string]ConnectionInfo)
-
-	for {
-		time.Sleep(60 * time.Second)
-
-		for node, stats := range m.ConnectionStats() {
-			secs := time.Since(lastStats[node].At).Seconds()
-			inbps := 8 * int(float64(stats.InBytesTotal-lastStats[node].InBytesTotal)/secs)
-			outbps := 8 * int(float64(stats.OutBytesTotal-lastStats[node].OutBytesTotal)/secs)
-
-			if inbps+outbps > 0 {
-				infof("%s: %sb/s in, %sb/s out", node[0:5], MetricPrefix(int64(inbps)), MetricPrefix(int64(outbps)))
-			}
-
-			lastStats[node] = stats
-		}
-
-		if lu := m.Generation(); lu > lastUpdated {
-			lastUpdated = lu
-			files, _, bytes := m.GlobalSize()
-			infof("%6d files, %9sB in cluster", files, BinaryPrefix(bytes))
-			files, _, bytes = m.LocalSize()
-			infof("%6d files, %9sB in local repo", files, BinaryPrefix(bytes))
-			needFiles, bytes := m.NeedFiles()
-			infof("%6d files, %9sB to synchronize", len(needFiles), BinaryPrefix(bytes))
-		}
-	}
-}
-
 func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
 func listenConnect(myID string, disc *discover.Discoverer, m *Model, tlsCfg *tls.Config, connOpts map[string]string) {
 	var conns = make(chan *tls.Conn)
 	var conns = make(chan *tls.Conn)
 
 
@@ -529,7 +475,7 @@ func updateLocalModel(m *Model, w *scanner.Walker) {
 
 
 func saveIndex(m *Model) {
 func saveIndex(m *Model) {
 	name := m.RepoID() + ".idx.gz"
 	name := m.RepoID() + ".idx.gz"
-	fullName := path.Join(confDir, name)
+	fullName := filepath.Join(confDir, name)
 	idxf, err := os.Create(fullName + ".tmp")
 	idxf, err := os.Create(fullName + ".tmp")
 	if err != nil {
 	if err != nil {
 		return
 		return
@@ -543,12 +489,13 @@ func saveIndex(m *Model) {
 	}.EncodeXDR(gzw)
 	}.EncodeXDR(gzw)
 	gzw.Close()
 	gzw.Close()
 	idxf.Close()
 	idxf.Close()
-	os.Rename(fullName+".tmp", fullName)
+
+	Rename(fullName+".tmp", fullName)
 }
 }
 
 
 func loadIndex(m *Model) {
 func loadIndex(m *Model) {
 	name := m.RepoID() + ".idx.gz"
 	name := m.RepoID() + ".idx.gz"
-	idxf, err := os.Open(path.Join(confDir, name))
+	idxf, err := os.Open(filepath.Join(confDir, name))
 	if err != nil {
 	if err != nil {
 		return
 		return
 	}
 	}
@@ -611,7 +558,7 @@ func getHomeDir() string {
 
 
 func getDefaultConfDir() string {
 func getDefaultConfDir() string {
 	if runtime.GOOS == "windows" {
 	if runtime.GOOS == "windows" {
-		return path.Join(os.Getenv("AppData"), "syncthing")
+		return filepath.Join(os.Getenv("AppData"), "syncthing")
 	}
 	}
 	return expandTilde("~/.syncthing")
 	return expandTilde("~/.syncthing")
 }
 }

+ 112 - 554
cmd/syncthing/model.go

@@ -7,59 +7,36 @@ import (
 	"io"
 	"io"
 	"net"
 	"net"
 	"os"
 	"os"
-	"path"
+	"path/filepath"
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
 	"github.com/calmh/syncthing/buffers"
 	"github.com/calmh/syncthing/buffers"
+	"github.com/calmh/syncthing/cid"
+	"github.com/calmh/syncthing/files"
+	"github.com/calmh/syncthing/lamport"
 	"github.com/calmh/syncthing/protocol"
 	"github.com/calmh/syncthing/protocol"
 	"github.com/calmh/syncthing/scanner"
 	"github.com/calmh/syncthing/scanner"
 )
 )
 
 
 type Model struct {
 type Model struct {
 	dir string
 	dir string
+	cm  *cid.Map
+	fs  *files.Set
 
 
-	global    map[string]scanner.File // the latest version of each file as it exists in the cluster
-	gmut      sync.RWMutex            // protects global
-	local     map[string]scanner.File // the files we currently have locally on disk
-	lmut      sync.RWMutex            // protects local
-	remote    map[string]map[string]scanner.File
-	rmut      sync.RWMutex // protects remote
-	protoConn map[string]Connection
+	protoConn map[string]protocol.Connection
 	rawConn   map[string]io.Closer
 	rawConn   map[string]io.Closer
 	pmut      sync.RWMutex // protects protoConn and rawConn
 	pmut      sync.RWMutex // protects protoConn and rawConn
 
 
-	// Queue for files to fetch. fq can call back into the model, so we must ensure
-	// to hold no locks when calling methods on fq.
-	fq *FileQueue
-	dq chan scanner.File // queue for files to delete
-
-	updatedLocal        int64 // timestamp of last update to local
-	updateGlobal        int64 // timestamp of last update to remote
-	lastIdxBcast        time.Time
-	lastIdxBcastRequest time.Time
-	umut                sync.RWMutex // provides updated* and lastIdx*
-
-	rwRunning bool
-	delete    bool
-	initmut   sync.Mutex // protects rwRunning and delete
+	initOnce sync.Once
 
 
 	sup suppressor
 	sup suppressor
 
 
-	parallelRequests int
 	limitRequestRate chan struct{}
 	limitRequestRate chan struct{}
 
 
 	imut sync.Mutex // protects Index
 	imut sync.Mutex // protects Index
 }
 }
 
 
-type Connection interface {
-	ID() string
-	Index(string, []protocol.FileInfo)
-	Request(repo, name string, offset int64, size int) ([]byte, error)
-	Statistics() protocol.Statistics
-	Option(key string) string
-}
-
 const (
 const (
 	idxBcastHoldtime = 15 * time.Second  // Wait at least this long after the last index modification
 	idxBcastHoldtime = 15 * time.Second  // Wait at least this long after the last index modification
 	idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
 	idxBcastMaxDelay = 120 * time.Second // Unless we've already waited this long
@@ -75,16 +52,12 @@ var (
 // for file data without altering the local repository in any way.
 // for file data without altering the local repository in any way.
 func NewModel(dir string, maxChangeBw int) *Model {
 func NewModel(dir string, maxChangeBw int) *Model {
 	m := &Model{
 	m := &Model{
-		dir:          dir,
-		global:       make(map[string]scanner.File),
-		local:        make(map[string]scanner.File),
-		remote:       make(map[string]map[string]scanner.File),
-		protoConn:    make(map[string]Connection),
-		rawConn:      make(map[string]io.Closer),
-		lastIdxBcast: time.Now(),
-		sup:          suppressor{threshold: int64(maxChangeBw)},
-		fq:           NewFileQueue(),
-		dq:           make(chan scanner.File),
+		dir:       dir,
+		cm:        cid.NewMap(),
+		fs:        files.NewSet(),
+		protoConn: make(map[string]protocol.Connection),
+		rawConn:   make(map[string]io.Closer),
+		sup:       suppressor{threshold: int64(maxChangeBw)},
 	}
 	}
 
 
 	go m.broadcastIndexLoop()
 	go m.broadcastIndexLoop()
@@ -109,37 +82,26 @@ func (m *Model) LimitRate(kbps int) {
 // StartRW starts read/write processing on the current model. When in
 // StartRW starts read/write processing on the current model. When in
 // read/write mode the model will attempt to keep in sync with the cluster by
 // read/write mode the model will attempt to keep in sync with the cluster by
 // pulling needed files from peer nodes.
 // pulling needed files from peer nodes.
-func (m *Model) StartRW(del bool, threads int) {
-	m.initmut.Lock()
-	defer m.initmut.Unlock()
-
-	if m.rwRunning {
-		panic("starting started model")
-	}
-
-	m.rwRunning = true
-	m.delete = del
-	m.parallelRequests = threads
+func (m *Model) StartRW(threads int) {
+	m.initOnce.Do(func() {
+		newPuller("default", m.dir, m, threads)
+	})
+}
 
 
-	if del {
-		go m.deleteLoop()
-	}
+// StartRO starts read only processing on the current model. When in
+// read only mode the model will announce files to the cluster but not
+// pull in any external changes.
+func (m *Model) StartRO() {
+	m.initOnce.Do(func() {
+		newPuller("default", m.dir, m, 0) // zero threads => read only
+	})
 }
 }
 
 
 // Generation returns an opaque integer that is guaranteed to increment on
 // Generation returns an opaque integer that is guaranteed to increment on
 // every change to the local repository or global model.
 // every change to the local repository or global model.
-func (m *Model) Generation() int64 {
-	m.umut.RLock()
-	defer m.umut.RUnlock()
-
-	return m.updatedLocal + m.updateGlobal
-}
-
-func (m *Model) LocalAge() float64 {
-	m.umut.RLock()
-	defer m.umut.RUnlock()
-
-	return time.Since(time.Unix(m.updatedLocal, 0)).Seconds()
+func (m *Model) Generation() uint64 {
+	c := m.fs.Changes(cid.LocalID)
+	return c
 }
 }
 
 
 type ConnectionInfo struct {
 type ConnectionInfo struct {
@@ -156,12 +118,10 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
 		RemoteAddr() net.Addr
 		RemoteAddr() net.Addr
 	}
 	}
 
 
-	m.gmut.RLock()
 	m.pmut.RLock()
 	m.pmut.RLock()
-	m.rmut.RLock()
 
 
 	var tot int64
 	var tot int64
-	for _, f := range m.global {
+	for _, f := range m.fs.Global() {
 		if f.Flags&protocol.FlagDeleted == 0 {
 		if f.Flags&protocol.FlagDeleted == 0 {
 			tot += f.Size
 			tot += f.Size
 		}
 		}
@@ -178,10 +138,10 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
 			ci.Address = nc.RemoteAddr().String()
 			ci.Address = nc.RemoteAddr().String()
 		}
 		}
 
 
-		var have int64
-		for _, f := range m.remote[node] {
-			if f.Equals(m.global[f.Name]) && f.Flags&protocol.FlagDeleted == 0 {
-				have += f.Size
+		var have = tot
+		for _, f := range m.fs.Need(m.cm.Get(node)) {
+			if f.Flags&protocol.FlagDeleted == 0 {
+				have -= f.Size
 			}
 			}
 		}
 		}
 
 
@@ -193,18 +153,13 @@ func (m *Model) ConnectionStats() map[string]ConnectionInfo {
 		res[node] = ci
 		res[node] = ci
 	}
 	}
 
 
-	m.rmut.RUnlock()
 	m.pmut.RUnlock()
 	m.pmut.RUnlock()
-	m.gmut.RUnlock()
+
 	return res
 	return res
 }
 }
 
 
-// GlobalSize returns the number of files, deleted files and total bytes for all
-// files in the global model.
-func (m *Model) GlobalSize() (files, deleted int, bytes int64) {
-	m.gmut.RLock()
-
-	for _, f := range m.global {
+func sizeOf(fs []scanner.File) (files, deleted int, bytes int64) {
+	for _, f := range fs {
 		if f.Flags&protocol.FlagDeleted == 0 {
 		if f.Flags&protocol.FlagDeleted == 0 {
 			files++
 			files++
 			bytes += f.Size
 			bytes += f.Size
@@ -212,63 +167,45 @@ func (m *Model) GlobalSize() (files, deleted int, bytes int64) {
 			deleted++
 			deleted++
 		}
 		}
 	}
 	}
-
-	m.gmut.RUnlock()
 	return
 	return
 }
 }
 
 
+// GlobalSize returns the number of files, deleted files and total bytes for all
+// files in the global model.
+func (m *Model) GlobalSize() (files, deleted int, bytes int64) {
+	fs := m.fs.Global()
+	return sizeOf(fs)
+}
+
 // LocalSize returns the number of files, deleted files and total bytes for all
 // LocalSize returns the number of files, deleted files and total bytes for all
 // files in the local repository.
 // files in the local repository.
 func (m *Model) LocalSize() (files, deleted int, bytes int64) {
 func (m *Model) LocalSize() (files, deleted int, bytes int64) {
-	m.lmut.RLock()
-
-	for _, f := range m.local {
-		if f.Flags&protocol.FlagDeleted == 0 {
-			files++
-			bytes += f.Size
-		} else {
-			deleted++
-		}
-	}
-
-	m.lmut.RUnlock()
-	return
+	fs := m.fs.Have(cid.LocalID)
+	return sizeOf(fs)
 }
 }
 
 
 // InSyncSize returns the number and total byte size of the local files that
 // InSyncSize returns the number and total byte size of the local files that
 // are in sync with the global model.
 // are in sync with the global model.
-func (m *Model) InSyncSize() (files, bytes int64) {
-	m.gmut.RLock()
-	m.lmut.RLock()
+func (m *Model) InSyncSize() (files int, bytes int64) {
+	gf := m.fs.Global()
+	hf := m.fs.Need(cid.LocalID)
 
 
-	for n, f := range m.local {
-		if gf, ok := m.global[n]; ok && f.Equals(gf) {
-			if f.Flags&protocol.FlagDeleted == 0 {
-				files++
-				bytes += f.Size
-			}
-		}
-	}
+	gn, _, gb := sizeOf(gf)
+	hn, _, hb := sizeOf(hf)
 
 
-	m.lmut.RUnlock()
-	m.gmut.RUnlock()
-	return
+	return gn - hn, gb - hb
 }
 }
 
 
 // NeedFiles returns the list of currently needed files and the total size.
 // NeedFiles returns the list of currently needed files and the total size.
-func (m *Model) NeedFiles() (files []scanner.File, bytes int64) {
-	qf := m.fq.QueuedFiles()
-
-	m.gmut.RLock()
+func (m *Model) NeedFiles() ([]scanner.File, int64) {
+	nf := m.fs.Need(cid.LocalID)
 
 
-	for _, n := range qf {
-		f := m.global[n]
-		files = append(files, f)
+	var bytes int64
+	for _, f := range nf {
 		bytes += f.Size
 		bytes += f.Size
 	}
 	}
 
 
-	m.gmut.RUnlock()
-	return
+	return nf, bytes
 }
 }
 
 
 // Index is called when a new node is connected and we receive their full index.
 // Index is called when a new node is connected and we receive their full index.
@@ -276,27 +213,16 @@ func (m *Model) NeedFiles() (files []scanner.File, bytes int64) {
 func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
 func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
 	var files = make([]scanner.File, len(fs))
 	var files = make([]scanner.File, len(fs))
 	for i := range fs {
 	for i := range fs {
+		lamport.Default.Tick(fs[i].Version)
 		files[i] = fileFromFileInfo(fs[i])
 		files[i] = fileFromFileInfo(fs[i])
 	}
 	}
 
 
-	m.imut.Lock()
-	defer m.imut.Unlock()
+	cid := m.cm.Get(nodeID)
+	m.fs.Replace(cid, files)
 
 
 	if debugNet {
 	if debugNet {
 		dlog.Printf("IDX(in): %s: %d files", nodeID, len(fs))
 		dlog.Printf("IDX(in): %s: %d files", nodeID, len(fs))
 	}
 	}
-
-	repo := make(map[string]scanner.File)
-	for _, f := range files {
-		m.indexUpdate(repo, f)
-	}
-
-	m.rmut.Lock()
-	m.remote[nodeID] = repo
-	m.rmut.Unlock()
-
-	m.recomputeGlobal()
-	m.recomputeNeedForFiles(files)
 }
 }
 
 
 // IndexUpdate is called for incremental updates to connected nodes' indexes.
 // IndexUpdate is called for incremental updates to connected nodes' indexes.
@@ -304,48 +230,16 @@ func (m *Model) Index(nodeID string, fs []protocol.FileInfo) {
 func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
 func (m *Model) IndexUpdate(nodeID string, fs []protocol.FileInfo) {
 	var files = make([]scanner.File, len(fs))
 	var files = make([]scanner.File, len(fs))
 	for i := range fs {
 	for i := range fs {
+		lamport.Default.Tick(fs[i].Version)
 		files[i] = fileFromFileInfo(fs[i])
 		files[i] = fileFromFileInfo(fs[i])
 	}
 	}
 
 
-	m.imut.Lock()
-	defer m.imut.Unlock()
+	id := m.cm.Get(nodeID)
+	m.fs.Update(id, files)
 
 
 	if debugNet {
 	if debugNet {
 		dlog.Printf("IDXUP(in): %s: %d files", nodeID, len(files))
 		dlog.Printf("IDXUP(in): %s: %d files", nodeID, len(files))
 	}
 	}
-
-	m.rmut.Lock()
-	repo, ok := m.remote[nodeID]
-	if !ok {
-		warnf("Index update from node %s that does not have an index", nodeID)
-		m.rmut.Unlock()
-		return
-	}
-
-	for _, f := range files {
-		m.indexUpdate(repo, f)
-	}
-	m.rmut.Unlock()
-
-	m.recomputeGlobal()
-	m.recomputeNeedForFiles(files)
-}
-
-func (m *Model) indexUpdate(repo map[string]scanner.File, f scanner.File) {
-	if debugIdx {
-		var flagComment string
-		if f.Flags&protocol.FlagDeleted != 0 {
-			flagComment = " (deleted)"
-		}
-		dlog.Printf("IDX(in): %q m=%d f=%o%s v=%d (%d blocks)", f.Name, f.Modified, f.Flags, flagComment, f.Version, len(f.Blocks))
-	}
-
-	if extraFlags := f.Flags &^ (protocol.FlagInvalid | protocol.FlagDeleted | 0xfff); extraFlags != 0 {
-		warnf("IDX(in): Unknown flags 0x%x in index record %+v", extraFlags, f)
-		return
-	}
-
-	repo[f.Name] = f
 }
 }
 
 
 // Close removes the peer from the model and closes the underlying connection if possible.
 // Close removes the peer from the model and closes the underlying connection if possible.
@@ -360,40 +254,26 @@ func (m *Model) Close(node string, err error) {
 		warnf("Connection to %s closed: %v", node, err)
 		warnf("Connection to %s closed: %v", node, err)
 	}
 	}
 
 
-	m.fq.RemoveAvailable(node)
+	cid := m.cm.Get(node)
+	m.fs.Replace(cid, nil)
+	m.cm.Clear(node)
 
 
 	m.pmut.Lock()
 	m.pmut.Lock()
-	m.rmut.Lock()
-
 	conn, ok := m.rawConn[node]
 	conn, ok := m.rawConn[node]
 	if ok {
 	if ok {
 		conn.Close()
 		conn.Close()
 	}
 	}
-
-	delete(m.remote, node)
 	delete(m.protoConn, node)
 	delete(m.protoConn, node)
 	delete(m.rawConn, node)
 	delete(m.rawConn, node)
-
-	m.rmut.Unlock()
 	m.pmut.Unlock()
 	m.pmut.Unlock()
-
-	m.recomputeGlobal()
-	m.recomputeNeedForGlobal()
 }
 }
 
 
 // Request returns the specified data segment by reading it from local disk.
 // Request returns the specified data segment by reading it from local disk.
 // Implements the protocol.Model interface.
 // Implements the protocol.Model interface.
 func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]byte, error) {
 func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]byte, error) {
-	// Verify that the requested file exists in the local and global model.
-	m.lmut.RLock()
-	lf, localOk := m.local[name]
-	m.lmut.RUnlock()
-
-	m.gmut.RLock()
-	_, globalOk := m.global[name]
-	m.gmut.RUnlock()
-
-	if !localOk || !globalOk {
+	// Verify that the requested file exists in the local model.
+	lf := m.fs.Get(cid.LocalID, name)
+	if offset > lf.Size {
 		warnf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
 		warnf("SECURITY (nonexistent file) REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
 		return nil, ErrNoSuchFile
 		return nil, ErrNoSuchFile
 	}
 	}
@@ -404,7 +284,7 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
 	if debugNet && nodeID != "<local>" {
 	if debugNet && nodeID != "<local>" {
 		dlog.Printf("REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
 		dlog.Printf("REQ(in): %s: %q o=%d s=%d", nodeID, name, offset, size)
 	}
 	}
-	fn := path.Join(m.dir, name)
+	fn := filepath.Join(m.dir, name)
 	fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
 	fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
@@ -428,63 +308,23 @@ func (m *Model) Request(nodeID, repo, name string, offset int64, size int) ([]by
 
 
 // ReplaceLocal replaces the local repository index with the given list of files.
 // ReplaceLocal replaces the local repository index with the given list of files.
 func (m *Model) ReplaceLocal(fs []scanner.File) {
 func (m *Model) ReplaceLocal(fs []scanner.File) {
-	var updated bool
-	var newLocal = make(map[string]scanner.File)
-
-	m.lmut.RLock()
-	for _, f := range fs {
-		newLocal[f.Name] = f
-		if ef := m.local[f.Name]; !ef.Equals(f) {
-			updated = true
-		}
-	}
-	m.lmut.RUnlock()
-
-	if m.markDeletedLocals(newLocal) {
-		updated = true
-	}
-
-	m.lmut.RLock()
-	if len(newLocal) != len(m.local) {
-		updated = true
-	}
-	m.lmut.RUnlock()
-
-	if updated {
-		m.lmut.Lock()
-		m.local = newLocal
-		m.lmut.Unlock()
-
-		m.recomputeGlobal()
-		m.recomputeNeedForGlobal()
-
-		m.umut.Lock()
-		m.updatedLocal = time.Now().Unix()
-		m.lastIdxBcastRequest = time.Now()
-		m.umut.Unlock()
-	}
+	m.fs.ReplaceWithDelete(cid.LocalID, fs)
 }
 }
 
 
-// SeedLocal replaces the local repository index with the given list of files,
-// in protocol data types. Does not track deletes, should only be used to seed
-// the local index from a cache file at startup.
+// ReplaceLocal replaces the local repository index with the given list of files.
 func (m *Model) SeedLocal(fs []protocol.FileInfo) {
 func (m *Model) SeedLocal(fs []protocol.FileInfo) {
-	m.lmut.Lock()
-	m.local = make(map[string]scanner.File)
-	for _, f := range fs {
-		m.local[f.Name] = fileFromFileInfo(f)
+	var sfs = make([]scanner.File, len(fs))
+	for i := 0; i < len(fs); i++ {
+		lamport.Default.Tick(fs[i].Version)
+		sfs[i] = fileFromFileInfo(fs[i])
 	}
 	}
-	m.lmut.Unlock()
 
 
-	m.recomputeGlobal()
-	m.recomputeNeedForGlobal()
+	m.fs.Replace(cid.LocalID, sfs)
 }
 }
 
 
 // Implements scanner.CurrentFiler
 // Implements scanner.CurrentFiler
 func (m *Model) CurrentFile(file string) scanner.File {
 func (m *Model) CurrentFile(file string) scanner.File {
-	m.lmut.RLock()
-	f := m.local[file]
-	m.lmut.RUnlock()
+	f := m.fs.Get(cid.LocalID, file)
 	return f
 	return f
 }
 }
 
 
@@ -504,7 +344,7 @@ func (m *Model) RepoID() string {
 // AddConnection adds a new peer connection to the model. An initial index will
 // AddConnection adds a new peer connection to the model. An initial index will
 // be sent to the connected peer, thereafter index updates whenever the local
 // be sent to the connected peer, thereafter index updates whenever the local
 // repository changes.
 // repository changes.
-func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
+func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) {
 	nodeID := protoConn.ID()
 	nodeID := protoConn.ID()
 	m.pmut.Lock()
 	m.pmut.Lock()
 	if _, ok := m.protoConn[nodeID]; ok {
 	if _, ok := m.protoConn[nodeID]; ok {
@@ -524,44 +364,6 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
 		}
 		}
 		protoConn.Index("default", idx)
 		protoConn.Index("default", idx)
 	}()
 	}()
-
-	m.initmut.Lock()
-	rw := m.rwRunning
-	m.initmut.Unlock()
-	if !rw {
-		return
-	}
-
-	for i := 0; i < m.parallelRequests; i++ {
-		i := i
-		go func() {
-			if debugPull {
-				dlog.Println("starting puller:", nodeID, i)
-			}
-			for {
-				m.pmut.RLock()
-				if _, ok := m.protoConn[nodeID]; !ok {
-					if debugPull {
-						dlog.Println("stopping puller:", nodeID, i)
-					}
-					m.pmut.RUnlock()
-					return
-				}
-				m.pmut.RUnlock()
-
-				qb, ok := m.fq.Get(nodeID)
-				if ok {
-					if debugPull {
-						dlog.Println("request: out", nodeID, i, qb.name, qb.block.Offset)
-					}
-					data, _ := protoConn.Request("default", qb.name, qb.block.Offset, int(qb.block.Size))
-					m.fq.Done(qb.name, qb.block.Offset, data)
-				} else {
-					time.Sleep(1 * time.Second)
-				}
-			}
-		}()
-	}
 }
 }
 
 
 // ProtocolIndex returns the current local index in protocol data types.
 // ProtocolIndex returns the current local index in protocol data types.
@@ -569,9 +371,9 @@ func (m *Model) AddConnection(rawConn io.Closer, protoConn Connection) {
 func (m *Model) ProtocolIndex() []protocol.FileInfo {
 func (m *Model) ProtocolIndex() []protocol.FileInfo {
 	var index []protocol.FileInfo
 	var index []protocol.FileInfo
 
 
-	m.lmut.RLock()
+	fs := m.fs.Have(cid.LocalID)
 
 
-	for _, f := range m.local {
+	for _, f := range fs {
 		mf := fileInfoFromFile(f)
 		mf := fileInfoFromFile(f)
 		if debugIdx {
 		if debugIdx {
 			var flagComment string
 			var flagComment string
@@ -583,10 +385,13 @@ func (m *Model) ProtocolIndex() []protocol.FileInfo {
 		index = append(index, mf)
 		index = append(index, mf)
 	}
 	}
 
 
-	m.lmut.RUnlock()
 	return index
 	return index
 }
 }
 
 
+func (m *Model) updateLocal(f scanner.File) {
+	m.fs.Update(cid.LocalID, []scanner.File{f})
+}
+
 func (m *Model) requestGlobal(nodeID, name string, offset int64, size int, hash []byte) ([]byte, error) {
 func (m *Model) requestGlobal(nodeID, name string, offset int64, size int, hash []byte) ([]byte, error) {
 	m.pmut.RLock()
 	m.pmut.RLock()
 	nc, ok := m.protoConn[nodeID]
 	nc, ok := m.protoConn[nodeID]
@@ -604,290 +409,42 @@ func (m *Model) requestGlobal(nodeID, name string, offset int64, size int, hash
 }
 }
 
 
 func (m *Model) broadcastIndexLoop() {
 func (m *Model) broadcastIndexLoop() {
+	var lastChange uint64
 	for {
 	for {
-		m.umut.RLock()
-		bcastRequested := m.lastIdxBcastRequest.After(m.lastIdxBcast)
-		holdtimeExceeded := time.Since(m.lastIdxBcastRequest) > idxBcastHoldtime
-		m.umut.RUnlock()
-
-		maxDelayExceeded := time.Since(m.lastIdxBcast) > idxBcastMaxDelay
-		if bcastRequested && (holdtimeExceeded || maxDelayExceeded) {
-			idx := m.ProtocolIndex()
-
-			var indexWg sync.WaitGroup
-			indexWg.Add(len(m.protoConn))
-
-			m.umut.Lock()
-			m.lastIdxBcast = time.Now()
-			m.umut.Unlock()
-
-			m.pmut.RLock()
-			for _, node := range m.protoConn {
-				node := node
-				if debugNet {
-					dlog.Printf("IDX(out/loop): %s: %d files", node.ID(), len(idx))
-				}
-				go func() {
-					node.Index("default", idx)
-					indexWg.Done()
-				}()
-			}
-			m.pmut.RUnlock()
+		time.Sleep(5 * time.Second)
 
 
-			indexWg.Wait()
+		c := m.fs.Changes(cid.LocalID)
+		if c == lastChange {
+			continue
 		}
 		}
-		time.Sleep(idxBcastHoldtime)
-	}
-}
-
-// markDeletedLocals sets the deleted flag on files that have gone missing locally.
-func (m *Model) markDeletedLocals(newLocal map[string]scanner.File) bool {
-	// For every file in the existing local table, check if they are also
-	// present in the new local table. If they are not, check that we already
-	// had the newest version available according to the global table and if so
-	// note the file as having been deleted.
-	var updated bool
-
-	m.gmut.RLock()
-	m.lmut.RLock()
-
-	for n, f := range m.local {
-		if _, ok := newLocal[n]; !ok {
-			if gf := m.global[n]; !gf.NewerThan(f) {
-				if f.Flags&protocol.FlagDeleted == 0 {
-					f.Flags = protocol.FlagDeleted
-					f.Version++
-					f.Blocks = nil
-					updated = true
-				}
-				newLocal[n] = f
-			}
-		}
-	}
-
-	m.lmut.RUnlock()
-	m.gmut.RUnlock()
-
-	return updated
-}
+		lastChange = c
 
 
-func (m *Model) updateLocal(f scanner.File) {
-	var updated bool
-
-	m.lmut.Lock()
-	if ef, ok := m.local[f.Name]; !ok || !ef.Equals(f) {
-		m.local[f.Name] = f
-		updated = true
-	}
-	m.lmut.Unlock()
-
-	if updated {
-		m.recomputeGlobal()
-		// We don't recomputeNeed here for two reasons:
-		// - a need shouldn't have arisen due to having a newer local file
-		// - recomputeNeed might call into fq.Add but we might have been called by
-		//   fq which would be a deadlock on fq
-
-		m.umut.Lock()
-		m.updatedLocal = time.Now().Unix()
-		m.lastIdxBcastRequest = time.Now()
-		m.umut.Unlock()
-	}
-}
+		saveIndex(m) // This should be cleaned up we don't do a lot of processing twice
 
 
-/*
-XXX: Not done, needs elegant handling of availability
+		fs := m.fs.Have(cid.LocalID)
 
 
-func (m *Model) recomputeGlobalFor(files []scanner.File) bool {
-	m.gmut.Lock()
-	defer m.gmut.Unlock()
+		var indexWg sync.WaitGroup
+		indexWg.Add(len(m.protoConn))
 
 
-	var updated bool
-	for _, f := range files {
-		if gf, ok := m.global[f.Name]; !ok || f.NewerThan(gf) {
-			m.global[f.Name] = f
-			updated = true
-			// Fix availability
+		var idx = make([]protocol.FileInfo, len(fs))
+		for i, f := range fs {
+			idx[i] = fileInfoFromFile(f)
 		}
 		}
-	}
-	return updated
-}
-*/
-
-func (m *Model) recomputeGlobal() {
-	var newGlobal = make(map[string]scanner.File)
 
 
-	m.lmut.RLock()
-	for n, f := range m.local {
-		newGlobal[n] = f
-	}
-	m.lmut.RUnlock()
-
-	var available = make(map[string][]string)
-
-	m.rmut.RLock()
-	var highestMod int64
-	for nodeID, fs := range m.remote {
-		for n, nf := range fs {
-			if lf, ok := newGlobal[n]; !ok || nf.NewerThan(lf) {
-				newGlobal[n] = nf
-				available[n] = []string{nodeID}
-				if nf.Modified > highestMod {
-					highestMod = nf.Modified
-				}
-			} else if lf.Equals(nf) {
-				available[n] = append(available[n], nodeID)
+		m.pmut.RLock()
+		for _, node := range m.protoConn {
+			node := node
+			if debugNet {
+				dlog.Printf("IDX(out/loop): %s: %d files", node.ID(), len(idx))
 			}
 			}
+			go func() {
+				node.Index("default", idx)
+				indexWg.Done()
+			}()
 		}
 		}
-	}
-	m.rmut.RUnlock()
-
-	for f, ns := range available {
-		m.fq.SetAvailable(f, ns)
-	}
-
-	// Figure out if anything actually changed
-
-	m.gmut.RLock()
-	var updated bool
-	if highestMod > m.updateGlobal || len(newGlobal) != len(m.global) {
-		updated = true
-	} else {
-		for n, f0 := range newGlobal {
-			if f1, ok := m.global[n]; !ok || !f0.Equals(f1) {
-				updated = true
-				break
-			}
-		}
-	}
-	m.gmut.RUnlock()
-
-	if updated {
-		m.gmut.Lock()
-		m.umut.Lock()
-		m.global = newGlobal
-		m.updateGlobal = time.Now().Unix()
-		m.umut.Unlock()
-		m.gmut.Unlock()
-	}
-}
-
-type addOrder struct {
-	n      string
-	remote []scanner.Block
-	fm     *fileMonitor
-}
-
-func (m *Model) recomputeNeedForGlobal() {
-	var toDelete []scanner.File
-	var toAdd []addOrder
-
-	m.gmut.RLock()
-
-	for _, gf := range m.global {
-		toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
-	}
-
-	m.gmut.RUnlock()
-
-	for _, ao := range toAdd {
-		m.fq.Add(ao.n, ao.remote, ao.fm)
-	}
-	for _, gf := range toDelete {
-		m.dq <- gf
-	}
-}
-
-func (m *Model) recomputeNeedForFiles(files []scanner.File) {
-	var toDelete []scanner.File
-	var toAdd []addOrder
-
-	m.gmut.RLock()
-
-	for _, gf := range files {
-		toAdd, toDelete = m.recomputeNeedForFile(gf, toAdd, toDelete)
-	}
-
-	m.gmut.RUnlock()
-
-	for _, ao := range toAdd {
-		m.fq.Add(ao.n, ao.remote, ao.fm)
-	}
-	for _, gf := range toDelete {
-		m.dq <- gf
-	}
-}
-
-func (m *Model) recomputeNeedForFile(gf scanner.File, toAdd []addOrder, toDelete []scanner.File) ([]addOrder, []scanner.File) {
-	m.lmut.RLock()
-	lf, ok := m.local[gf.Name]
-	m.lmut.RUnlock()
-
-	if !ok || gf.NewerThan(lf) {
-		if gf.Suppressed {
-			// Never attempt to sync invalid files
-			return toAdd, toDelete
-		}
-		if gf.Flags&protocol.FlagDeleted != 0 && !m.delete {
-			// Don't want to delete files, so forget this need
-			return toAdd, toDelete
-		}
-		if gf.Flags&protocol.FlagDeleted != 0 && !ok {
-			// Don't have the file, so don't need to delete it
-			return toAdd, toDelete
-		}
-		if debugNeed {
-			dlog.Printf("need: lf:%v gf:%v", lf, gf)
-		}
-
-		if gf.Flags&protocol.FlagDeleted != 0 {
-			toDelete = append(toDelete, gf)
-		} else {
-			local, remote := scanner.BlockDiff(lf.Blocks, gf.Blocks)
-			fm := fileMonitor{
-				name:        FSNormalize(gf.Name),
-				path:        FSNormalize(path.Clean(path.Join(m.dir, gf.Name))),
-				global:      gf,
-				model:       m,
-				localBlocks: local,
-			}
-			toAdd = append(toAdd, addOrder{gf.Name, remote, &fm})
-		}
-	}
-
-	return toAdd, toDelete
-}
-
-func (m *Model) WhoHas(name string) []string {
-	var remote []string
-
-	m.gmut.RLock()
-	m.rmut.RLock()
-
-	gf := m.global[name]
-	for node, files := range m.remote {
-		if file, ok := files[name]; ok && file.Equals(gf) {
-			remote = append(remote, node)
-		}
-	}
-
-	m.rmut.RUnlock()
-	m.gmut.RUnlock()
-	return remote
-}
-
-func (m *Model) deleteLoop() {
-	for file := range m.dq {
-		if debugPull {
-			dlog.Println("delete", file.Name)
-		}
-		path := FSNormalize(path.Clean(path.Join(m.dir, file.Name)))
-		err := os.Remove(path)
-		if err != nil {
-			warnf("%s: %v", file.Name, err)
-		}
+		m.pmut.RUnlock()
 
 
-		m.updateLocal(file)
+		indexWg.Wait()
 	}
 	}
 }
 }
 
 
@@ -903,7 +460,8 @@ func fileFromFileInfo(f protocol.FileInfo) scanner.File {
 		offset += int64(b.Size)
 		offset += int64(b.Size)
 	}
 	}
 	return scanner.File{
 	return scanner.File{
-		Name:       f.Name,
+		// Name is with native separator and normalization
+		Name:       filepath.FromSlash(f.Name),
 		Size:       offset,
 		Size:       offset,
 		Flags:      f.Flags &^ protocol.FlagInvalid,
 		Flags:      f.Flags &^ protocol.FlagInvalid,
 		Modified:   f.Modified,
 		Modified:   f.Modified,
@@ -922,7 +480,7 @@ func fileInfoFromFile(f scanner.File) protocol.FileInfo {
 		}
 		}
 	}
 	}
 	pf := protocol.FileInfo{
 	pf := protocol.FileInfo{
-		Name:     f.Name,
+		Name:     filepath.ToSlash(f.Name),
 		Flags:    f.Flags,
 		Flags:    f.Flags,
 		Modified: f.Modified,
 		Modified: f.Modified,
 		Version:  f.Version,
 		Version:  f.Version,

+ 27 - 336
cmd/syncthing/model_test.go

@@ -4,30 +4,14 @@ import (
 	"bytes"
 	"bytes"
 	"fmt"
 	"fmt"
 	"os"
 	"os"
-	"reflect"
 	"testing"
 	"testing"
 	"time"
 	"time"
 
 
+	"github.com/calmh/syncthing/cid"
 	"github.com/calmh/syncthing/protocol"
 	"github.com/calmh/syncthing/protocol"
 	"github.com/calmh/syncthing/scanner"
 	"github.com/calmh/syncthing/scanner"
 )
 )
 
 
-func TestNewModel(t *testing.T) {
-	m := NewModel("foo", 1e6)
-
-	if m == nil {
-		t.Fatalf("NewModel returned nil")
-	}
-
-	if fs, _ := m.NeedFiles(); len(fs) > 0 {
-		t.Errorf("New model should have no Need")
-	}
-
-	if len(m.local) > 0 {
-		t.Errorf("New model should have no Have")
-	}
-}
-
 var testDataExpected = map[string]scanner.File{
 var testDataExpected = map[string]scanner.File{
 	"foo": scanner.File{
 	"foo": scanner.File{
 		Name:     "foo",
 		Name:     "foo",
@@ -62,295 +46,6 @@ func init() {
 	}
 	}
 }
 }
 
 
-func TestUpdateLocal(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	if fs, _ := m.NeedFiles(); len(fs) > 0 {
-		t.Fatalf("Model with only local data should have no need")
-	}
-
-	if l1, l2 := len(m.local), len(testDataExpected); l1 != l2 {
-		t.Fatalf("Model len(local) incorrect, %d != %d", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(testDataExpected); l1 != l2 {
-		t.Fatalf("Model len(global) incorrect, %d != %d", l1, l2)
-	}
-	for name, file := range testDataExpected {
-		if f, ok := m.local[name]; ok {
-			if !reflect.DeepEqual(f, file) {
-				t.Errorf("Incorrect local\n%v !=\n%v\nfor file %q", f, file, name)
-			}
-		} else {
-			t.Errorf("Missing file %q in local table", name)
-		}
-		if f, ok := m.global[name]; ok {
-			if !reflect.DeepEqual(f, file) {
-				t.Errorf("Incorrect global\n%v !=\n%v\nfor file %q", f, file, name)
-			}
-		} else {
-			t.Errorf("Missing file %q in global table", name)
-		}
-	}
-
-	for _, f := range fs {
-		if hf, ok := m.local[f.Name]; !ok || hf.Modified != f.Modified {
-			t.Fatalf("Incorrect local for %q", f.Name)
-		}
-		if cf, ok := m.global[f.Name]; !ok || cf.Modified != f.Modified {
-			t.Fatalf("Incorrect global for %q", f.Name)
-		}
-	}
-}
-
-func TestRemoteUpdateExisting(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	newFile := protocol.FileInfo{
-		Name:     "foo",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-	}
-	m.Index("42", []protocol.FileInfo{newFile})
-
-	if fs, _ := m.NeedFiles(); len(fs) != 1 {
-		t.Errorf("Model missing Need for one file (%d != 1)", len(fs))
-	}
-}
-
-func TestRemoteAddNew(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	newFile := protocol.FileInfo{
-		Name:     "a new file",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-	}
-	m.Index("42", []protocol.FileInfo{newFile})
-
-	if fs, _ := m.NeedFiles(); len(fs) != 1 {
-		t.Errorf("Model len(m.need) incorrect (%d != 1)", len(fs))
-	}
-}
-
-func TestRemoteUpdateOld(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	oldTimeStamp := int64(1234)
-	newFile := protocol.FileInfo{
-		Name:     "foo",
-		Modified: oldTimeStamp,
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-	}
-	m.Index("42", []protocol.FileInfo{newFile})
-
-	if fs, _ := m.NeedFiles(); len(fs) != 0 {
-		t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
-	}
-}
-
-func TestRemoteIndexUpdate(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	foo := protocol.FileInfo{
-		Name:     "foo",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-	}
-
-	bar := protocol.FileInfo{
-		Name:     "bar",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-	}
-
-	m.Index("42", []protocol.FileInfo{foo})
-
-	if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
-		t.Error("Model doesn't need 'foo'")
-	}
-
-	m.IndexUpdate("42", []protocol.FileInfo{bar})
-
-	if fs, _ := m.NeedFiles(); fs[0].Name != "foo" {
-		t.Error("Model doesn't need 'foo'")
-	}
-	if fs, _ := m.NeedFiles(); fs[1].Name != "bar" {
-		t.Error("Model doesn't need 'bar'")
-	}
-}
-
-func TestDelete(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	if l1, l2 := len(m.local), len(fs); l1 != l2 {
-		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(fs); l1 != l2 {
-		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
-	}
-
-	ot := time.Now().Unix()
-	newFile := scanner.File{
-		Name:     "a new file",
-		Modified: ot,
-		Blocks:   []scanner.Block{{0, 100, []byte("some hash bytes")}},
-	}
-	m.updateLocal(newFile)
-
-	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
-		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
-		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
-	}
-
-	// The deleted file is kept in the local and global tables and marked as deleted.
-
-	m.ReplaceLocal(fs)
-
-	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
-		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
-		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
-	}
-
-	if m.local["a new file"].Flags&(1<<12) == 0 {
-		t.Error("Unexpected deleted flag = 0 in local table")
-	}
-	if len(m.local["a new file"].Blocks) != 0 {
-		t.Error("Unexpected non-zero blocks for deleted file in local")
-	}
-	if ft := m.local["a new file"].Modified; ft != ot {
-		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot+1)
-	}
-	if fv := m.local["a new file"].Version; fv != 1 {
-		t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
-	}
-
-	if m.global["a new file"].Flags&(1<<12) == 0 {
-		t.Error("Unexpected deleted flag = 0 in global table")
-	}
-	if len(m.global["a new file"].Blocks) != 0 {
-		t.Error("Unexpected non-zero blocks for deleted file in global")
-	}
-	if ft := m.global["a new file"].Modified; ft != ot {
-		t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot+1)
-	}
-	if fv := m.local["a new file"].Version; fv != 1 {
-		t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
-	}
-
-	// Another update should change nothing
-
-	m.ReplaceLocal(fs)
-
-	if l1, l2 := len(m.local), len(fs)+1; l1 != l2 {
-		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
-		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
-	}
-
-	if m.local["a new file"].Flags&(1<<12) == 0 {
-		t.Error("Unexpected deleted flag = 0 in local table")
-	}
-	if len(m.local["a new file"].Blocks) != 0 {
-		t.Error("Unexpected non-zero blocks for deleted file in local")
-	}
-	if ft := m.local["a new file"].Modified; ft != ot {
-		t.Errorf("Unexpected time %d != %d for deleted file in local", ft, ot)
-	}
-	if fv := m.local["a new file"].Version; fv != 1 {
-		t.Errorf("Unexpected version %d != 1 for deleted file in local", fv)
-	}
-
-	if m.global["a new file"].Flags&(1<<12) == 0 {
-		t.Error("Unexpected deleted flag = 0 in global table")
-	}
-	if len(m.global["a new file"].Blocks) != 0 {
-		t.Error("Unexpected non-zero blocks for deleted file in global")
-	}
-	if ft := m.global["a new file"].Modified; ft != ot {
-		t.Errorf("Unexpected time %d != %d for deleted file in global", ft, ot)
-	}
-	if fv := m.local["a new file"].Version; fv != 1 {
-		t.Errorf("Unexpected version %d != 1 for deleted file in global", fv)
-	}
-}
-
-func TestForgetNode(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	if l1, l2 := len(m.local), len(fs); l1 != l2 {
-		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(fs); l1 != l2 {
-		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
-	}
-	if fs, _ := m.NeedFiles(); len(fs) != 0 {
-		t.Errorf("Model len(need) incorrect (%d != 0)", len(fs))
-	}
-
-	newFile := protocol.FileInfo{
-		Name:     "new file",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-	}
-	m.Index("42", []protocol.FileInfo{newFile})
-
-	newFile = protocol.FileInfo{
-		Name:     "new file 2",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-	}
-	m.Index("43", []protocol.FileInfo{newFile})
-
-	if l1, l2 := len(m.local), len(fs); l1 != l2 {
-		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(fs)+2; l1 != l2 {
-		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
-	}
-	if fs, _ := m.NeedFiles(); len(fs) != 2 {
-		t.Errorf("Model len(need) incorrect (%d != 2)", len(fs))
-	}
-
-	m.Close("42", nil)
-
-	if l1, l2 := len(m.local), len(fs); l1 != l2 {
-		t.Errorf("Model len(local) incorrect (%d != %d)", l1, l2)
-	}
-	if l1, l2 := len(m.global), len(fs)+1; l1 != l2 {
-		t.Errorf("Model len(global) incorrect (%d != %d)", l1, l2)
-	}
-
-	if fs, _ := m.NeedFiles(); len(fs) != 1 {
-		t.Errorf("Model len(need) incorrect (%d != 1)", len(fs))
-	}
-}
-
 func TestRequest(t *testing.T) {
 func TestRequest(t *testing.T) {
 	m := NewModel("testdata", 1e6)
 	m := NewModel("testdata", 1e6)
 	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
 	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
@@ -374,36 +69,6 @@ func TestRequest(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestIgnoreWithUnknownFlags(t *testing.T) {
-	m := NewModel("testdata", 1e6)
-	w := scanner.Walker{Dir: "testdata", IgnoreFile: ".stignore", BlockSize: 128 * 1024}
-	fs, _ := w.Walk()
-	m.ReplaceLocal(fs)
-
-	valid := protocol.FileInfo{
-		Name:     "valid",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-		Flags:    protocol.FlagDeleted | 0755,
-	}
-
-	invalid := protocol.FileInfo{
-		Name:     "invalid",
-		Modified: time.Now().Unix(),
-		Blocks:   []protocol.BlockInfo{{100, []byte("some hash bytes")}},
-		Flags:    1<<27 | protocol.FlagDeleted | 0755,
-	}
-
-	m.Index("42", []protocol.FileInfo{valid, invalid})
-
-	if _, ok := m.global[valid.Name]; !ok {
-		t.Error("Model should include", valid)
-	}
-	if _, ok := m.global[invalid.Name]; ok {
-		t.Error("Model not should include", invalid)
-	}
-}
-
 func genFiles(n int) []protocol.FileInfo {
 func genFiles(n int) []protocol.FileInfo {
 	files := make([]protocol.FileInfo, n)
 	files := make([]protocol.FileInfo, n)
 	t := time.Now().Unix()
 	t := time.Now().Unix()
@@ -554,3 +219,29 @@ func BenchmarkRequest(b *testing.B) {
 		}
 		}
 	}
 	}
 }
 }
+
+func TestActivityMap(t *testing.T) {
+	cm := cid.NewMap()
+	fooID := cm.Get("foo")
+	if fooID == 0 {
+		t.Fatal("ID cannot be zero")
+	}
+	barID := cm.Get("bar")
+	if barID == 0 {
+		t.Fatal("ID cannot be zero")
+	}
+
+	m := make(activityMap)
+	if node := m.leastBusyNode(1<<fooID, cm); node != "foo" {
+		t.Errorf("Incorrect least busy node %q", node)
+	}
+	if node := m.leastBusyNode(1<<barID, cm); node != "bar" {
+		t.Errorf("Incorrect least busy node %q", node)
+	}
+	if node := m.leastBusyNode(1<<fooID|1<<barID, cm); node != "foo" {
+		t.Errorf("Incorrect least busy node %q", node)
+	}
+	if node := m.leastBusyNode(1<<fooID|1<<barID, cm); node != "bar" {
+		t.Errorf("Incorrect least busy node %q", node)
+	}
+}

+ 0 - 11
cmd/syncthing/normalize.go

@@ -1,11 +0,0 @@
-//+build !darwin
-
-package main
-
-import "code.google.com/p/go.text/unicode/norm"
-
-// FSNormalize returns the string with the required unicode normalization for
-// the host operating system.
-func FSNormalize(s string) string {
-	return norm.NFC.String(s)
-}

+ 0 - 11
cmd/syncthing/normalize_darwin.go

@@ -1,11 +0,0 @@
-//+build darwin
-
-package main
-
-import "code.google.com/p/go.text/unicode/norm"
-
-// FSNormalize returns the string with the required unicode normalization for
-// the host operating system.
-func FSNormalize(s string) string {
-	return norm.NFD.String(s)
-}

+ 477 - 0
cmd/syncthing/puller.go

@@ -0,0 +1,477 @@
+package main
+
+import (
+	"bytes"
+	"errors"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/calmh/syncthing/buffers"
+	"github.com/calmh/syncthing/cid"
+	"github.com/calmh/syncthing/protocol"
+	"github.com/calmh/syncthing/scanner"
+)
+
+type requestResult struct {
+	node     string
+	file     scanner.File
+	filepath string // full filepath name
+	offset   int64
+	data     []byte
+	err      error
+}
+
+type openFile struct {
+	filepath     string // full filepath name
+	temp         string // temporary filename
+	availability uint64 // availability bitset
+	file         *os.File
+	err          error // error when opening or writing to file, all following operations are cancelled
+	outstanding  int   // number of requests we still have outstanding
+	done         bool  // we have sent all requests for this file
+}
+
+type activityMap map[string]int
+
+func (m activityMap) leastBusyNode(availability uint64, cm *cid.Map) string {
+	var low int = 2<<30 - 1
+	var selected string
+	for _, node := range cm.Names() {
+		id := cm.Get(node)
+		if id == cid.LocalID {
+			continue
+		}
+		usage := m[node]
+		if availability&(1<<id) != 0 {
+			if usage < low {
+				low = usage
+				selected = node
+			}
+		}
+	}
+	m[selected]++
+	return selected
+}
+
+func (m activityMap) decrease(node string) {
+	m[node]--
+}
+
+var errNoNode = errors.New("no available source node")
+
+type puller struct {
+	repo              string
+	dir               string
+	bq                *blockQueue
+	model             *Model
+	oustandingPerNode activityMap
+	openFiles         map[string]openFile
+	requestSlots      chan bool
+	blocks            chan bqBlock
+	requestResults    chan requestResult
+}
+
+func newPuller(repo, dir string, model *Model, slots int) *puller {
+	p := &puller{
+		repo:              repo,
+		dir:               dir,
+		bq:                newBlockQueue(),
+		model:             model,
+		oustandingPerNode: make(activityMap),
+		openFiles:         make(map[string]openFile),
+		requestSlots:      make(chan bool, slots),
+		blocks:            make(chan bqBlock),
+		requestResults:    make(chan requestResult),
+	}
+
+	if slots > 0 {
+		// Read/write
+		for i := 0; i < slots; i++ {
+			p.requestSlots <- true
+		}
+		if debugPull {
+			dlog.Printf("starting puller; repo %q dir %q slots %d", repo, dir, slots)
+		}
+		go p.run()
+	} else {
+		// Read only
+		if debugPull {
+			dlog.Printf("starting puller; repo %q dir %q (read only)", repo, dir)
+		}
+		go p.runRO()
+	}
+	return p
+}
+
+func (p *puller) run() {
+	go func() {
+		// fill blocks queue when there are free slots
+		for {
+			<-p.requestSlots
+			b := p.bq.get()
+			if debugPull {
+				dlog.Printf("filler: queueing %q offset %d copy %d", b.file.Name, b.block.Offset, len(b.copy))
+			}
+			p.blocks <- b
+		}
+	}()
+
+	walkTicker := time.Tick(time.Duration(cfg.Options.RescanIntervalS) * time.Second)
+	timeout := time.Tick(5 * time.Second)
+
+	sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}
+	w := &scanner.Walker{
+		Dir:            p.dir,
+		IgnoreFile:     ".stignore",
+		FollowSymlinks: cfg.Options.FollowSymlinks,
+		BlockSize:      BlockSize,
+		TempNamer:      defTempNamer,
+		Suppressor:     sup,
+		CurrentFiler:   p.model,
+	}
+
+	for {
+		// Run the pulling loop as long as there are blocks to fetch
+	pull:
+		for {
+			select {
+			case res := <-p.requestResults:
+				p.requestSlots <- true
+				p.handleRequestResult(res)
+
+			case b := <-p.blocks:
+				p.handleBlock(b)
+
+			case <-timeout:
+				if debugPull {
+					dlog.Println("timeout")
+				}
+				if len(p.openFiles) == 0 && p.bq.empty() {
+					// Nothing more to do for the moment
+					break pull
+				}
+				if debugPull {
+					dlog.Printf("idle but have %d open files", len(p.openFiles))
+					i := 5
+					for _, f := range p.openFiles {
+						dlog.Printf("  %v", f)
+						i--
+						if i == 0 {
+							break
+						}
+					}
+				}
+			}
+		}
+
+		// Do a rescan if it's time for it
+		select {
+		case <-walkTicker:
+			if debugPull {
+				dlog.Println("time for rescan")
+			}
+			files, _ := w.Walk()
+			p.model.fs.ReplaceWithDelete(cid.LocalID, files)
+
+		default:
+		}
+
+		// Queue more blocks to fetch, if any
+		p.queueNeededBlocks()
+	}
+}
+
+func (p *puller) runRO() {
+	walkTicker := time.Tick(time.Duration(cfg.Options.RescanIntervalS) * time.Second)
+
+	sup := &suppressor{threshold: int64(cfg.Options.MaxChangeKbps)}
+	w := &scanner.Walker{
+		Dir:            p.dir,
+		IgnoreFile:     ".stignore",
+		FollowSymlinks: cfg.Options.FollowSymlinks,
+		BlockSize:      BlockSize,
+		TempNamer:      defTempNamer,
+		Suppressor:     sup,
+		CurrentFiler:   p.model,
+	}
+
+	for _ = range walkTicker {
+		if debugPull {
+			dlog.Println("time for rescan")
+		}
+		files, _ := w.Walk()
+		p.model.fs.ReplaceWithDelete(cid.LocalID, files)
+	}
+}
+
+func (p *puller) handleRequestResult(res requestResult) {
+	p.oustandingPerNode.decrease(res.node)
+	f := res.file
+
+	of, ok := p.openFiles[f.Name]
+	if !ok || of.err != nil {
+		// no entry in openFiles means there was an error and we've cancelled the operation
+		return
+	}
+
+	_, of.err = of.file.WriteAt(res.data, res.offset)
+	buffers.Put(res.data)
+
+	of.outstanding--
+	p.openFiles[f.Name] = of
+
+	if debugPull {
+		dlog.Printf("pull: wrote %q offset %d outstanding %d done %v", f.Name, res.offset, of.outstanding, of.done)
+	}
+
+	if of.done && of.outstanding == 0 {
+		if debugPull {
+			dlog.Printf("pull: closing %q", f.Name)
+		}
+		of.file.Close()
+		defer os.Remove(of.temp)
+
+		delete(p.openFiles, f.Name)
+
+		fd, err := os.Open(of.temp)
+		if err != nil {
+			if debugPull {
+				dlog.Printf("pull: error: %q: %v", f.Name, err)
+			}
+			return
+		}
+		hb, _ := scanner.Blocks(fd, BlockSize)
+		fd.Close()
+
+		if l0, l1 := len(hb), len(f.Blocks); l0 != l1 {
+			if debugPull {
+				dlog.Printf("pull: %q: nblocks %d != %d", f.Name, l0, l1)
+			}
+			return
+		}
+
+		for i := range hb {
+			if bytes.Compare(hb[i].Hash, f.Blocks[i].Hash) != 0 {
+				dlog.Printf("pull: %q: block %d hash mismatch", f.Name, i)
+				return
+			}
+		}
+
+		t := time.Unix(f.Modified, 0)
+		os.Chtimes(of.temp, t, t)
+		os.Chmod(of.temp, os.FileMode(f.Flags&0777))
+		if debugPull {
+			dlog.Printf("pull: rename %q: %q", f.Name, of.filepath)
+		}
+		if err := Rename(of.temp, of.filepath); err == nil {
+			p.model.fs.Update(cid.LocalID, []scanner.File{f})
+		} else {
+			dlog.Printf("pull: error: %q: %v", f.Name, err)
+		}
+	}
+}
+
+func (p *puller) handleBlock(b bqBlock) {
+	f := b.file
+
+	of, ok := p.openFiles[f.Name]
+	of.done = b.last
+
+	if !ok {
+		if debugPull {
+			dlog.Printf("pull: opening file %q", f.Name)
+		}
+
+		of.availability = uint64(p.model.fs.Availability(f.Name))
+		of.filepath = filepath.Join(p.dir, f.Name)
+		of.temp = filepath.Join(p.dir, defTempNamer.TempName(f.Name))
+
+		dirName := filepath.Dir(of.filepath)
+		_, err := os.Stat(dirName)
+		if err != nil {
+			err = os.MkdirAll(dirName, 0777)
+		}
+		if err != nil {
+			dlog.Printf("pull: error: %q: %v", f.Name, err)
+		}
+
+		of.file, of.err = os.Create(of.temp)
+		if of.err != nil {
+			if debugPull {
+				dlog.Printf("pull: error: %q: %v", f.Name, of.err)
+			}
+			if !b.last {
+				p.openFiles[f.Name] = of
+			}
+			p.requestSlots <- true
+			return
+		}
+	}
+
+	if of.err != nil {
+		// We have already failed this file.
+		if debugPull {
+			dlog.Printf("pull: error: %q has already failed: %v", f.Name, of.err)
+		}
+		if b.last {
+			dlog.Printf("pull: removing failed file %q", f.Name)
+			delete(p.openFiles, f.Name)
+		}
+
+		p.requestSlots <- true
+		return
+	}
+
+	p.openFiles[f.Name] = of
+
+	switch {
+	case len(b.copy) > 0:
+		p.handleCopyBlock(b)
+		p.requestSlots <- true
+
+	case b.block.Size > 0:
+		p.handleRequestBlock(b)
+		// Request slot gets freed in <-p.blocks case
+
+	default:
+		p.handleEmptyBlock(b)
+		p.requestSlots <- true
+	}
+}
+
+func (p *puller) handleCopyBlock(b bqBlock) {
+	// We have blocks to copy from the existing file
+	f := b.file
+	of := p.openFiles[f.Name]
+
+	if debugPull {
+		dlog.Printf("pull: copying %d blocks for %q", len(b.copy), f.Name)
+	}
+
+	var exfd *os.File
+	exfd, of.err = os.Open(of.filepath)
+	if of.err != nil {
+		if debugPull {
+			dlog.Printf("pull: error: %q: %v", f.Name, of.err)
+		}
+		of.file.Close()
+		of.file = nil
+
+		p.openFiles[f.Name] = of
+		return
+	}
+	defer exfd.Close()
+
+	for _, b := range b.copy {
+		bs := buffers.Get(int(b.Size))
+		_, of.err = exfd.ReadAt(bs, b.Offset)
+		if of.err == nil {
+			_, of.err = of.file.WriteAt(bs, b.Offset)
+		}
+		buffers.Put(bs)
+		if of.err != nil {
+			if debugPull {
+				dlog.Printf("pull: error: %q: %v", f.Name, of.err)
+			}
+			exfd.Close()
+			of.file.Close()
+			of.file = nil
+
+			p.openFiles[f.Name] = of
+			return
+		}
+	}
+}
+
+func (p *puller) handleRequestBlock(b bqBlock) {
+	// We have a block to get from the network
+
+	f := b.file
+	of := p.openFiles[f.Name]
+
+	node := p.oustandingPerNode.leastBusyNode(of.availability, p.model.cm)
+	if len(node) == 0 {
+		of.err = errNoNode
+		if of.file != nil {
+			of.file.Close()
+			of.file = nil
+			os.Remove(of.temp)
+		}
+		if b.last {
+			delete(p.openFiles, f.Name)
+		} else {
+			p.openFiles[f.Name] = of
+		}
+		p.requestSlots <- true
+		return
+	}
+
+	of.outstanding++
+	p.openFiles[f.Name] = of
+
+	go func(node string, b bqBlock) {
+		if debugPull {
+			dlog.Printf("pull: requesting %q offset %d size %d from %q outstanding %d", f.Name, b.block.Offset, b.block.Size, node, of.outstanding)
+		}
+
+		bs, err := p.model.requestGlobal(node, f.Name, b.block.Offset, int(b.block.Size), nil)
+		p.requestResults <- requestResult{
+			node:     node,
+			file:     f,
+			filepath: of.filepath,
+			offset:   b.block.Offset,
+			data:     bs,
+			err:      err,
+		}
+	}(node, b)
+}
+
+func (p *puller) handleEmptyBlock(b bqBlock) {
+	f := b.file
+	of := p.openFiles[f.Name]
+
+	if b.last {
+		if of.err == nil {
+			of.file.Close()
+		}
+	}
+
+	if f.Flags&protocol.FlagDeleted != 0 {
+		if debugPull {
+			dlog.Printf("pull: delete %q", f.Name)
+		}
+		os.Remove(of.temp)
+		os.Remove(of.filepath)
+	} else {
+		if debugPull {
+			dlog.Printf("pull: no blocks to fetch and nothing to copy for %q", f.Name)
+		}
+		t := time.Unix(f.Modified, 0)
+		os.Chtimes(of.temp, t, t)
+		os.Chmod(of.temp, os.FileMode(f.Flags&0777))
+		Rename(of.temp, of.filepath)
+	}
+	delete(p.openFiles, f.Name)
+	p.model.fs.Update(cid.LocalID, []scanner.File{f})
+}
+
+func (p *puller) queueNeededBlocks() {
+	queued := 0
+	for _, f := range p.model.fs.Need(cid.LocalID) {
+		lf := p.model.fs.Get(cid.LocalID, f.Name)
+		have, need := scanner.BlockDiff(lf.Blocks, f.Blocks)
+		if debugNeed {
+			dlog.Printf("need:\n  local: %v\n  global: %v\n  haveBlocks: %v\n  needBlocks: %v", lf, f, have, need)
+		}
+		queued++
+		p.bq.put(bqAdd{
+			file: f,
+			have: have,
+			need: need,
+		})
+	}
+	if debugPull && queued > 0 {
+		dlog.Printf("queued %d blocks", queued)
+	}
+}

BIN
cmd/syncthing/syncthing


+ 4 - 9
cmd/syncthing/tempname.go

@@ -2,9 +2,7 @@ package main
 
 
 import (
 import (
 	"fmt"
 	"fmt"
-	"path"
 	"path/filepath"
 	"path/filepath"
-	"runtime"
 	"strings"
 	"strings"
 )
 )
 
 
@@ -15,14 +13,11 @@ type tempNamer struct {
 var defTempNamer = tempNamer{".syncthing"}
 var defTempNamer = tempNamer{".syncthing"}
 
 
 func (t tempNamer) IsTemporary(name string) bool {
 func (t tempNamer) IsTemporary(name string) bool {
-	if runtime.GOOS == "windows" {
-		name = filepath.ToSlash(name)
-	}
-	return strings.HasPrefix(path.Base(name), t.prefix)
+	return strings.HasPrefix(filepath.Base(name), t.prefix)
 }
 }
 
 
 func (t tempNamer) TempName(name string) string {
 func (t tempNamer) TempName(name string) string {
-	tdir := path.Dir(name)
-	tname := fmt.Sprintf("%s.%s", t.prefix, path.Base(name))
-	return path.Join(tdir, tname)
+	tdir := filepath.Dir(name)
+	tname := fmt.Sprintf("%s.%s", t.prefix, filepath.Base(name))
+	return filepath.Join(tdir, tname)
 }
 }

+ 4 - 4
cmd/syncthing/tls.go

@@ -11,7 +11,7 @@ import (
 	"encoding/pem"
 	"encoding/pem"
 	"math/big"
 	"math/big"
 	"os"
 	"os"
-	"path"
+	"path/filepath"
 	"strings"
 	"strings"
 	"time"
 	"time"
 )
 )
@@ -22,7 +22,7 @@ const (
 )
 )
 
 
 func loadCert(dir string) (tls.Certificate, error) {
 func loadCert(dir string) (tls.Certificate, error) {
-	return tls.LoadX509KeyPair(path.Join(dir, "cert.pem"), path.Join(dir, "key.pem"))
+	return tls.LoadX509KeyPair(filepath.Join(dir, "cert.pem"), filepath.Join(dir, "key.pem"))
 }
 }
 
 
 func certID(bs []byte) string {
 func certID(bs []byte) string {
@@ -57,13 +57,13 @@ func newCertificate(dir string) {
 	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
 	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
 	fatalErr(err)
 	fatalErr(err)
 
 
-	certOut, err := os.Create(path.Join(dir, "cert.pem"))
+	certOut, err := os.Create(filepath.Join(dir, "cert.pem"))
 	fatalErr(err)
 	fatalErr(err)
 	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
 	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
 	certOut.Close()
 	certOut.Close()
 	okln("Created RSA certificate file")
 	okln("Created RSA certificate file")
 
 
-	keyOut, err := os.OpenFile(path.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	keyOut, err := os.OpenFile(filepath.Join(dir, "key.pem"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 	fatalErr(err)
 	fatalErr(err)
 	pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
 	pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
 	keyOut.Close()
 	keyOut.Close()

+ 15 - 1
cmd/syncthing/util.go

@@ -1,6 +1,10 @@
 package main
 package main
 
 
-import "fmt"
+import (
+	"fmt"
+	"os"
+	"runtime"
+)
 
 
 func MetricPrefix(n int64) string {
 func MetricPrefix(n int64) string {
 	if n > 1e9 {
 	if n > 1e9 {
@@ -27,3 +31,13 @@ func BinaryPrefix(n int64) string {
 	}
 	}
 	return fmt.Sprintf("%d ", n)
 	return fmt.Sprintf("%d ", n)
 }
 }
+
+func Rename(from, to string) error {
+	if runtime.GOOS == "windows" {
+		err := os.Remove(to)
+		if err != nil && !os.IsNotExist(err) {
+			warnln(err)
+		}
+	}
+	return os.Rename(from, to)
+}

+ 12 - 0
files/debug.go

@@ -0,0 +1,12 @@
+package files
+
+import (
+	"log"
+	"os"
+	"strings"
+)
+
+var (
+	dlog  = log.New(os.Stderr, "files: ", log.Lmicroseconds|log.Lshortfile)
+	debug = strings.Contains(os.Getenv("STTRACE"), "files")
+)

+ 324 - 0
files/set.go

@@ -0,0 +1,324 @@
+// Package files provides a set type to track local/remote files with newness checks.
+package files
+
+import (
+	"crypto/md5"
+	"sync"
+
+	"github.com/calmh/syncthing/cid"
+	"github.com/calmh/syncthing/lamport"
+	"github.com/calmh/syncthing/protocol"
+	"github.com/calmh/syncthing/scanner"
+)
+
+type key struct {
+	Name     string
+	Version  uint64
+	Modified int64
+	Hash     [md5.Size]byte
+}
+
+type fileRecord struct {
+	Usage int
+	File  scanner.File
+}
+
+type bitset uint64
+
+func keyFor(f scanner.File) key {
+	h := md5.New()
+	for _, b := range f.Blocks {
+		h.Write(b.Hash)
+	}
+	return key{
+		Name:     f.Name,
+		Version:  f.Version,
+		Modified: f.Modified,
+		Hash:     md5.Sum(nil),
+	}
+}
+
+func (a key) newerThan(b key) bool {
+	if a.Version != b.Version {
+		return a.Version > b.Version
+	}
+	if a.Modified != b.Modified {
+		return a.Modified > b.Modified
+	}
+	for i := 0; i < md5.Size; i++ {
+		if a.Hash[i] != b.Hash[i] {
+			return a.Hash[i] > b.Hash[i]
+		}
+	}
+	return false
+}
+
+type Set struct {
+	sync.Mutex
+	files              map[key]fileRecord
+	remoteKey          [64]map[string]key
+	changes            [64]uint64
+	globalAvailability map[string]bitset
+	globalKey          map[string]key
+}
+
+func NewSet() *Set {
+	var m = Set{
+		files:              make(map[key]fileRecord),
+		globalAvailability: make(map[string]bitset),
+		globalKey:          make(map[string]key),
+	}
+	return &m
+}
+
+func (m *Set) Replace(id uint, fs []scanner.File) {
+	if debug {
+		dlog.Printf("Replace(%d, [%d])", id, len(fs))
+	}
+	if id > 63 {
+		panic("Connection ID must be in the range 0 - 63 inclusive")
+	}
+
+	m.Lock()
+	if len(fs) == 0 || !m.equals(id, fs) {
+		m.changes[id]++
+		m.replace(id, fs)
+	}
+	m.Unlock()
+}
+
+func (m *Set) ReplaceWithDelete(id uint, fs []scanner.File) {
+	if debug {
+		dlog.Printf("ReplaceWithDelete(%d, [%d])", id, len(fs))
+	}
+	if id > 63 {
+		panic("Connection ID must be in the range 0 - 63 inclusive")
+	}
+
+	m.Lock()
+	if len(fs) == 0 || !m.equals(id, fs) {
+		m.changes[id]++
+
+		var nf = make(map[string]key, len(fs))
+		for _, f := range fs {
+			nf[f.Name] = keyFor(f)
+		}
+
+		// For previously existing files not in the list, add them to the list
+		// with the relevant delete flags etc set. Previously existing files
+		// with the delete bit already set are not modified.
+
+		for _, ck := range m.remoteKey[cid.LocalID] {
+			if _, ok := nf[ck.Name]; !ok {
+				cf := m.files[ck].File
+				if cf.Flags&protocol.FlagDeleted != protocol.FlagDeleted {
+					cf.Flags = protocol.FlagDeleted
+					cf.Blocks = nil
+					cf.Size = 0
+					cf.Version = lamport.Default.Tick(cf.Version)
+				}
+				fs = append(fs, cf)
+				if debug {
+					dlog.Println("deleted:", ck.Name)
+				}
+			}
+		}
+
+		m.replace(id, fs)
+	}
+	m.Unlock()
+}
+
+func (m *Set) Update(id uint, fs []scanner.File) {
+	if debug {
+		dlog.Printf("Update(%d, [%d])", id, len(fs))
+	}
+	m.Lock()
+	m.update(id, fs)
+	m.changes[id]++
+	m.Unlock()
+}
+
+func (m *Set) Need(id uint) []scanner.File {
+	if debug {
+		dlog.Printf("Need(%d)", id)
+	}
+	var fs []scanner.File
+	m.Lock()
+	for name, gk := range m.globalKey {
+		if gk.newerThan(m.remoteKey[id][name]) {
+			fs = append(fs, m.files[gk].File)
+		}
+	}
+	m.Unlock()
+	return fs
+}
+
+func (m *Set) Have(id uint) []scanner.File {
+	if debug {
+		dlog.Printf("Have(%d)", id)
+	}
+	var fs []scanner.File
+	m.Lock()
+	for _, rk := range m.remoteKey[id] {
+		fs = append(fs, m.files[rk].File)
+	}
+	m.Unlock()
+	return fs
+}
+
+func (m *Set) Global() []scanner.File {
+	if debug {
+		dlog.Printf("Global()")
+	}
+	var fs []scanner.File
+	m.Lock()
+	for _, rk := range m.globalKey {
+		fs = append(fs, m.files[rk].File)
+	}
+	m.Unlock()
+	return fs
+}
+
+func (m *Set) Get(id uint, file string) scanner.File {
+	m.Lock()
+	defer m.Unlock()
+	if debug {
+		dlog.Printf("Get(%d, %q)", id, file)
+	}
+	return m.files[m.remoteKey[id][file]].File
+}
+
+func (m *Set) GetGlobal(file string) scanner.File {
+	m.Lock()
+	defer m.Unlock()
+	if debug {
+		dlog.Printf("GetGlobal(%q)", file)
+	}
+	return m.files[m.globalKey[file]].File
+}
+
+func (m *Set) Availability(name string) bitset {
+	m.Lock()
+	defer m.Unlock()
+	av := m.globalAvailability[name]
+	if debug {
+		dlog.Printf("Availability(%q) = %0x", name, av)
+	}
+	return av
+}
+
+func (m *Set) Changes(id uint) uint64 {
+	m.Lock()
+	defer m.Unlock()
+	if debug {
+		dlog.Printf("Changes(%d)", id)
+	}
+	return m.changes[id]
+}
+
+func (m *Set) equals(id uint, fs []scanner.File) bool {
+	curWithoutDeleted := make(map[string]key)
+	for _, k := range m.remoteKey[id] {
+		f := m.files[k].File
+		if f.Flags&protocol.FlagDeleted == 0 {
+			curWithoutDeleted[f.Name] = k
+		}
+	}
+	if len(curWithoutDeleted) != len(fs) {
+		return false
+	}
+	for _, f := range fs {
+		if curWithoutDeleted[f.Name] != keyFor(f) {
+			return false
+		}
+	}
+	return true
+}
+
+func (m *Set) update(cid uint, fs []scanner.File) {
+	remFiles := m.remoteKey[cid]
+	for _, f := range fs {
+		n := f.Name
+		fk := keyFor(f)
+
+		if ck, ok := remFiles[n]; ok && ck == fk {
+			// The remote already has exactly this file, skip it
+			continue
+		}
+
+		remFiles[n] = fk
+
+		// Keep the block list or increment the usage
+		if br, ok := m.files[fk]; !ok {
+			m.files[fk] = fileRecord{
+				Usage: 1,
+				File:  f,
+			}
+		} else {
+			br.Usage++
+			m.files[fk] = br
+		}
+
+		// Update global view
+		gk, ok := m.globalKey[n]
+		switch {
+		case ok && fk == gk:
+			av := m.globalAvailability[n]
+			av |= 1 << cid
+			m.globalAvailability[n] = av
+		case fk.newerThan(gk):
+			m.globalKey[n] = fk
+			m.globalAvailability[n] = 1 << cid
+		}
+	}
+}
+
+func (m *Set) replace(cid uint, fs []scanner.File) {
+	// Decrement usage for all files belonging to this remote, and remove
+	// those that are no longer needed.
+	for _, fk := range m.remoteKey[cid] {
+		br, ok := m.files[fk]
+		switch {
+		case ok && br.Usage == 1:
+			delete(m.files, fk)
+		case ok && br.Usage > 1:
+			br.Usage--
+			m.files[fk] = br
+		}
+	}
+
+	// Clear existing remote remoteKey
+	m.remoteKey[cid] = make(map[string]key)
+
+	// Recalculate global based on all remaining remoteKey
+	for n := range m.globalKey {
+		var nk key    // newest key
+		var na bitset // newest availability
+
+		for i, rem := range m.remoteKey {
+			if rk, ok := rem[n]; ok {
+				switch {
+				case rk == nk:
+					na |= 1 << uint(i)
+				case rk.newerThan(nk):
+					nk = rk
+					na = 1 << uint(i)
+				}
+			}
+		}
+
+		if na != 0 {
+			// Someone had the file
+			m.globalKey[n] = nk
+			m.globalAvailability[n] = na
+		} else {
+			// Noone had the file
+			delete(m.globalKey, n)
+			delete(m.globalAvailability, n)
+		}
+	}
+
+	// Add new remote remoteKey to the mix
+	m.update(cid, fs)
+}

+ 321 - 0
files/set_test.go

@@ -0,0 +1,321 @@
+package files
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"testing"
+
+	"github.com/calmh/syncthing/cid"
+	"github.com/calmh/syncthing/lamport"
+	"github.com/calmh/syncthing/protocol"
+	"github.com/calmh/syncthing/scanner"
+)
+
+type fileList []scanner.File
+
+func (l fileList) Len() int {
+	return len(l)
+}
+
+func (l fileList) Less(a, b int) bool {
+	return l[a].Name < l[b].Name
+}
+
+func (l fileList) Swap(a, b int) {
+	l[a], l[b] = l[b], l[a]
+}
+
+func TestGlobalSet(t *testing.T) {
+	m := NewSet()
+
+	local := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1000},
+		scanner.File{Name: "c", Version: 1000},
+		scanner.File{Name: "d", Version: 1000},
+	}
+
+	remote := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1001},
+		scanner.File{Name: "c", Version: 1002},
+		scanner.File{Name: "e", Version: 1000},
+	}
+
+	expectedGlobal := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1001},
+		scanner.File{Name: "c", Version: 1002},
+		scanner.File{Name: "d", Version: 1000},
+		scanner.File{Name: "e", Version: 1000},
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local)
+	m.Replace(1, remote)
+
+	g := m.Global()
+
+	sort.Sort(fileList(g))
+	sort.Sort(fileList(expectedGlobal))
+
+	if !reflect.DeepEqual(g, expectedGlobal) {
+		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal)
+	}
+
+	if lb := len(m.files); lb != 7 {
+		t.Errorf("Num files incorrect %d != 7\n%v", lb, m.files)
+	}
+}
+
+func TestLocalDeleted(t *testing.T) {
+	m := NewSet()
+	lamport.Default = lamport.Clock{}
+
+	local1 := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1000},
+		scanner.File{Name: "c", Version: 1000},
+		scanner.File{Name: "d", Version: 1000},
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local1)
+
+	local2 := []scanner.File{
+		local1[0],
+		local1[2],
+	}
+
+	expectedGlobal1 := []scanner.File{
+		local1[0],
+		scanner.File{Name: "b", Version: 1001, Flags: protocol.FlagDeleted},
+		local1[2],
+		scanner.File{Name: "d", Version: 1002, Flags: protocol.FlagDeleted},
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local2)
+	g := m.Global()
+	sort.Sort(fileList(g))
+	sort.Sort(fileList(expectedGlobal1))
+
+	if !reflect.DeepEqual(g, expectedGlobal1) {
+		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal1)
+	}
+
+	local3 := []scanner.File{
+		local1[0],
+	}
+
+	expectedGlobal2 := []scanner.File{
+		local1[0],
+		scanner.File{Name: "b", Version: 1001, Flags: protocol.FlagDeleted},
+		scanner.File{Name: "c", Version: 1003, Flags: protocol.FlagDeleted},
+		scanner.File{Name: "d", Version: 1002, Flags: protocol.FlagDeleted},
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local3)
+	g = m.Global()
+	sort.Sort(fileList(g))
+	sort.Sort(fileList(expectedGlobal2))
+
+	if !reflect.DeepEqual(g, expectedGlobal2) {
+		t.Errorf("Global incorrect;\n A: %v !=\n E: %v", g, expectedGlobal2)
+	}
+}
+
+func BenchmarkSetLocal10k(b *testing.B) {
+	m := NewSet()
+
+	var local []scanner.File
+	for i := 0; i < 10000; i++ {
+		local = append(local, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	var remote []scanner.File
+	for i := 0; i < 10000; i++ {
+		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	m.Replace(1, remote)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		m.ReplaceWithDelete(cid.LocalID, local)
+	}
+}
+
+func BenchmarkSetLocal10(b *testing.B) {
+	m := NewSet()
+
+	var local []scanner.File
+	for i := 0; i < 10; i++ {
+		local = append(local, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	var remote []scanner.File
+	for i := 0; i < 10000; i++ {
+		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	m.Replace(1, remote)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		m.ReplaceWithDelete(cid.LocalID, local)
+	}
+}
+
+func BenchmarkAddLocal10k(b *testing.B) {
+	m := NewSet()
+
+	var local []scanner.File
+	for i := 0; i < 10000; i++ {
+		local = append(local, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	var remote []scanner.File
+	for i := 0; i < 10000; i++ {
+		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	m.Replace(1, remote)
+	m.ReplaceWithDelete(cid.LocalID, local)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		for j := range local {
+			local[j].Version++
+		}
+		b.StartTimer()
+		m.Update(cid.LocalID, local)
+	}
+}
+
+func BenchmarkAddLocal10(b *testing.B) {
+	m := NewSet()
+
+	var local []scanner.File
+	for i := 0; i < 10; i++ {
+		local = append(local, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	var remote []scanner.File
+	for i := 0; i < 10000; i++ {
+		remote = append(remote, scanner.File{Name: fmt.Sprintf("file%d"), Version: 1000})
+	}
+
+	m.Replace(1, remote)
+	m.ReplaceWithDelete(cid.LocalID, local)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		for j := range local {
+			local[j].Version++
+		}
+		m.Update(cid.LocalID, local)
+	}
+}
+
+func TestGlobalReset(t *testing.T) {
+	m := NewSet()
+
+	local := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1000},
+		scanner.File{Name: "c", Version: 1000},
+		scanner.File{Name: "d", Version: 1000},
+	}
+
+	remote := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1001},
+		scanner.File{Name: "c", Version: 1002},
+		scanner.File{Name: "e", Version: 1000},
+	}
+
+	expectedGlobalKey := map[string]key{
+		"a": keyFor(local[0]),
+		"b": keyFor(local[1]),
+		"c": keyFor(local[2]),
+		"d": keyFor(local[3]),
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local)
+	m.Replace(1, remote)
+	m.Replace(1, nil)
+
+	if !reflect.DeepEqual(m.globalKey, expectedGlobalKey) {
+		t.Errorf("Global incorrect;\n%v !=\n%v", m.globalKey, expectedGlobalKey)
+	}
+
+	if lb := len(m.files); lb != 4 {
+		t.Errorf("Num files incorrect %d != 4\n%v", lb, m.files)
+	}
+}
+
+func TestNeed(t *testing.T) {
+	m := NewSet()
+
+	local := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1000},
+		scanner.File{Name: "c", Version: 1000},
+		scanner.File{Name: "d", Version: 1000},
+	}
+
+	remote := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1001},
+		scanner.File{Name: "c", Version: 1002},
+		scanner.File{Name: "e", Version: 1000},
+	}
+
+	shouldNeed := []scanner.File{
+		scanner.File{Name: "b", Version: 1001},
+		scanner.File{Name: "c", Version: 1002},
+		scanner.File{Name: "e", Version: 1000},
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local)
+	m.Replace(1, remote)
+
+	need := m.Need(0)
+	if !reflect.DeepEqual(need, shouldNeed) {
+		t.Errorf("Need incorrect;\n%v !=\n%v", need, shouldNeed)
+	}
+}
+
+func TestChanges(t *testing.T) {
+	m := NewSet()
+
+	local1 := []scanner.File{
+		scanner.File{Name: "a", Version: 1000},
+		scanner.File{Name: "b", Version: 1000},
+		scanner.File{Name: "c", Version: 1000},
+		scanner.File{Name: "d", Version: 1000},
+	}
+
+	local2 := []scanner.File{
+		local1[0],
+		// [1] deleted
+		local1[2],
+		scanner.File{Name: "d", Version: 1002},
+		scanner.File{Name: "e", Version: 1000},
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local1)
+	c0 := m.Changes(cid.LocalID)
+
+	m.ReplaceWithDelete(cid.LocalID, local2)
+	c1 := m.Changes(cid.LocalID)
+	if !(c1 > c0) {
+		t.Fatal("Change number should have incremented")
+	}
+
+	m.ReplaceWithDelete(cid.LocalID, local2)
+	c2 := m.Changes(cid.LocalID)
+	if c2 != c1 {
+		t.Fatal("Change number should be unchanged")
+	}
+}

+ 0 - 1
gui/app.js

@@ -28,7 +28,6 @@ syncthing.controller('SyncthingCtrl', function ($scope, $http) {
         {id: 'MaxChangeKbps', descr: 'Max File Change Rate (KBps)', type: 'number', restart: true},
         {id: 'MaxChangeKbps', descr: 'Max File Change Rate (KBps)', type: 'number', restart: true},
 
 
         {id: 'ReadOnly', descr: 'Read Only', type: 'bool', restart: true},
         {id: 'ReadOnly', descr: 'Read Only', type: 'bool', restart: true},
-        {id: 'AllowDelete', descr: 'Allow Delete', type: 'bool', restart: true},
         {id: 'FollowSymlinks', descr: 'Follow Symlinks', type: 'bool', restart: true},
         {id: 'FollowSymlinks', descr: 'Follow Symlinks', type: 'bool', restart: true},
         {id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true},
         {id: 'GlobalAnnEnabled', descr: 'Global Announce', type: 'bool', restart: true},
         {id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true},
         {id: 'LocalAnnEnabled', descr: 'Local Announce', type: 'bool', restart: true},

+ 1 - 1
gui/index.html

@@ -150,7 +150,7 @@ thead tr th {
                     <div class="progress">
                     <div class="progress">
                         <div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
                         <div class="progress-bar" role="progressbar" aria-valuenow="60" aria-valuemin="0" aria-valuemax="100"
                             ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
                             ng-class="{'progress-bar-success': model.needBytes === 0, 'progress-bar-info': model.needBytes !== 0}"
-                            style="width: {{100 * model.inSyncBytes / model.globalBytes | number:2}}%;">
+                            ng-style="{width: (100 * model.inSyncBytes / model.globalBytes) + '%'}">
                             {{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
                             {{100 * model.inSyncBytes / model.globalBytes | alwaysNumber | number:0}}%
                         </div>
                         </div>
                     </div>
                     </div>

+ 3 - 3
integration/genfiles.go

@@ -7,7 +7,7 @@ import (
 	"io/ioutil"
 	"io/ioutil"
 	mr "math/rand"
 	mr "math/rand"
 	"os"
 	"os"
-	"path"
+	"path/filepath"
 	"time"
 	"time"
 )
 )
 
 
@@ -27,7 +27,7 @@ func main() {
 
 
 	for i := 0; i < files; i++ {
 	for i := 0; i < files; i++ {
 		n := name()
 		n := name()
-		p0 := path.Join(string(n[0]), n[0:2])
+		p0 := filepath.Join(string(n[0]), n[0:2])
 		os.MkdirAll(p0, 0755)
 		os.MkdirAll(p0, 0755)
 		s := 1 << uint(mr.Intn(maxexp))
 		s := 1 << uint(mr.Intn(maxexp))
 		a := 128 * 1024
 		a := 128 * 1024
@@ -37,7 +37,7 @@ func main() {
 		s += mr.Intn(a)
 		s += mr.Intn(a)
 		b := make([]byte, s)
 		b := make([]byte, s)
 		rand.Reader.Read(b)
 		rand.Reader.Read(b)
-		p1 := path.Join(p0, n)
+		p1 := filepath.Join(p0, n)
 		ioutil.WriteFile(p1, b, 0644)
 		ioutil.WriteFile(p1, b, 0644)
 
 
 		os.Chmod(p1, os.FileMode(mr.Intn(0777)|0400))
 		os.Chmod(p1, os.FileMode(mr.Intn(0777)|0400))

+ 3 - 3
integration/h1/config.xml

@@ -1,13 +1,13 @@
 <configuration version="1">
 <configuration version="1">
     <repository directory="s1">
     <repository directory="s1">
         <node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
         <node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
-            <address>dynamic</address>
+            <address>127.0.0.1:22001</address>
         </node>
         </node>
         <node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
         <node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
-            <address>dynamic</address>
+            <address>127.0.0.1:22002</address>
         </node>
         </node>
         <node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
         <node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
-            <address>dynamic</address>
+            <address>127.0.0.1:22003</address>
         </node>
         </node>
     </repository>
     </repository>
     <options>
     <options>

+ 3 - 3
integration/h2/config.xml

@@ -1,13 +1,13 @@
 <configuration version="1">
 <configuration version="1">
     <repository directory="s2">
     <repository directory="s2">
         <node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
         <node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
-            <address>dynamic</address>
+            <address>127.0.0.1:22001</address>
         </node>
         </node>
         <node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
         <node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
-            <address>dynamic</address>
+            <address>127.0.0.1:22002</address>
         </node>
         </node>
         <node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
         <node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
-            <address>dynamic</address>
+            <address>127.0.0.1:22003</address>
         </node>
         </node>
     </repository>
     </repository>
     <options>
     <options>

+ 3 - 3
integration/h3/config.xml

@@ -1,13 +1,13 @@
 <configuration version="1">
 <configuration version="1">
     <repository directory="s3">
     <repository directory="s3">
         <node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
         <node id="I6KAH7666SLLL5PFXSOAUFJCDZYAOMLEKCP2GB3BV5RQST3PSROA" name="s1">
-            <address>dynamic</address>
+            <address>127.0.0.1:22001</address>
         </node>
         </node>
         <node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
         <node id="JMFJCXBGZDE4BOCJE3VF65GYZNAIVJRET3J6HMRAUQIGJOFKNHMQ" name="s2">
-            <address>dynamic</address>
+            <address>127.0.0.1:22002</address>
         </node>
         </node>
         <node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
         <node id="373HSRPQLPNLIJYKZVQFP4PKZ6R2ZE6K3YD442UJHBGBQGWWXAHA" name="s3">
-            <address>dynamic</address>
+            <address>127.0.0.1:22003</address>
         </node>
         </node>
     </repository>
     </repository>
     <options>
     <options>

+ 0 - 8
integration/test.sh

@@ -68,25 +68,17 @@ for i in 1 2 3 ; do
 	../genfiles -maxexp 22 -files 600
 	../genfiles -maxexp 22 -files 600
 	echo "  $i: empty file"
 	echo "  $i: empty file"
 	touch "empty-$i"
 	touch "empty-$i"
-	echo "  $i: common file"
-	dd if=/dev/urandom of=common bs=1000 count=1000 2>/dev/null
 	echo "  $i: large file"
 	echo "  $i: large file"
 	dd if=/dev/urandom of=large-$i bs=1024k count=55 2>/dev/null
 	dd if=/dev/urandom of=large-$i bs=1024k count=55 2>/dev/null
 	popd >/dev/null
 	popd >/dev/null
 done
 done
 
 
-# instance 1 common file should be the newest, the other should disappear
-sleep 2
-touch "s1/common"
-
 echo "MD5-summing..."
 echo "MD5-summing..."
 for i in 1 2 3 ; do
 for i in 1 2 3 ; do
 	pushd "s$i" >/dev/null
 	pushd "s$i" >/dev/null
 	../md5r -l > ../md5-$i
 	../md5r -l > ../md5-$i
 	popd >/dev/null
 	popd >/dev/null
 done
 done
-grep -v common md5-2 > t ; mv t md5-2
-grep -v common md5-3 > t ; mv t md5-3
 
 
 testConvergence
 testConvergence
 
 

+ 24 - 0
lamport/clock.go

@@ -0,0 +1,24 @@
+package lamport
+
+import "sync"
+
+var Default = Clock{}
+
+type Clock struct {
+	val uint64
+	mut sync.Mutex
+}
+
+func (c *Clock) Tick(v uint64) uint64 {
+	c.mut.Lock()
+	if v > c.val {
+		c.val = v + 1
+		c.mut.Unlock()
+		return v + 1
+	} else {
+		c.val++
+		v = c.val
+		c.mut.Unlock()
+		return v
+	}
+}

+ 43 - 20
protocol/PROTOCOL.md

@@ -19,20 +19,31 @@ File data is described and transferred in units of _blocks_, each being
 Transport and Authentication
 Transport and Authentication
 ----------------------------
 ----------------------------
 
 
-BEP itself does not provide retransmissions, compression, encryption nor
-authentication. It is expected that this is performed at lower layers of
-the networking stack. The typical deployment stack is the following:
+BEP is deployed as the highest level in a protocol stack, with the lower
+level protocols providing compression, encryption and authentication.
+The transport protocol is always TCP.
 
 
     +-----------------------------|
     +-----------------------------|
     |   Block Exchange Protocol   |
     |   Block Exchange Protocol   |
     |-----------------------------|
     |-----------------------------|
     |   Compression (RFC 1951)    |
     |   Compression (RFC 1951)    |
     |-----------------------------|
     |-----------------------------|
-    | Encryption & Auth (TLS 1.0) |
+    | Encryption & Auth (TLS 1.2) |
     |-----------------------------|
     |-----------------------------|
     |             TCP             |
     |             TCP             |
     |-----------------------------|
     |-----------------------------|
-    v                             v
+    v             ...             v
+
+Compression is started directly after a successfull TLS handshake,
+before the first message is sent. The compression is flushed at each
+message boundary.
+
+The TLS layer shall use a strong cipher suite. Only cipher suites
+without known weaknesses and providing Perfect Forward Secrecy (PFS) can
+be considered strong. Examples of valid cipher suites are given at the
+end of this document. This is not to be taken as an exhaustive list of
+allowed cipher suites but represents best practices at the time of
+writing.
 
 
 The exact nature of the authentication is up to the application.
 The exact nature of the authentication is up to the application.
 Possibilities include certificates signed by a common trusted CA,
 Possibilities include certificates signed by a common trusted CA,
@@ -44,10 +55,6 @@ message type may be sent at any time and the sender need not await a
 response to one message before sending another. Responses must however
 response to one message before sending another. Responses must however
 be sent in the same order as the requests are received.
 be sent in the same order as the requests are received.
 
 
-Compression is started directly after a successfull TLS handshake,
-before the first message is sent. The compression is flushed at each
-message boundary.
-
 Messages
 Messages
 --------
 --------
 
 
@@ -134,7 +141,9 @@ response to the Index message.
     +                      Modified (64 bits)                       +
     +                      Modified (64 bits)                       +
     |                                                               |
     |                                                               |
     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-    |                            Version                            |
+    |                                                               |
+    +                       Version (64 bits)                       +
+    |                                                               |
     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     |                       Number of Blocks                        |
     |                       Number of Blocks                        |
     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -163,14 +172,16 @@ response to the Index message.
 The Repository field identifies the repository that the index message
 The Repository field identifies the repository that the index message
 pertains to. For single repository implementations an empty repository
 pertains to. For single repository implementations an empty repository
 ID is acceptable, or the word "default". The Name is the file name path
 ID is acceptable, or the word "default". The Name is the file name path
-relative to the repository root. The Name is always in UTF-8 NFC regardless
-of operating system or file system specific conventions. The combination of
-Repository and Name uniquely identifies each file in a cluster.
+relative to the repository root. The Name is always in UTF-8 NFC
+regardless of operating system or file system specific conventions. The
+combination of Repository and Name uniquely identifies each file in a
+cluster.
 
 
-The Version field is a counter that is initially zero for each file. It
-is incremented each time a change is detected. The combination of
-Repository, Name and Version uniquely identifies the contents of a file
-at a certain point in time.
+The Version field is the value of a cluster wide Lamport clock
+indicating when the change was detected. The clock ticks on every
+detected and received change. The combination of Repository, Name and
+Version uniquely identifies the contents of a file at a certain point in
+time.
 
 
 The Flags field is made up of the following single bit flags:
 The Flags field is made up of the following single bit flags:
 
 
@@ -220,7 +231,7 @@ block which may represent a smaller amount of data.
         string Name<>;
         string Name<>;
         unsigned int Flags;
         unsigned int Flags;
         hyper Modified;
         hyper Modified;
-        unsigned int Version;
+        unsigned hyper Version;
         BlockInfo Blocks<>;
         BlockInfo Blocks<>;
     }
     }
 
 
@@ -338,8 +349,8 @@ Well known keys:
 
 
   - "clientId" -- The name of the implementation. Example: "syncthing".
   - "clientId" -- The name of the implementation. Example: "syncthing".
 
 
-  - "clientVersion" -- The version of the client. Example: "v1.0.33-47". The
-    Following the SemVer 2.0 specification for version strings is
+  - "clientVersion" -- The version of the client. Example: "v1.0.33-47".
+    The Following the SemVer 2.0 specification for version strings is
     encouraged but not enforced.
     encouraged but not enforced.
 
 
 #### Graphical Representation
 #### Graphical Representation
@@ -411,3 +422,15 @@ their repository contents and transmits an Index Update message (10).
 Both peers enter idle state after 10. At some later time 11, peer A
 Both peers enter idle state after 10. At some later time 11, peer A
 determines that it has not seen data from B for some time and sends a
 determines that it has not seen data from B for some time and sends a
 Ping request. A response is sent at 12.
 Ping request. A response is sent at 12.
+
+Examples of Acceptable Cipher Suites
+------------------------------------
+
+0x009F DHE-RSA-AES256-GCM-SHA384 (TLSv1.2 DH RSA AESGCM(256) AEAD)
+0x006B DHE-RSA-AES256-SHA256 (TLSv1.2 DH RSA AES(256) SHA256)
+0xC030 ECDHE-RSA-AES256-GCM-SHA384 (TLSv1.2 ECDH RSA AESGCM(256) AEAD)
+0xC028 ECDHE-RSA-AES256-SHA384 (TLSv1.2 ECDH RSA AES(256) SHA384)
+0x009E DHE-RSA-AES128-GCM-SHA256 (TLSv1.2 DH RSA AESGCM(128) AEAD)
+0x0067 DHE-RSA-AES128-SHA256 (TLSv1.2 DH RSA AES(128) SHA256)
+0xC02F ECDHE-RSA-AES128-GCM-SHA256 (TLSv1.2 ECDH RSA AESGCM(128) AEAD)
+0xC027 ECDHE-RSA-AES128-SHA256 (TLSv1.2 ECDH RSA AES(128) SHA256)

+ 1 - 1
protocol/message_types.go

@@ -9,7 +9,7 @@ type FileInfo struct {
 	Name     string // max:1024
 	Name     string // max:1024
 	Flags    uint32
 	Flags    uint32
 	Modified int64
 	Modified int64
-	Version  uint32
+	Version  uint64
 	Blocks   []BlockInfo // max:100000
 	Blocks   []BlockInfo // max:100000
 }
 }
 
 

+ 2 - 2
protocol/message_xdr.go

@@ -77,7 +77,7 @@ func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) {
 	xw.WriteString(o.Name)
 	xw.WriteString(o.Name)
 	xw.WriteUint32(o.Flags)
 	xw.WriteUint32(o.Flags)
 	xw.WriteUint64(uint64(o.Modified))
 	xw.WriteUint64(uint64(o.Modified))
-	xw.WriteUint32(o.Version)
+	xw.WriteUint64(o.Version)
 	if len(o.Blocks) > 100000 {
 	if len(o.Blocks) > 100000 {
 		return xw.Tot(), xdr.ErrElementSizeExceeded
 		return xw.Tot(), xdr.ErrElementSizeExceeded
 	}
 	}
@@ -103,7 +103,7 @@ func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
 	o.Name = xr.ReadStringMax(1024)
 	o.Name = xr.ReadStringMax(1024)
 	o.Flags = xr.ReadUint32()
 	o.Flags = xr.ReadUint32()
 	o.Modified = int64(xr.ReadUint64())
 	o.Modified = int64(xr.ReadUint64())
-	o.Version = xr.ReadUint32()
+	o.Version = xr.ReadUint64()
 	_BlocksSize := int(xr.ReadUint32())
 	_BlocksSize := int(xr.ReadUint32())
 	if _BlocksSize > 100000 {
 	if _BlocksSize > 100000 {
 		return xdr.ErrElementSizeExceeded
 		return xdr.ErrElementSizeExceeded

+ 34 - 0
protocol/nativemodel_darwin.go

@@ -0,0 +1,34 @@
+// +build darwin
+
+package protocol
+
+// Darwin uses NFD normalization
+
+import "code.google.com/p/go.text/unicode/norm"
+
+type nativeModel struct {
+	next Model
+}
+
+func (m nativeModel) Index(nodeID string, files []FileInfo) {
+	for i := range files {
+		files[i].Name = norm.NFD.String(files[i].Name)
+	}
+	m.next.Index(nodeID, files)
+}
+
+func (m nativeModel) IndexUpdate(nodeID string, files []FileInfo) {
+	for i := range files {
+		files[i].Name = norm.NFD.String(files[i].Name)
+	}
+	m.next.IndexUpdate(nodeID, files)
+}
+
+func (m nativeModel) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) {
+	name = norm.NFD.String(name)
+	return m.next.Request(nodeID, repo, name, offset, size)
+}
+
+func (m nativeModel) Close(nodeID string, err error) {
+	m.next.Close(nodeID, err)
+}

+ 25 - 0
protocol/nativemodel_unix.go

@@ -0,0 +1,25 @@
+// +build !windows,!darwin
+
+package protocol
+
+// Normal Unixes uses NFC and slashes, which is the wire format.
+
+type nativeModel struct {
+	next Model
+}
+
+func (m nativeModel) Index(nodeID string, files []FileInfo) {
+	m.next.Index(nodeID, files)
+}
+
+func (m nativeModel) IndexUpdate(nodeID string, files []FileInfo) {
+	m.next.IndexUpdate(nodeID, files)
+}
+
+func (m nativeModel) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) {
+	return m.next.Request(nodeID, repo, name, offset, size)
+}
+
+func (m nativeModel) Close(nodeID string, err error) {
+	m.next.Close(nodeID, err)
+}

+ 34 - 0
protocol/nativemodel_windows.go

@@ -0,0 +1,34 @@
+// +build windows
+
+package protocol
+
+// Windows uses backslashes as file separator
+
+import "path/filepath"
+
+type nativeModel struct {
+	next Model
+}
+
+func (m nativeModel) Index(nodeID string, files []FileInfo) {
+	for i := range files {
+		files[i].Name = filepath.FromSlash(files[i].Name)
+	}
+	m.next.Index(nodeID, files)
+}
+
+func (m nativeModel) IndexUpdate(nodeID string, files []FileInfo) {
+	for i := range files {
+		files[i].Name = filepath.FromSlash(files[i].Name)
+	}
+	m.next.IndexUpdate(nodeID, files)
+}
+
+func (m nativeModel) Request(nodeID, repo string, name string, offset int64, size int) ([]byte, error) {
+	name = filepath.FromSlash(name)
+	return m.next.Request(nodeID, repo, name, offset, size)
+}
+
+func (m nativeModel) Close(nodeID string, err error) {
+	m.next.Close(nodeID, err)
+}

+ 68 - 44
protocol/protocol.go

@@ -46,16 +46,24 @@ type Model interface {
 	Close(nodeID string, err error)
 	Close(nodeID string, err error)
 }
 }
 
 
-type Connection struct {
+type Connection interface {
+	ID() string
+	Index(string, []FileInfo)
+	Request(repo, name string, offset int64, size int) ([]byte, error)
+	Statistics() Statistics
+	Option(key string) string
+}
+
+type rawConnection struct {
 	sync.RWMutex
 	sync.RWMutex
 
 
 	id          string
 	id          string
 	receiver    Model
 	receiver    Model
-	reader      io.Reader
+	reader      io.ReadCloser
 	xr          *xdr.Reader
 	xr          *xdr.Reader
-	writer      io.Writer
+	writer      io.WriteCloser
 	xw          *xdr.Writer
 	xw          *xdr.Writer
-	closed      bool
+	closed      chan struct{}
 	awaiting    map[int]chan asyncResult
 	awaiting    map[int]chan asyncResult
 	nextID      int
 	nextID      int
 	indexSent   map[string]map[string][2]int64
 	indexSent   map[string]map[string][2]int64
@@ -79,20 +87,21 @@ const (
 	pingIdleTime = 5 * time.Minute
 	pingIdleTime = 5 * time.Minute
 )
 )
 
 
-func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model, options map[string]string) *Connection {
+func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model, options map[string]string) Connection {
 	flrd := flate.NewReader(reader)
 	flrd := flate.NewReader(reader)
 	flwr, err := flate.NewWriter(writer, flate.BestSpeed)
 	flwr, err := flate.NewWriter(writer, flate.BestSpeed)
 	if err != nil {
 	if err != nil {
 		panic(err)
 		panic(err)
 	}
 	}
 
 
-	c := Connection{
+	c := rawConnection{
 		id:        nodeID,
 		id:        nodeID,
-		receiver:  receiver,
+		receiver:  nativeModel{receiver},
 		reader:    flrd,
 		reader:    flrd,
 		xr:        xdr.NewReader(flrd),
 		xr:        xdr.NewReader(flrd),
 		writer:    flwr,
 		writer:    flwr,
 		xw:        xdr.NewWriter(flwr),
 		xw:        xdr.NewWriter(flwr),
+		closed:    make(chan struct{}),
 		awaiting:  make(map[int]chan asyncResult),
 		awaiting:  make(map[int]chan asyncResult),
 		indexSent: make(map[string]map[string][2]int64),
 		indexSent: make(map[string]map[string][2]int64),
 	}
 	}
@@ -122,16 +131,20 @@ func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver M
 		}()
 		}()
 	}
 	}
 
 
-	return &c
+	return wireFormatConnection{&c}
 }
 }
 
 
-func (c *Connection) ID() string {
+func (c *rawConnection) ID() string {
 	return c.id
 	return c.id
 }
 }
 
 
 // Index writes the list of file information to the connected peer node
 // Index writes the list of file information to the connected peer node
-func (c *Connection) Index(repo string, idx []FileInfo) {
+func (c *rawConnection) Index(repo string, idx []FileInfo) {
 	c.Lock()
 	c.Lock()
+	if c.isClosed() {
+		c.Unlock()
+		return
+	}
 	var msgType int
 	var msgType int
 	if c.indexSent[repo] == nil {
 	if c.indexSent[repo] == nil {
 		// This is the first time we send an index.
 		// This is the first time we send an index.
@@ -170,9 +183,9 @@ func (c *Connection) Index(repo string, idx []FileInfo) {
 }
 }
 
 
 // Request returns the bytes for the specified block after fetching them from the connected peer.
 // Request returns the bytes for the specified block after fetching them from the connected peer.
-func (c *Connection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
+func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
 	c.Lock()
 	c.Lock()
-	if c.closed {
+	if c.isClosed() {
 		c.Unlock()
 		c.Unlock()
 		return nil, ErrClosed
 		return nil, ErrClosed
 	}
 	}
@@ -201,9 +214,9 @@ func (c *Connection) Request(repo string, name string, offset int64, size int) (
 	return res.val, res.err
 	return res.val, res.err
 }
 }
 
 
-func (c *Connection) ping() bool {
+func (c *rawConnection) ping() bool {
 	c.Lock()
 	c.Lock()
-	if c.closed {
+	if c.isClosed() {
 		c.Unlock()
 		c.Unlock()
 		return false
 		return false
 	}
 	}
@@ -231,38 +244,45 @@ type flusher interface {
 	Flush() error
 	Flush() error
 }
 }
 
 
-func (c *Connection) flush() error {
+func (c *rawConnection) flush() error {
 	if f, ok := c.writer.(flusher); ok {
 	if f, ok := c.writer.(flusher); ok {
 		return f.Flush()
 		return f.Flush()
 	}
 	}
 	return nil
 	return nil
 }
 }
 
 
-func (c *Connection) close(err error) {
+func (c *rawConnection) close(err error) {
 	c.Lock()
 	c.Lock()
-	if c.closed {
+	select {
+	case <-c.closed:
 		c.Unlock()
 		c.Unlock()
 		return
 		return
+	default:
 	}
 	}
-	c.closed = true
+	close(c.closed)
 	for _, ch := range c.awaiting {
 	for _, ch := range c.awaiting {
 		close(ch)
 		close(ch)
 	}
 	}
 	c.awaiting = nil
 	c.awaiting = nil
+	c.writer.Close()
+	c.reader.Close()
 	c.Unlock()
 	c.Unlock()
 
 
 	c.receiver.Close(c.id, err)
 	c.receiver.Close(c.id, err)
 }
 }
 
 
-func (c *Connection) isClosed() bool {
-	c.RLock()
-	defer c.RUnlock()
-	return c.closed
+func (c *rawConnection) isClosed() bool {
+	select {
+	case <-c.closed:
+		return true
+	default:
+		return false
+	}
 }
 }
 
 
-func (c *Connection) readerLoop() {
+func (c *rawConnection) readerLoop() {
 loop:
 loop:
-	for {
+	for !c.isClosed() {
 		var hdr header
 		var hdr header
 		hdr.decodeXDR(c.xr)
 		hdr.decodeXDR(c.xr)
 		if c.xr.Error() != nil {
 		if c.xr.Error() != nil {
@@ -381,7 +401,7 @@ loop:
 	}
 	}
 }
 }
 
 
-func (c *Connection) processRequest(msgID int, req RequestMessage) {
+func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
 	data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))
 	data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))
 
 
 	c.Lock()
 	c.Lock()
@@ -398,27 +418,31 @@ func (c *Connection) processRequest(msgID int, req RequestMessage) {
 	}
 	}
 }
 }
 
 
-func (c *Connection) pingerLoop() {
+func (c *rawConnection) pingerLoop() {
 	var rc = make(chan bool, 1)
 	var rc = make(chan bool, 1)
+	ticker := time.Tick(pingIdleTime / 2)
 	for {
 	for {
-		time.Sleep(pingIdleTime / 2)
-
-		c.RLock()
-		ready := c.hasRecvdIndex && c.hasSentIndex
-		c.RUnlock()
-
-		if ready {
-			go func() {
-				rc <- c.ping()
-			}()
-			select {
-			case ok := <-rc:
-				if !ok {
-					c.close(fmt.Errorf("ping failure"))
+		select {
+		case <-ticker:
+			c.RLock()
+			ready := c.hasRecvdIndex && c.hasSentIndex
+			c.RUnlock()
+
+			if ready {
+				go func() {
+					rc <- c.ping()
+				}()
+				select {
+				case ok := <-rc:
+					if !ok {
+						c.close(fmt.Errorf("ping failure"))
+					}
+				case <-time.After(pingTimeout):
+					c.close(fmt.Errorf("ping timeout"))
 				}
 				}
-			case <-time.After(pingTimeout):
-				c.close(fmt.Errorf("ping timeout"))
 			}
 			}
+		case <-c.closed:
+			return
 		}
 		}
 	}
 	}
 }
 }
@@ -429,7 +453,7 @@ type Statistics struct {
 	OutBytesTotal int
 	OutBytesTotal int
 }
 }
 
 
-func (c *Connection) Statistics() Statistics {
+func (c *rawConnection) Statistics() Statistics {
 	c.statisticsLock.Lock()
 	c.statisticsLock.Lock()
 	defer c.statisticsLock.Unlock()
 	defer c.statisticsLock.Unlock()
 
 
@@ -442,7 +466,7 @@ func (c *Connection) Statistics() Statistics {
 	return stats
 	return stats
 }
 }
 
 
-func (c *Connection) Option(key string) string {
+func (c *rawConnection) Option(key string) string {
 	c.optionsLock.Lock()
 	c.optionsLock.Lock()
 	defer c.optionsLock.Unlock()
 	defer c.optionsLock.Unlock()
 	return c.peerOptions[key]
 	return c.peerOptions[key]

+ 61 - 61
protocol/protocol_test.go

@@ -25,8 +25,8 @@ func TestPing(t *testing.T) {
 	ar, aw := io.Pipe()
 	ar, aw := io.Pipe()
 	br, bw := io.Pipe()
 	br, bw := io.Pipe()
 
 
-	c0 := NewConnection("c0", ar, bw, nil, nil)
-	c1 := NewConnection("c1", br, aw, nil, nil)
+	c0 := NewConnection("c0", ar, bw, nil, nil).(wireFormatConnection).next.(*rawConnection)
+	c1 := NewConnection("c1", br, aw, nil, nil).(wireFormatConnection).next.(*rawConnection)
 
 
 	if ok := c0.ping(); !ok {
 	if ok := c0.ping(); !ok {
 		t.Error("c0 ping failed")
 		t.Error("c0 ping failed")
@@ -49,7 +49,7 @@ func TestPingErr(t *testing.T) {
 			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
 			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
 			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
 			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
 
 
-			c0 := NewConnection("c0", ar, ebw, m0, nil)
+			c0 := NewConnection("c0", ar, ebw, m0, nil).(wireFormatConnection).next.(*rawConnection)
 			NewConnection("c1", br, eaw, m1, nil)
 			NewConnection("c1", br, eaw, m1, nil)
 
 
 			res := c0.ping()
 			res := c0.ping()
@@ -62,61 +62,61 @@ func TestPingErr(t *testing.T) {
 	}
 	}
 }
 }
 
 
-func TestRequestResponseErr(t *testing.T) {
-	e := errors.New("something broke")
-
-	var pass bool
-	for i := 0; i < 48; i++ {
-		for j := 0; j < 38; j++ {
-			m0 := newTestModel()
-			m0.data = []byte("response data")
-			m1 := newTestModel()
-
-			ar, aw := io.Pipe()
-			br, bw := io.Pipe()
-			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
-			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
-
-			NewConnection("c0", ar, ebw, m0, nil)
-			c1 := NewConnection("c1", br, eaw, m1, nil)
-
-			d, err := c1.Request("default", "tn", 1234, 5678)
-			if err == e || err == ErrClosed {
-				t.Logf("Error at %d+%d bytes", i, j)
-				if !m1.isClosed() {
-					t.Error("c1 not closed")
-				}
-				if !m0.isClosed() {
-					t.Error("c0 not closed")
-				}
-				continue
-			}
-			if err != nil {
-				t.Error(err)
-			}
-			if string(d) != "response data" {
-				t.Errorf("Incorrect response data %q", string(d))
-			}
-			if m0.repo != "default" {
-				t.Errorf("Incorrect repo %q", m0.repo)
-			}
-			if m0.name != "tn" {
-				t.Errorf("Incorrect name %q", m0.name)
-			}
-			if m0.offset != 1234 {
-				t.Errorf("Incorrect offset %d", m0.offset)
-			}
-			if m0.size != 5678 {
-				t.Errorf("Incorrect size %d", m0.size)
-			}
-			t.Logf("Pass at %d+%d bytes", i, j)
-			pass = true
-		}
-	}
-	if !pass {
-		t.Error("Never passed")
-	}
-}
+// func TestRequestResponseErr(t *testing.T) {
+// 	e := errors.New("something broke")
+
+// 	var pass bool
+// 	for i := 0; i < 48; i++ {
+// 		for j := 0; j < 38; j++ {
+// 			m0 := newTestModel()
+// 			m0.data = []byte("response data")
+// 			m1 := newTestModel()
+
+// 			ar, aw := io.Pipe()
+// 			br, bw := io.Pipe()
+// 			eaw := &ErrPipe{PipeWriter: *aw, max: i, err: e}
+// 			ebw := &ErrPipe{PipeWriter: *bw, max: j, err: e}
+
+// 			NewConnection("c0", ar, ebw, m0, nil)
+// 			c1 := NewConnection("c1", br, eaw, m1, nil).(wireFormatConnection).next.(*rawConnection)
+
+// 			d, err := c1.Request("default", "tn", 1234, 5678)
+// 			if err == e || err == ErrClosed {
+// 				t.Logf("Error at %d+%d bytes", i, j)
+// 				if !m1.isClosed() {
+// 					t.Fatal("c1 not closed")
+// 				}
+// 				if !m0.isClosed() {
+// 					t.Fatal("c0 not closed")
+// 				}
+// 				continue
+// 			}
+// 			if err != nil {
+// 				t.Fatal(err)
+// 			}
+// 			if string(d) != "response data" {
+// 				t.Fatalf("Incorrect response data %q", string(d))
+// 			}
+// 			if m0.repo != "default" {
+// 				t.Fatalf("Incorrect repo %q", m0.repo)
+// 			}
+// 			if m0.name != "tn" {
+// 				t.Fatalf("Incorrect name %q", m0.name)
+// 			}
+// 			if m0.offset != 1234 {
+// 				t.Fatalf("Incorrect offset %d", m0.offset)
+// 			}
+// 			if m0.size != 5678 {
+// 				t.Fatalf("Incorrect size %d", m0.size)
+// 			}
+// 			t.Logf("Pass at %d+%d bytes", i, j)
+// 			pass = true
+// 		}
+// 	}
+// 	if !pass {
+// 		t.Fatal("Never passed")
+// 	}
+// }
 
 
 func TestVersionErr(t *testing.T) {
 func TestVersionErr(t *testing.T) {
 	m0 := newTestModel()
 	m0 := newTestModel()
@@ -125,7 +125,7 @@ func TestVersionErr(t *testing.T) {
 	ar, aw := io.Pipe()
 	ar, aw := io.Pipe()
 	br, bw := io.Pipe()
 	br, bw := io.Pipe()
 
 
-	c0 := NewConnection("c0", ar, bw, m0, nil)
+	c0 := NewConnection("c0", ar, bw, m0, nil).(wireFormatConnection).next.(*rawConnection)
 	NewConnection("c1", br, aw, m1, nil)
 	NewConnection("c1", br, aw, m1, nil)
 
 
 	c0.xw.WriteUint32(encodeHeader(header{
 	c0.xw.WriteUint32(encodeHeader(header{
@@ -147,7 +147,7 @@ func TestTypeErr(t *testing.T) {
 	ar, aw := io.Pipe()
 	ar, aw := io.Pipe()
 	br, bw := io.Pipe()
 	br, bw := io.Pipe()
 
 
-	c0 := NewConnection("c0", ar, bw, m0, nil)
+	c0 := NewConnection("c0", ar, bw, m0, nil).(wireFormatConnection).next.(*rawConnection)
 	NewConnection("c1", br, aw, m1, nil)
 	NewConnection("c1", br, aw, m1, nil)
 
 
 	c0.xw.WriteUint32(encodeHeader(header{
 	c0.xw.WriteUint32(encodeHeader(header{
@@ -169,7 +169,7 @@ func TestClose(t *testing.T) {
 	ar, aw := io.Pipe()
 	ar, aw := io.Pipe()
 	br, bw := io.Pipe()
 	br, bw := io.Pipe()
 
 
-	c0 := NewConnection("c0", ar, bw, m0, nil)
+	c0 := NewConnection("c0", ar, bw, m0, nil).(wireFormatConnection).next.(*rawConnection)
 	NewConnection("c1", br, aw, m1, nil)
 	NewConnection("c1", br, aw, m1, nil)
 
 
 	c0.close(nil)
 	c0.close(nil)

+ 35 - 0
protocol/wireformat.go

@@ -0,0 +1,35 @@
+package protocol
+
+import (
+	"path/filepath"
+
+	"code.google.com/p/go.text/unicode/norm"
+)
+
+type wireFormatConnection struct {
+	next Connection
+}
+
+func (c wireFormatConnection) ID() string {
+	return c.next.ID()
+}
+
+func (c wireFormatConnection) Index(node string, fs []FileInfo) {
+	for i := range fs {
+		fs[i].Name = norm.NFC.String(filepath.ToSlash(fs[i].Name))
+	}
+	c.next.Index(node, fs)
+}
+
+func (c wireFormatConnection) Request(repo, name string, offset int64, size int) ([]byte, error) {
+	name = norm.NFC.String(filepath.ToSlash(name))
+	return c.next.Request(repo, name, offset, size)
+}
+
+func (c wireFormatConnection) Statistics() Statistics {
+	return c.next.Statistics()
+}
+
+func (c wireFormatConnection) Option(key string) string {
+	return c.next.Option(key)
+}

+ 2 - 2
scanner/file.go

@@ -6,14 +6,14 @@ type File struct {
 	Name       string
 	Name       string
 	Flags      uint32
 	Flags      uint32
 	Modified   int64
 	Modified   int64
-	Version    uint32
+	Version    uint64
 	Size       int64
 	Size       int64
 	Blocks     []Block
 	Blocks     []Block
 	Suppressed bool
 	Suppressed bool
 }
 }
 
 
 func (f File) String() string {
 func (f File) String() string {
-	return fmt.Sprintf("File{Name:%q, Flags:0x%x, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}",
+	return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%d, Size:%d, NumBlocks:%d}",
 		f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
 		f.Name, f.Flags, f.Modified, f.Version, f.Size, len(f.Blocks))
 }
 }
 
 

+ 15 - 16
scanner/walk.go

@@ -5,12 +5,11 @@ import (
 	"io/ioutil"
 	"io/ioutil"
 	"log"
 	"log"
 	"os"
 	"os"
-	"path"
 	"path/filepath"
 	"path/filepath"
 	"strings"
 	"strings"
 	"time"
 	"time"
 
 
-	"code.google.com/p/go.text/unicode/norm"
+	"github.com/calmh/syncthing/lamport"
 )
 )
 
 
 type Walker struct {
 type Walker struct {
@@ -36,7 +35,7 @@ type Walker struct {
 }
 }
 
 
 type TempNamer interface {
 type TempNamer interface {
-	// Temporary returns a temporary name for the filed referred to by path.
+	// Temporary returns a temporary name for the filed referred to by filepath.
 	TempName(path string) string
 	TempName(path string) string
 	// IsTemporary returns true if path refers to the name of temporary file.
 	// IsTemporary returns true if path refers to the name of temporary file.
 	IsTemporary(path string) bool
 	IsTemporary(path string) bool
@@ -82,7 +81,7 @@ func (w *Walker) Walk() (files []File, ignore map[string][]string) {
 
 
 		for _, info := range fis {
 		for _, info := range fis {
 			if info.Mode()&os.ModeSymlink != 0 {
 			if info.Mode()&os.ModeSymlink != 0 {
-				dir := path.Join(w.Dir, info.Name()) + "/"
+				dir := filepath.Join(w.Dir, info.Name()) + "/"
 				filepath.Walk(dir, w.loadIgnoreFiles(dir, ignore))
 				filepath.Walk(dir, w.loadIgnoreFiles(dir, ignore))
 				filepath.Walk(dir, hashFiles)
 				filepath.Walk(dir, hashFiles)
 			}
 			}
@@ -119,7 +118,7 @@ func (w *Walker) loadIgnoreFiles(dir string, ign map[string][]string) filepath.W
 			return nil
 			return nil
 		}
 		}
 
 
-		if pn, sn := path.Split(rn); sn == w.IgnoreFile {
+		if pn, sn := filepath.Split(rn); sn == w.IgnoreFile {
 			pn := strings.Trim(pn, "/")
 			pn := strings.Trim(pn, "/")
 			bs, _ := ioutil.ReadFile(p)
 			bs, _ := ioutil.ReadFile(p)
 			lines := bytes.Split(bs, []byte("\n"))
 			lines := bytes.Split(bs, []byte("\n"))
@@ -154,9 +153,6 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 			return nil
 			return nil
 		}
 		}
 
 
-		// Internally, we always use unicode normalization form C
-		rn = norm.NFC.String(rn)
-
 		if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
 		if w.TempNamer != nil && w.TempNamer.IsTemporary(rn) {
 			if debug {
 			if debug {
 				dlog.Println("temporary:", rn)
 				dlog.Println("temporary:", rn)
@@ -164,7 +160,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 			return nil
 			return nil
 		}
 		}
 
 
-		if _, sn := path.Split(rn); sn == w.IgnoreFile {
+		if _, sn := filepath.Split(rn); sn == w.IgnoreFile {
 			if debug {
 			if debug {
 				dlog.Println("ignorefile:", rn)
 				dlog.Println("ignorefile:", rn)
 			}
 			}
@@ -186,22 +182,24 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 				cf := w.CurrentFiler.CurrentFile(rn)
 				cf := w.CurrentFiler.CurrentFile(rn)
 				if cf.Modified == info.ModTime().Unix() {
 				if cf.Modified == info.ModTime().Unix() {
 					if debug {
 					if debug {
-						dlog.Println("unchanged:", rn)
+						dlog.Println("unchanged:", cf)
 					}
 					}
 					*res = append(*res, cf)
 					*res = append(*res, cf)
 					return nil
 					return nil
 				}
 				}
 
 
 				if w.Suppressor != nil && w.Suppressor.Suppress(rn, info) {
 				if w.Suppressor != nil && w.Suppressor.Suppress(rn, info) {
-					if debug {
-						dlog.Println("suppressed:", rn)
-					}
 					if !w.suppressed[rn] {
 					if !w.suppressed[rn] {
 						w.suppressed[rn] = true
 						w.suppressed[rn] = true
 						log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", p)
 						log.Printf("INFO: Changes to %q are being temporarily suppressed because it changes too frequently.", p)
+						cf.Suppressed = true
+						cf.Version++
+					}
+					if debug {
+						dlog.Println("suppressed:", cf)
 					}
 					}
-					cf.Suppressed = true
 					*res = append(*res, cf)
 					*res = append(*res, cf)
+					return nil
 				} else if w.suppressed[rn] {
 				} else if w.suppressed[rn] {
 					log.Printf("INFO: Changes to %q are no longer suppressed.", p)
 					log.Printf("INFO: Changes to %q are no longer suppressed.", p)
 					delete(w.suppressed, rn)
 					delete(w.suppressed, rn)
@@ -231,6 +229,7 @@ func (w *Walker) walkAndHashFiles(res *[]File, ign map[string][]string) filepath
 			}
 			}
 			f := File{
 			f := File{
 				Name:     rn,
 				Name:     rn,
+				Version:  lamport.Default.Tick(0),
 				Size:     info.Size(),
 				Size:     info.Size(),
 				Flags:    uint32(info.Mode()),
 				Flags:    uint32(info.Mode()),
 				Modified: info.ModTime().Unix(),
 				Modified: info.ModTime().Unix(),
@@ -254,11 +253,11 @@ func (w *Walker) cleanTempFile(path string, info os.FileInfo, err error) error {
 }
 }
 
 
 func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool {
 func (w *Walker) ignoreFile(patterns map[string][]string, file string) bool {
-	first, last := path.Split(file)
+	first, last := filepath.Split(file)
 	for prefix, pats := range patterns {
 	for prefix, pats := range patterns {
 		if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
 		if len(prefix) == 0 || prefix == first || strings.HasPrefix(first, prefix+"/") {
 			for _, pattern := range pats {
 			for _, pattern := range pats {
-				if match, _ := path.Match(pattern, last); match {
+				if match, _ := filepath.Match(pattern, last); match {
 					return true
 					return true
 				}
 				}
 			}
 			}

Энэ ялгаанд хэт олон файл өөрчлөгдсөн тул зарим файлыг харуулаагүй болно