瀏覽代碼

fix(db): handle large numbers of blocks in update (#10025)

Avoid failure when inserting file with very large block list
Jakob Borg 6 月之前
父節點
當前提交
4096a35b86
共有 2 個文件被更改,包括 45 次插入和 5 次刪除
  1. 33 0
      internal/db/sqlite/db_test.go
  2. 12 5
      internal/db/sqlite/db_update.go

+ 33 - 0
internal/db/sqlite/db_test.go

@@ -1055,6 +1055,39 @@ func TestBlocklistGarbageCollection(t *testing.T) {
 	}
 }
 
+// TestInsertLargeFile verifies that a file whose block list is large enough
+// to exceed SQLite's per-statement bind-variable limit (the companion change
+// chunks inserts because >8000 blocks triggered "too many variables") can be
+// inserted, and that every one of its blocks is retrievable afterwards.
+func TestInsertLargeFile(t *testing.T) {
+	t.Parallel()
+
+	// Fresh temporary database, closed when the test finishes.
+	sdb, err := OpenTemp()
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Cleanup(func() {
+		if err := sdb.Close(); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	// Add a large file (many blocks)
+	// NOTE(review): presumably the 16000 argument to genFile yields enough
+	// blocks to exercise the chunked-insert path — confirm against genFile.
+
+	files := []protocol.FileInfo{genFile("test1", 16000, 1)}
+	if err := sdb.Update(folderID, protocol.LocalDeviceID, files); err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify all the blocks are here
+
+	for i, block := range files[0].Blocks {
+		bs, err := itererr.Collect(sdb.AllLocalBlocksWithHash(block.Hash))
+		if err != nil {
+			t.Fatal(err)
+		}
+		// An empty result means the block was dropped during insert.
+		if len(bs) == 0 {
+			t.Error("missing blocks for", i)
+		}
+	}
+}
+
 func TestErrorWrap(t *testing.T) {
 	if wrap(nil, "foo") != nil {
 		t.Fatal("nil should wrap to nil")

+ 12 - 5
internal/db/sqlite/db_update.go

@@ -310,11 +310,18 @@ func (*DB) insertBlocksLocked(tx *txPreparedStmts, blocklistHash []byte, blocks
 			"size":           b.Size,
 		}
 	}
-	_, err := tx.NamedExec(`
-		INSERT OR IGNORE INTO blocks (hash, blocklist_hash, idx, offset, size)
-		VALUES (:hash, :blocklist_hash, :idx, :offset, :size)
-	`, bs)
-	return wrap(err)
+
+	// Very large block lists (>8000 blocks) result in "too many variables"
+	// error. Chunk it to a reasonable size.
+	for chunk := range slices.Chunk(bs, 1000) {
+		if _, err := tx.NamedExec(`
+			INSERT OR IGNORE INTO blocks (hash, blocklist_hash, idx, offset, size)
+			VALUES (:hash, :blocklist_hash, :idx, :offset, :size)
+		`, chunk); err != nil {
+			return wrap(err)
+		}
+	}
+	return nil
 }
 
 func (s *DB) recalcGlobalForFolder(txp *txPreparedStmts, folderIdx int64) error {