-rw-r--r--   main.go          40
-rw-r--r--   merkle.go       218
-rw-r--r--   merkle_test.go   34
-rw-r--r--   utils.go         36
-rw-r--r--   utils_test.go    74
-rw-r--r--   verify.go        22
6 files changed, 424 insertions, 0 deletions
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..bfe2d4b
--- /dev/null
+++ b/main.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "runtime/pprof"
+)
+
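+// Prover wraps the Merkle implementation selected on the command line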
+type Prover struct {
+ Merkle
+}
+
+func NewProver(height int64, fname string, mtype string) *Prover {
+ m := NewMerkle(mtype, height, fname)
+ return &Prover{m}
+}
+
+func main() {
+ height := flag.Int64("height", 0, "number of nodes is 2 ** height - 1")
+ fname := flag.String("db", "test.db", "filename for the database")
+ mtype := flag.String("mtype", "bfs", "type of Merkle tree (bfs or post)")
+ prof := flag.String("prof", "prof.prof", "filename for profile information")
+ flag.Parse()
+
+ f, err := os.Create(*prof)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ log.Fatal(err)
+ }
+ defer pprof.StopCPUProfile()
+
+ p := NewProver(*height, *fname, *mtype)
+ root := p.Build()
+ fmt.Println(root)
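+
+ // build a proof for the first leaf (BFS id Size()/2) and recompute the root from it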
+ id := p.Size() / 2
+ proof := p.Proof(id)
+ fmt.Println(verify(id, proof))
+}
diff --git a/merkle.go b/merkle.go
new file mode 100644
index 0000000..06e4855
--- /dev/null
+++ b/merkle.go
@@ -0,0 +1,218 @@
+package main
+
+import (
+ "encoding/binary"
+ "os"
+
+ "golang.org/x/crypto/sha3"
+)
+
+const hashSize int64 = 32
+
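+// A Merkle tree with 2**height - 1 nodes addressed by BFS id: the root is 0,
+// the children of node i are 2*i+1 and 2*i+2, and the leaves occupy ids
+// Size()/2 .. Size().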
+type Merkle interface {
+ Build() []byte
+ Put(id int64, data []byte)
+ Read(buf []byte, id int64)
+ Size() int64
+ Proof(id int64) [][]byte
+}
+
+func NewMerkle(mtype string, height int64, fname string) Merkle {
+ var m Merkle
+ switch mtype {
+ case "bfs":
+ m = NewBFSMerkle(height, fname)
+ case "post":
+ m = NewPostMerkle(height, fname)
+ }
+ return m
+}
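+
+// ChildrenId returns the BFS ids of the two children of node id.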
+func ChildrenId(id int64) (id1, id2 int64) {
+ id2 = (id + 1) * 2
+ id1 = id2 - 1
+ return id1, id2
+}
+
+func Children(child1, child2 []byte, id int64, m Merkle) {
+ id1, id2 := ChildrenId(id)
+ m.Read(child1, id1)
+ m.Read(child2, id2)
+}
+
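+// Init seeds the leaves (ids Size()/2 .. Size()) with the hash of their own id.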
+func Init(m Merkle) {
+ h := sha3.New256()
+ hsize := h.Size()
+ buf := make([]byte, hsize)
+
+ for id := m.Size() / 2; id <= m.Size(); id++ {
+ h.Reset()
+ binary.Write(h, binary.LittleEndian, id)
+ buf = h.Sum(buf[:0])
+ m.Put(id, buf)
+ }
+}
+
+// nodes are stored in BFS order, root node first
+type BFSMerkle struct {
+ height int64
+ file *os.File
+ size int64
+}
+
+func NewBFSMerkle(height int64, fname string) *BFSMerkle {
+ file, err := os.Create(fname)
+ if err != nil {
+ panic(err)
+ }
+ size := int64(1)<<uint64(height) - 2
+ return &BFSMerkle{height: height, file: file, size: size}
+}
+
+func (m *BFSMerkle) Size() int64 {
+ return m.size
+}
+
+func (m *BFSMerkle) Put(id int64, data []byte) {
+ m.file.WriteAt(data, id*hashSize)
+}
+
+func (m *BFSMerkle) Read(buf []byte, id int64) {
+ m.file.ReadAt(buf, id*hashSize)
+}
+
+// disk access is sequential and mostly backward
+func (m *BFSMerkle) Build() []byte {
+ Init(m)
+ size := m.Size()
+ h := sha3.New256()
+ hsize := h.Size()
+ child1 := make([]byte, hsize)
+ child2 := make([]byte, hsize)
+ buf := make([]byte, hsize)
+ for id := size/2 - 1; id >= 0; id-- {
+ Children(child1, child2, id, m)
+ h.Reset()
+ h.Write(child1)
+ h.Write(child2)
+ buf = h.Sum(buf[:0])
+ m.Put(id, buf)
+ }
+ return buf
+}
+
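+// Proof returns the hash of node id followed by the sibling hashes on the
+// path to the root, bottom-up.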
+func (m *BFSMerkle) Proof(id int64) [][]byte {
+ proof := make([][]byte, m.height)
+ proof[0] = make([]byte, hashSize)
+ m.Read(proof[0], id)
+ for i := 1; id > 0; i++ {
+ proof[i] = make([]byte, hashSize)
+ if id&1 == 0 {
+ m.Read(proof[i], id-1)
+ } else {
+ m.Read(proof[i], id+1)
+ }
+ id = (id - 1) >> 1
+ }
+ return proof
+}
+
+// nodes are stored in depth-first post order
+type PostMerkle struct {
+ height int64
+ file *os.File
+ size int64
+}
+
+func NewPostMerkle(height int64, fname string) *PostMerkle {
+ file, err := os.Create(fname)
+ if err != nil {
+ panic(err)
+ }
+ size := int64(1)<<uint64(height) - 2
+ return &PostMerkle{height: height, file: file, size: size}
+}
+
+func (m *PostMerkle) Size() int64 {
+ return m.size
+}
+
+func (m *PostMerkle) Put(id int64, data []byte) {
+ pos := Post(m.size, m.height, id)
+ m.file.WriteAt(data, pos*hashSize)
+}
+
+func (m *PostMerkle) Read(buf []byte, id int64) {
+ pos := Post(m.size, m.height, id)
+ m.file.ReadAt(buf, pos*hashSize)
+}
+
+// Iterative post-order depth-first construction of the Merkle tree
+// disk access is optimal and forward
+func (m *PostMerkle) Build() []byte {
+ size := m.Size()
+ h := sha3.New256()
+ hsize := int64(h.Size())
+
+ // pre-allocating the hash stack
+ stack := make([][]byte, m.height)
+ for i := 0; i < len(stack); i++ {
+ stack[i] = make([]byte, hsize)
+ }
+
+ var cur int64 = size / 2 // current node (bfs id)
+ var count int64 = 0 // post-order id of current node
+ var l = 0 // length of the stack
+
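+ // stack[:l] holds the hashes of completed subtrees that are still waiting
+ // for their parent to be computed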
+ for count < size {
+ if cur >= size/2 { // leaf node
+ l++
+ h.Reset()
+ binary.Write(h, binary.LittleEndian, cur)
+ stack[l-1] = h.Sum(stack[l-1][:0])
+ m.file.WriteAt(stack[l-1], count*hsize)
+
+ for cur&1 == 0 && count < size {
+ // we just completed a right node, moving up to the parent
+ cur = (cur - 1) >> 1
+ count++
+ h.Reset()
+ h.Write(stack[l-2])
+ h.Write(stack[l-1])
+ l-- // pop two items, add one item
+ stack[l-1] = h.Sum(stack[l-1][:0])
+ m.file.WriteAt(stack[l-1], count*hsize)
+ }
+ // we just completed a left node, moving to its sibling
+ cur++
+ count++
+ } else {
+ cur = (cur << 1) + 1 // moving to the left child
+ }
+ }
+ return stack[0]
+}
+
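+// Proof returns the hash of node id followed by the sibling hashes on the
+// path to the root, bottom-up (same layout as BFSMerkle.Proof).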
+func (m *PostMerkle) Proof(id int64) [][]byte {
+ // traversing the tree from the root to the leaf reading the siblings along
+ // the path and filling the proof from right to left
+ proof := make([][]byte, m.height)
+ cur := int64(1)<<uint64(m.height) - 2 // post-order id of the current node along the path, starting from the root
+ size := int64(1) << uint64(m.height-1) // one more than the number of nodes in each child subtree of the current node
+ mask := size >> 1
+ id += 1
+ for i := len(proof) - 1; mask > 0; i-- {
+ proof[i] = make([]byte, hashSize)
+ if mask&id > 0 { // leaf is in the right subtree of current node
+ m.file.ReadAt(proof[i], (cur-size)*hashSize) // reading the left child
+ cur -= 1 // moving to the right subtree
+ } else { // leaf is in the left subtree of current node
+ m.file.ReadAt(proof[i], (cur-1)*hashSize) // reading the right child
+ cur -= size // moving to the left subtree
+ }
+ size = mask
+ mask >>= 1
+ }
+ proof[0] = make([]byte, hashSize)
+ m.file.ReadAt(proof[0], cur*hashSize)
+ return proof
+}
diff --git a/merkle_test.go b/merkle_test.go
new file mode 100644
index 0000000..c486dc1
--- /dev/null
+++ b/merkle_test.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "bytes"
+ "testing"
+)
+
+func testMerkle(mtype string) bool {
+ m := NewMerkle(mtype, 4, "test.db")
+ root := m.Build()
+ id := m.Size() / 2
+ proof := m.Proof(id)
+ v := verify(id, proof)
+ if !bytes.Equal(v, root) {
+ return false
+ }
+
+ id = m.Size()/2 + 1
+ proof = m.Proof(id)
+ v = verify(id, proof)
+ if !bytes.Equal(v, root) {
+ return false
+ }
+ return true
+}
+
+func TestMerkle(t *testing.T) {
+ tests := []string{"bfs", "post"}
+ for _, test := range tests {
+ if !testMerkle(test) {
+ t.Error(test)
+ }
+ }
+}
diff --git a/utils.go b/utils.go
new file mode 100644
index 0000000..513786c
--- /dev/null
+++ b/utils.go
@@ -0,0 +1,36 @@
+package main
+
+// floor of log2(x) (i.e. MSB position for positive integers)
+func Log(x int64) int64 {
+ var r int64 = 0
+ for ; x > 1; x >>= 1 {
+ r++
+ }
+ return r
+}
+
+// exponentiation by squaring
+func Pow(x int64, n int64) int64 {
+ var r int64 = 1
+ for i := n; i > 0; i >>= 1 {
+ if i&1 == 1 {
+ r *= x
+ }
+ x *= x
+ }
+ return r
+}
+
+// convert a node id from BFS order to depth-first post-order (dark magic)
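+// e.g. with size 14 and height 4 the leftmost leaf (id 7) maps to 0 and the
+// root (id 0) maps to 14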
+func Post(size int64, height int64, id int64) int64 {
+ id += 1
+ s := int64(1) << uint64(height-Log(id)) // one more than the number of nodes in the subtree rooted at id
+ r := s - 2
+ for ; id > 1; id >>= 1 {
+ if id&1 == 1 { // right child
+ r += s - 1
+ }
+ s <<= 1
+ }
+ return r
+}
diff --git a/utils_test.go b/utils_test.go
new file mode 100644
index 0000000..c42ecef
--- /dev/null
+++ b/utils_test.go
@@ -0,0 +1,74 @@
+package main
+
+import "testing"
+
+func TestLog(t *testing.T) {
+ tests := []struct {
+ input int64
+ expected int64
+ }{
+ {1, 0},
+ {2, 1},
+ {0, 0},
+ {3, 1},
+ {4, 2},
+ }
+ for _, test := range tests {
+ actual := Log(test.input)
+ if test.expected != actual {
+ t.Errorf("%d, expected: %d, actual: %d", test.input, test.expected,
+ actual)
+ }
+ }
+}
+
+func TestPow(t *testing.T) {
+ tests := []struct {
+ x int64
+ n int64
+ expected int64
+ }{
+ {1, 0, 1},
+ {2, 0, 1},
+ {0, 0, 1},
+ {0, 2, 0},
+ {1, 1, 1},
+ {1, 2, 1},
+ {2, 1, 2},
+ {2, 2, 4},
+ {2, 3, 8},
+ {3, 2, 9},
+ {6, 7, 279936},
+ }
+ for _, test := range tests {
+ actual := Pow(test.x, test.n)
+ if test.expected != actual {
+ t.Errorf("%d**%d, expected: %d, actual: %d", test.x, test.n,
+ test.expected, actual)
+ }
+ }
+
+}
+
+func TestPost(t *testing.T) {
+ tests := []struct {
+ size int64
+ height int64
+ id int64
+ expected int64
+ }{
+ {14, 4, 7, 0},
+ {14, 4, 8, 1},
+ {14, 4, 3, 2},
+ {14, 4, 5, 9},
+ {14, 4, 0, 14},
+ }
+ for _, test := range tests {
+ actual := Post(test.size, test.height, test.id)
+ if test.expected != actual {
+ t.Errorf("%d, %d, expected: %d, actual: %d", test.size, test.id,
+ test.expected, actual)
+ }
+ }
+
+}
diff --git a/verify.go b/verify.go
new file mode 100644
index 0000000..74c89b6
--- /dev/null
+++ b/verify.go
@@ -0,0 +1,22 @@
+package main
+
+import "golang.org/x/crypto/sha3"
+
+// verify recomputes the Merkle root from a proof for the node with the given
+// BFS id: proof[0] is the node's own hash, followed by the sibling hashes on
+// the path to the root, bottom-up
+func verify(id int64, proof [][]byte) []byte {
+ h := sha3.New256()
+ buf := make([]byte, len(proof[0])) // copy so Sum does not overwrite the caller's proof slice
+ copy(buf, proof[0])
+ for _, hash := range proof[1:] {
+ h.Reset()
+ if id&1 == 0 {
+ h.Write(hash)
+ h.Write(buf)
+ } else {
+ h.Write(buf)
+ h.Write(hash)
+ }
+ buf = h.Sum(buf[:0])
+ id = (id - 1) >> 1
+ }
+ return buf
+}