aboutsummaryrefslogtreecommitdiffstats
path: root/merkle.go
diff options
context:
space:
mode:
authorThibaut Horel <thibaut.horel@gmail.com>2016-05-04 15:54:19 -0400
committerThibaut Horel <thibaut.horel@gmail.com>2016-05-04 15:54:19 -0400
commit7943430749a22e6f26aa16ca2c48e97e9277998f (patch)
tree51a4d95e987d60f1b2936d5a5da9f5040efdf835 /merkle.go
downloadpos-7943430749a22e6f26aa16ca2c48e97e9277998f.tar.gz
Initial commit
Diffstat (limited to 'merkle.go')
-rw-r--r--merkle.go218
1 files changed, 218 insertions, 0 deletions
diff --git a/merkle.go b/merkle.go
new file mode 100644
index 0000000..06e4855
--- /dev/null
+++ b/merkle.go
@@ -0,0 +1,218 @@
+package main
+
+import (
+ "encoding/binary"
+ "os"
+
+ "golang.org/x/crypto/sha3"
+)
+
// hashSize is the on-disk size in bytes of one node record (SHA3-256 digest).
const hashSize int64 = 32
+
// Merkle is a disk-backed Merkle tree. Nodes are addressed by their BFS id
// (root is 0; the children of id are 2*id+1 and 2*id+2); implementations
// choose the on-disk layout.
type Merkle interface {
	// Build computes the whole tree and returns the root hash.
	Build() []byte
	// Put stores the hash data for node id.
	Put(id int64, data []byte)
	// Read loads the hash of node id into buf (len(buf) must be hashSize).
	Read(buf []byte, id int64)
	// Size returns the largest node id; ids range over 0..Size() inclusive.
	Size() int64
	// Proof returns the authentication path for leaf id.
	Proof(id int64) [][]byte
}
+
+func NewMerkle(mtype string, height int64, fname string) Merkle {
+ var m Merkle
+ switch mtype {
+ case "bfs":
+ m = NewBFSMerkle(height, fname)
+ case "post":
+ m = NewPostMerkle(height, fname)
+ }
+ return m
+}
+func ChildrenId(m Merkle, id int64) (id1, id2 int64) {
+ id2 = (id + 1) * 2
+ id1 = id2 - 1
+ return id1, id2
+}
+
+func Children(child1, child2 []byte, id int64, m Merkle) {
+ id1, id2 := ChildrenId(m, id)
+ m.Read(child1, id1)
+ m.Read(child2, id2)
+}
+
+func Init(m Merkle) {
+ h := sha3.New256()
+ hsize := h.Size()
+ buf := make([]byte, hsize)
+
+ for id := m.Size() / 2; id <= m.Size(); id++ {
+ h.Reset()
+ binary.Write(h, binary.LittleEndian, id)
+ buf = h.Sum(buf[:0])
+ m.Put(id, buf)
+ }
+}
+
+// nodes are stored in BFS order, root node first
// BFSMerkle stores a Merkle tree on disk with nodes laid out in BFS
// (level) order: the root record first, then each level left to right.
type BFSMerkle struct {
	height int64    // number of levels in the tree
	file   *os.File // backing file, one hashSize-byte record per node
	size   int64    // largest BFS node id: 2^height - 2 (ids run 0..size)
}
+
+func NewBFSMerkle(height int64, fname string) *BFSMerkle {
+ file, err := os.Create(fname)
+ if err != nil {
+ panic(err)
+ }
+ size := int64(1)<<uint64(height) - 2
+ return &BFSMerkle{height: height, file: file, size: size}
+}
+
// Size returns the largest BFS node id (2^height - 2); node ids therefore
// range over 0..Size() inclusive.
func (m *BFSMerkle) Size() int64 {
	return m.size
}
+
+func (m *BFSMerkle) Put(id int64, data []byte) {
+ m.file.WriteAt(data, id*hashSize)
+}
+
+func (m *BFSMerkle) Read(buf []byte, id int64) {
+ m.file.ReadAt(buf, id*hashSize)
+}
+
+// disk access is sequential and mostly backward
+func (m *BFSMerkle) Build() []byte {
+ Init(m)
+ size := m.Size()
+ h := sha3.New256()
+ hsize := h.Size()
+ child1 := make([]byte, hsize)
+ child2 := make([]byte, hsize)
+ buf := make([]byte, hsize)
+ for id := size/2 - 1; id >= 0; id-- {
+ Children(child1, child2, id, m)
+ h.Reset()
+ h.Write(child1)
+ h.Write(child2)
+ buf = h.Sum(buf[:0])
+ m.Put(id, buf)
+ }
+ return buf
+}
+
+func (m *BFSMerkle) Proof(id int64) [][]byte {
+ proof := make([][]byte, m.height)
+ proof[0] = make([]byte, hashSize)
+ m.Read(proof[0], id)
+ for i := 1; id > 0; i++ {
+ proof[i] = make([]byte, hashSize)
+ if id&1 == 0 {
+ m.Read(proof[i], id-1)
+ } else {
+ m.Read(proof[i], id+1)
+ }
+ id = (id - 1) >> 1
+ }
+ return proof
+}
+
+// nodes are stored in depth-first post order
// PostMerkle stores a Merkle tree on disk with nodes laid out in
// depth-first post-order (left subtree, right subtree, then the node).
type PostMerkle struct {
	height int64    // number of levels in the tree
	file   *os.File // backing file, one hashSize-byte record per node
	size   int64    // largest BFS node id: 2^height - 2 (ids run 0..size)
}
+
+func NewPostMerkle(height int64, fname string) *PostMerkle {
+ file, err := os.Create(fname)
+ if err != nil {
+ panic(err)
+ }
+ size := int64(1)<<uint64(height) - 2
+ return &PostMerkle{height: height, file: file, size: size}
+}
+
// Size returns the largest BFS node id (2^height - 2); node ids therefore
// range over 0..Size() inclusive.
func (m *PostMerkle) Size() int64 {
	return m.size
}
+
+func (m *PostMerkle) Put(id int64, data []byte) {
+ pos := Post(m.size, m.height, id)
+ m.file.WriteAt(data, pos*hashSize)
+}
+
+func (m *PostMerkle) Read(buf []byte, id int64) {
+ pos := Post(m.size, m.height, id)
+ m.file.ReadAt(buf, pos*hashSize)
+}
+
+// Iterative post-order depth-first construction of the Merkle tree
+// disk access is optimal and forward
// Build computes the whole tree by an iterative depth-first post-order
// traversal and returns the root hash. Because the on-disk layout is also
// post-order, every write lands at offset count*hsize, making disk access
// strictly forward and sequential (optimal).
//
// Invariant: stack[0..l-1] holds the hashes of completed subtrees along the
// current root-to-node path, deepest on top; merging a parent pops its two
// children and pushes the parent, so l never exceeds m.height.
//
// NOTE(review): h.Sum(stack[l-1][:0]) relies on Sum appending in place —
// stack[l-1] has exactly hsize capacity, so the digest lands in the existing
// backing array and the returned slice can be discarded.
// NOTE(review): WriteAt errors are ignored here; a failed write would
// silently corrupt the tree.
func (m *PostMerkle) Build() []byte {
	size := m.Size()
	h := sha3.New256()
	hsize := int64(h.Size())

	// pre-allocating the hash stack (one slot per tree level)
	stack := make([][]byte, m.height)
	for i := 0; i < len(stack); i++ {
		stack[i] = make([]byte, hsize)
	}

	var cur int64 = size / 2 // current node (bfs id); size/2 is the leftmost leaf
	var count int64 = 0      // post-order id of current node
	var l = 0                // length of the stack

	for count < size {
		if cur >= size/2 { // leaf node: hash its little-endian BFS id
			l++
			h.Reset()
			binary.Write(h, binary.LittleEndian, cur)
			h.Sum(stack[l-1][:0])
			m.file.WriteAt(stack[l-1], count*hsize)

			for cur&1 == 0 && count < size {
				// we just completed a right node, moving up to the parent
				cur = (cur - 1) >> 1
				count++
				h.Reset()
				h.Write(stack[l-2])
				h.Write(stack[l-1])
				l-- // pop two items, add one item
				h.Sum(stack[l-1][:0])
				m.file.WriteAt(stack[l-1], count*hsize)
			}
			// we just completed a left node, moving to its sibling
			cur++
			count++
		} else {
			cur = (cur << 1) + 1 // moving to the left child
		}
	}
	// After the final merge only the root remains on the stack.
	return stack[0]
}
+
// Proof returns the authentication path for leaf id by walking the tree from
// the root down to the leaf, reading the sibling of the path node at each
// level and filling the proof from right (near the root) to left; proof[0]
// is the leaf's own hash. All reads address the file directly by post-order
// offset, bypassing the Post translation used by Read.
//
// NOTE(review): id+1 turns the leaf's BFS id into a height-bit number whose
// bits below the top bit encode the root-to-leaf path (most significant
// first), which is what the mask scan walks — confirm against Post's layout.
// NOTE(review): ReadAt errors are ignored throughout.
func (m *PostMerkle) Proof(id int64) [][]byte {
	// traversing the tree from the root to the leaf reading the siblings along
	// the path and filling the proof from right to left
	proof := make([][]byte, m.height)
	cur := int64(1)<<uint64(m.height) - 2 // post-order id of the current node along the path, starting from the root
	size := int64(1) << uint64(m.height-1) // stride between a node and its left child in post-order
	mask := size >> 1
	id += 1
	for i := len(proof) - 1; mask > 0; i-- {
		proof[i] = make([]byte, hashSize)
		if mask&id > 0 { // leaf is in the right subtree of current node
			m.file.ReadAt(proof[i], (cur-size)*hashSize) // reading the left child
			cur -= 1 // moving to the right subtree (its root immediately precedes cur in post-order)
		} else { // leaf is in the left subtree of current node
			m.file.ReadAt(proof[i], (cur-1)*hashSize) // reading the right child
			cur -= size // moving to the left subtree
		}
		size = mask
		mask >>= 1
	}
	// cur now points at the leaf itself.
	proof[0] = make([]byte, hashSize)
	m.file.ReadAt(proof[0], cur*hashSize)
	return proof
}