From 7b285ebcb14f4d14d872b1bec0b9b26f376d3ac8 Mon Sep 17 00:00:00 2001
From: Zooko O'Whielacronx <zooko@zooko.com>
Date: Fri, 19 Dec 2008 08:18:07 -0700
Subject: [PATCH] immutable: remove the last remaining code (test-only or
 otherwise unused) that handled plaintext hashes or plaintext hash trees

---
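Reviewer note: the on-disk share layout is deliberately left unchanged by
this patch. The plaintext_hash_tree region is still reserved in the offset
table (now annotated UNUSED), presumably so existing shares keep their
layout; it is simply never written or read anymore. Here is a minimal
sketch of the v1 offset arithmetic as it stands after this patch
(v1_offsets, num_segments, and num_share_hashes are illustrative names,
not the project's API, and the merkle-tree sizing assumes a power-of-two
segment count):

    HASH_SIZE = 32  # hash length in bytes, matching layout.py

    def v1_offsets(data_size, num_segments, num_share_hashes):
        """Sketch of the v1 share-file offset table."""
        # A binary merkle tree over n (power-of-two) leaves has 2n-1 nodes.
        segment_hash_size = (2 * num_segments - 1) * HASH_SIZE
        # Each share-hash entry is a 2-byte index plus a 32-byte hash.
        share_hash_size = num_share_hashes * (2 + HASH_SIZE)
        offsets = {}
        x = 0x24                             # nine 4-byte header fields
        offsets['data'] = x
        x += data_size
        offsets['plaintext_hash_tree'] = x   # UNUSED: reserved, never filled
        x += segment_hash_size
        offsets['crypttext_hash_tree'] = x
        x += segment_hash_size
        offsets['block_hashes'] = x
        x += segment_hash_size
        offsets['share_hashes'] = x
        x += share_hash_size
        offsets['uri_extension'] = x         # uri_extension_length + uri_extension
        return offsets

The v2 layout (WriteBucketProxy_v2) has the same shape but with 8-byte
fields after the version number, so its data starts at 0x44 rather than
0x24.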
 src/allmydata/immutable/layout.py  | 34 +++++++-----------------------
 src/allmydata/interfaces.py        |  5 -----
 src/allmydata/test/test_storage.py | 10 ++-------
 3 files changed, 10 insertions(+), 39 deletions(-)
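Sanity check against the test changes below: with 100 bytes of share data,
4 segments, and 3 share-hash nodes, the sketch above puts uri_extension at
0x24 + 100 + 3*(7*32) + 3*(2+32) = 910, which agrees with the test's
sharesize arithmetic of header_size + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32)
+ 4+500 (the trailing 4+500 being the length-prefixed uri_extension).
Note that sharesize still counts three 7*32 hash-tree regions even though
only two trees are filled in, because the UNUSED plaintext region keeps
its space.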

diff --git a/src/allmydata/immutable/layout.py b/src/allmydata/immutable/layout.py
index 89153cf7..9412a2d4 100644
--- a/src/allmydata/immutable/layout.py
+++ b/src/allmydata/immutable/layout.py
@@ -18,13 +18,13 @@ the beginning of the share data.
 0x04: segment size
 0x08: data size
 0x0c: offset of data (=00 00 00 24)
-0x10: offset of plaintext_hash_tree
+0x10: offset of plaintext_hash_tree UNUSED
 0x14: offset of crypttext_hash_tree
 0x18: offset of block_hashes
 0x1c: offset of share_hashes
 0x20: offset of uri_extension_length + uri_extension
 0x24: start of data
-?   : start of plaintext_hash_tree
+?   : start of plaintext_hash_tree UNUSED
 ?   : start of crypttext_hash_tree
 ?   : start of block_hashes
 ?   : start of share_hashes
@@ -43,7 +43,7 @@ limitations described in #346.
 0x04: segment size
 0x0c: data size
 0x14: offset of data (=00 00 00 00 00 00 00 44)
-0x1c: offset of plaintext_hash_tree
+0x1c: offset of plaintext_hash_tree UNUSED
 0x24: offset of crypttext_hash_tree
 0x2c: offset of block_hashes
 0x34: offset of share_hashes
@@ -92,7 +92,7 @@ class WriteBucketProxy:
         x = 0x24
         offsets['data'] = x
         x += data_size
-        offsets['plaintext_hash_tree'] = x
+        offsets['plaintext_hash_tree'] = x # UNUSED
         x += self._segment_hash_size
         offsets['crypttext_hash_tree'] = x
         x += self._segment_hash_size
@@ -110,7 +110,7 @@ class WriteBucketProxy:
                                   segment_size,
                                   data_size,
                                   offsets['data'],
-                                  offsets['plaintext_hash_tree'],
+                                  offsets['plaintext_hash_tree'], # UNUSED
                                   offsets['crypttext_hash_tree'],
                                   offsets['block_hashes'],
                                   offsets['share_hashes'],
@@ -143,17 +143,6 @@ class WriteBucketProxy:
                          len(data), self._segment_size)
         return self._write(offset, data)
 
-    def put_plaintext_hashes(self, hashes):
-        offset = self._offsets['plaintext_hash_tree']
-        assert isinstance(hashes, list)
-        data = "".join(hashes)
-        precondition(len(data) == self._segment_hash_size,
-                     len(data), self._segment_hash_size)
-        precondition(offset+len(data) <= self._offsets['crypttext_hash_tree'],
-                     offset, len(data), offset+len(data),
-                     self._offsets['crypttext_hash_tree'])
-        return self._write(offset, data)
-
     def put_crypttext_hashes(self, hashes):
         offset = self._offsets['crypttext_hash_tree']
         assert isinstance(hashes, list)
@@ -220,7 +209,7 @@ class WriteBucketProxy_v2(WriteBucketProxy):
         x = 0x44
         offsets['data'] = x
         x += data_size
-        offsets['plaintext_hash_tree'] = x
+        offsets['plaintext_hash_tree'] = x # UNUSED
         x += self._segment_hash_size
         offsets['crypttext_hash_tree'] = x
         x += self._segment_hash_size
@@ -238,7 +227,7 @@ class WriteBucketProxy_v2(WriteBucketProxy):
                                   segment_size,
                                   data_size,
                                   offsets['data'],
-                                  offsets['plaintext_hash_tree'],
+                                  offsets['plaintext_hash_tree'], # UNUSED
                                   offsets['crypttext_hash_tree'],
                                   offsets['block_hashes'],
                                   offsets['share_hashes'],
@@ -306,7 +295,7 @@ class ReadBucketProxy:
         self._fieldstruct = fieldstruct
 
         for field in ( 'data',
-                       'plaintext_hash_tree',
+                       'plaintext_hash_tree', # UNUSED
                        'crypttext_hash_tree',
                        'block_hashes',
                        'share_hashes',
@@ -333,13 +322,6 @@ class ReadBucketProxy:
         return [ s[i:i+HASH_SIZE]
                  for i in range(0, len(s), HASH_SIZE) ]
 
-    def get_plaintext_hashes(self):
-        offset = self._offsets['plaintext_hash_tree']
-        size = self._offsets['crypttext_hash_tree'] - offset
-        d = self._read(offset, size)
-        d.addCallback(self._str2l)
-        return d
-
     def get_crypttext_hashes(self):
         offset = self._offsets['crypttext_hash_tree']
         size = self._offsets['block_hashes'] - offset
diff --git a/src/allmydata/interfaces.py b/src/allmydata/interfaces.py
index 5067123c..e87050ca 100644
--- a/src/allmydata/interfaces.py
+++ b/src/allmydata/interfaces.py
@@ -313,11 +313,6 @@ class IStorageBucketReader(Interface):
         @return: ShareData
         """
 
-    def get_plaintext_hashes():
-        """
-        @return: ListOf(Hash)
-        """
-
     def get_crypttext_hashes():
         """
         @return: ListOf(Hash)
diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 4fd6ce90..0f153ad4 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -134,8 +134,8 @@ class BucketProxy(unittest.TestCase):
 
     def _do_test_readwrite(self, name, header_size, wbp_class, rbp_class):
         # Let's pretend each share has 100 bytes of data, and that there are
-        # 4 segments (25 bytes each), and 8 shares total. So the three
-        # per-segment merkle trees (plaintext_hash_tree, crypttext_hash_tree,
+        # 4 segments (25 bytes each), and 8 shares total. So the two
+        # per-segment merkle trees (crypttext_hash_tree,
         # block_hashes) will have 4 leaves and 7 nodes each. The per-share
         # merkle tree (share_hashes) has 8 leaves and 15 nodes, and we need 3
         # nodes. Furthermore, let's assume the uri_extension is 500 bytes
@@ -146,8 +146,6 @@ class BucketProxy(unittest.TestCase):
 
         sharesize = header_size + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500
 
-        plaintext_hashes = [hashutil.tagged_hash("plain", "bar%d" % i)
-                            for i in range(7)]
         crypttext_hashes = [hashutil.tagged_hash("crypt", "bar%d" % i)
                             for i in range(7)]
         block_hashes = [hashutil.tagged_hash("block", "bar%d" % i)
@@ -170,7 +168,6 @@ class BucketProxy(unittest.TestCase):
         d.addCallback(lambda res: bp.put_block(1, "b"*25))
         d.addCallback(lambda res: bp.put_block(2, "c"*25))
         d.addCallback(lambda res: bp.put_block(3, "d"*20))
-        d.addCallback(lambda res: bp.put_plaintext_hashes(plaintext_hashes))
         d.addCallback(lambda res: bp.put_crypttext_hashes(crypttext_hashes))
         d.addCallback(lambda res: bp.put_block_hashes(block_hashes))
         d.addCallback(lambda res: bp.put_share_hashes(share_hashes))
@@ -197,9 +194,6 @@ class BucketProxy(unittest.TestCase):
             d1.addCallback(lambda res: rbp.get_block(3))
             d1.addCallback(lambda res: self.failUnlessEqual(res, "d"*20))
 
-            d1.addCallback(lambda res: rbp.get_plaintext_hashes())
-            d1.addCallback(lambda res:
-                           self.failUnlessEqual(res, plaintext_hashes))
             d1.addCallback(lambda res: rbp.get_crypttext_hashes())
             d1.addCallback(lambda res:
                            self.failUnlessEqual(res, crypttext_hashes))
-- 
2.45.2