WriteBucketProxy: improve __repr__
author    Brian Warner <warner@allmydata.com>
Tue, 29 Jan 2008 01:53:51 +0000 (18:53 -0700)
committer Brian Warner <warner@allmydata.com>
Tue, 29 Jan 2008 01:53:51 +0000 (18:53 -0700)
src/allmydata/storage.py
src/allmydata/test/test_storage.py
src/allmydata/upload.py

diff --git a/src/allmydata/storage.py b/src/allmydata/storage.py
index 86c58864ae4c8558b553f7ca8e73df53f2db0f80..487d63af9df845d9d9a59a339990eb5f70b09ccb 100644
--- a/src/allmydata/storage.py
+++ b/src/allmydata/storage.py
@@ -1007,18 +1007,19 @@ section starts. Each offset is measured from the beginning of the file.
 def allocated_size(data_size, num_segments, num_share_hashes,
                    uri_extension_size):
     wbp = WriteBucketProxy(None, data_size, 0, num_segments, num_share_hashes,
-                           uri_extension_size)
+                           uri_extension_size, None)
     uri_extension_starts_at = wbp._offsets['uri_extension']
     return uri_extension_starts_at + 4 + uri_extension_size
 
 class WriteBucketProxy:
     implements(IStorageBucketWriter)
     def __init__(self, rref, data_size, segment_size, num_segments,
-                 num_share_hashes, uri_extension_size):
+                 num_share_hashes, uri_extension_size, nodeid):
         self._rref = rref
         self._data_size = data_size
         self._segment_size = segment_size
         self._num_segments = num_segments
+        self._nodeid = nodeid
 
         effective_segments = mathutil.next_power_of_k(num_segments,2)
         self._segment_hash_size = (2*effective_segments - 1) * HASH_SIZE
@@ -1056,6 +1057,13 @@ class WriteBucketProxy:
         assert len(offset_data) == 0x24
         self._offset_data = offset_data
 
+    def __repr__(self):
+        if self._nodeid:
+            nodeid_s = idlib.nodeid_b2a(self._nodeid)
+        else:
+            nodeid_s = "[None]"
+        return "<allmydata.storage.WriteBucketProxy for node %s>" % nodeid_s
+
     def start(self):
         return self._write(0, self._offset_data)
 
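For reference, the new repr in action. A minimal sketch, assuming idlib.nodeid_b2a returns a printable (base32-style) encoding of the binary node id; the 20-byte node id below is made up for illustration:

    from allmydata.storage import WriteBucketProxy

    kwargs = dict(data_size=100, segment_size=25, num_segments=4,
                  num_share_hashes=3, uri_extension_size=500)

    # no node id known: the repr falls back to the "[None]" placeholder
    print repr(WriteBucketProxy(None, nodeid=None, **kwargs))
    # -> <allmydata.storage.WriteBucketProxy for node [None]>

    # with a (made-up) 20-byte node id: the repr names the server
    print repr(WriteBucketProxy(None, nodeid="\x9d" * 20, **kwargs))
    # -> <allmydata.storage.WriteBucketProxy for node ...>, where ... is
    #    whatever idlib.nodeid_b2a produces for that id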
diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 339b7d63b4dbdfb5dd38e05764fe0fd595f035b4..5d09e14dc52c19f1fa20619e900e02e3647b3341 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -99,7 +99,7 @@ class BucketProxy(unittest.TestCase):
                               segment_size=10,
                               num_segments=5,
                               num_share_hashes=3,
-                              uri_extension_size=500)
+                              uri_extension_size=500, nodeid=None)
         self.failUnless(interfaces.IStorageBucketWriter.providedBy(bp))
 
     def test_readwrite(self):
@@ -129,7 +129,8 @@ class BucketProxy(unittest.TestCase):
                               segment_size=25,
                               num_segments=4,
                               num_share_hashes=3,
-                              uri_extension_size=len(uri_extension))
+                              uri_extension_size=len(uri_extension),
+                              nodeid=None)
 
         d = bp.start()
         d.addCallback(lambda res: bp.put_block(0, "a"*25))
diff --git a/src/allmydata/upload.py b/src/allmydata/upload.py
index b892e219f8f619f6095b621db25aed41e18fd778..4bd795ebf9f864cb6c0450544a2092f556224059 100644
--- a/src/allmydata/upload.py
+++ b/src/allmydata/upload.py
@@ -105,7 +105,8 @@ class PeerTracker:
                                           self.blocksize,
                                           self.num_segments,
                                           self.num_share_hashes,
-                                          EXTENSION_SIZE)
+                                          EXTENSION_SIZE,
+                                          self.peerid)
             b[sharenum] = bp
         self.buckets.update(b)
         return (alreadygot, set(b.keys()))
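The upload.py change passes self.peerid into each proxy, presumably so that any log or error message which interpolates the proxy identifies the storage server being written to. A purely illustrative sketch (log_upload_error is a hypothetical helper, not part of upload.py):

    def log_upload_error(bucket_proxy, failure):
        # repr(bucket_proxy) now embeds the encoded node id of the server,
        # so this message says which peer the failing write was aimed at.
        return "upload to %r failed: %s" % (bucket_proxy, failure)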