deletion phase1: send renew/cancel-lease secrets, but my_secret is fake, and the...
author     Brian Warner <warner@lothar.com>
           Tue, 28 Aug 2007 00:28:51 +0000 (17:28 -0700)
committer  Brian Warner <warner@lothar.com>
           Tue, 28 Aug 2007 00:28:51 +0000 (17:28 -0700)
src/allmydata/interfaces.py
src/allmydata/storage.py
src/allmydata/test/test_storage.py
src/allmydata/test/test_upload.py
src/allmydata/upload.py
src/allmydata/util/hashutil.py

src/allmydata/interfaces.py
index 785b74774fa1f0803994a60bd7402cfa0635ef62..8311bc15dd23b7f3102eb5b516fdbb37a97e0b76 100644 (file)
@@ -16,6 +16,9 @@ URI = StringConstraint(300) # kind of arbitrary
 MAX_BUCKETS = 200  # per peer
 ShareData = StringConstraint(400000) # 1MB segment / k=3 = 334kB
 URIExtensionData = StringConstraint(1000)
+LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
+LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests
+
 
 class RIIntroducerClient(RemoteInterface):
     def new_peers(furls=SetOf(FURL)):
@@ -79,12 +82,26 @@ class RIBucketReader(RemoteInterface):
 
 class RIStorageServer(RemoteInterface):
     def allocate_buckets(storage_index=StorageIndex,
+                         renew_secret=LeaseRenewSecret,
+                         cancel_secret=LeaseCancelSecret,
                          sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                          allocated_size=int, canary=Referenceable):
         """
-        @param canary: If the canary is lost before close(), the bucket is deleted.
+        @param storage_index: the index of the bucket to be created or
+                              increfed.
+        @param sharenums: these are the share numbers (probably between 0 and
+                          99) that the sender is proposing to store on this
+                          server.
+        @param renew_secret: This is the secret used to protect bucket
+                             refresh requests. It is generated by the client
+                             and stored for later comparison by the server.
+                             Each server is given a different secret.
+        @param cancel_secret: Like renew_secret, but protects bucket decref.
+        @param canary: If the canary is lost before close(), the bucket is
+                       deleted.
         @return: tuple of (alreadygot, allocated), where alreadygot is what we
-            already have and is what we hereby agree to accept
+                 already have and allocated is what we hereby agree to
+                 accept. New leases are added for shares in both lists.
         """
         return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                        DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
src/allmydata/storage.py
index 8e42012d593ca3250f5e81c56e7815b392d0b36a..4998e20fa889e816cb8a283d3b97ad14c5d3351b 100644 (file)
@@ -98,7 +98,9 @@ class StorageServer(service.MultiService, Referenceable):
             space += bw.allocated_size()
         return space
 
-    def remote_allocate_buckets(self, storage_index, sharenums, allocated_size,
+    def remote_allocate_buckets(self, storage_index,
+                                renew_secret, cancel_secret,
+                                sharenums, allocated_size,
                                 canary):
         alreadygot = set()
         bucketwriters = {} # k: shnum, v: BucketWriter
src/allmydata/test/test_storage.py
index d58b064bbdbe210afaa9021fb0d0290e816a8297..accd66db64d74c8d8240a5e67797f7f3a445959f 100644 (file)
@@ -10,6 +10,9 @@ from allmydata.util import fileutil, hashutil
 from allmydata.storage import BucketWriter, BucketReader, \
      WriteBucketProxy, ReadBucketProxy, StorageServer
 
+RS = hashutil.tagged_hash("blah", "foo")
+CS = RS
+
 
 class Bucket(unittest.TestCase):
     def make_workdir(self, name):
@@ -186,7 +189,7 @@ class Server(unittest.TestCase):
         self.failUnlessEqual(ss.remote_get_buckets("vid"), {})
 
         canary = Referenceable()
-        already,writers = ss.remote_allocate_buckets("vid", [0,1,2],
+        already,writers = ss.remote_allocate_buckets("vid", RS, CS, [0,1,2],
                                                      75, canary)
         self.failUnlessEqual(already, set())
         self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
@@ -205,7 +208,7 @@ class Server(unittest.TestCase):
 
         # now if we ask about writing again, the server should offer those three
         # buckets as already present
-        already,writers = ss.remote_allocate_buckets("vid", [0,1,2,3,4],
+        already,writers = ss.remote_allocate_buckets("vid", RS, CS, [0,1,2,3,4],
                                                      75, canary)
         self.failUnlessEqual(already, set([0,1,2]))
         self.failUnlessEqual(set(writers.keys()), set([3,4]))
@@ -214,7 +217,7 @@ class Server(unittest.TestCase):
         # tell new uploaders that they already exist (so that we don't try to
         # upload into them a second time)
 
-        already,writers = ss.remote_allocate_buckets("vid", [2,3,4,5],
+        already,writers = ss.remote_allocate_buckets("vid", RS, CS, [2,3,4,5],
                                                      75, canary)
         self.failUnlessEqual(already, set([2,3,4]))
         self.failUnlessEqual(set(writers.keys()), set([5]))
@@ -223,14 +226,14 @@ class Server(unittest.TestCase):
         ss = self.create("test_sizelimits", 100)
         canary = Referenceable()
         
-        already,writers = ss.remote_allocate_buckets("vid1", [0,1,2],
+        already,writers = ss.remote_allocate_buckets("vid1", RS, CS, [0,1,2],
                                                      25, canary)
         self.failUnlessEqual(len(writers), 3)
         # now the StorageServer should have 75 bytes provisionally allocated,
         # allowing only 25 more to be claimed
         self.failUnlessEqual(len(ss._active_writers), 3)
 
-        already2,writers2 = ss.remote_allocate_buckets("vid2", [0,1,2],
+        already2,writers2 = ss.remote_allocate_buckets("vid2", RS, CS, [0,1,2],
                                                        25, canary)
         self.failUnlessEqual(len(writers2), 1)
         self.failUnlessEqual(len(ss._active_writers), 4)
@@ -252,7 +255,8 @@ class Server(unittest.TestCase):
         self.failUnlessEqual(len(ss._active_writers), 0)
 
         # now there should be 25 bytes allocated, and 75 free
-        already3,writers3 = ss.remote_allocate_buckets("vid3", [0,1,2,3],
+        already3,writers3 = ss.remote_allocate_buckets("vid3", RS, CS,
+                                                       [0,1,2,3],
                                                        25, canary)
         self.failUnlessEqual(len(writers3), 3)
         self.failUnlessEqual(len(ss._active_writers), 3)
@@ -268,7 +272,8 @@ class Server(unittest.TestCase):
         # during runtime, so if we were creating any metadata, the allocation
         # would be more than 25 bytes and this test would need to be changed.
         ss = self.create("test_sizelimits", 100)
-        already4,writers4 = ss.remote_allocate_buckets("vid4", [0,1,2,3],
+        already4,writers4 = ss.remote_allocate_buckets("vid4",
+                                                       RS, CS, [0,1,2,3],
                                                        25, canary)
         self.failUnlessEqual(len(writers4), 3)
         self.failUnlessEqual(len(ss._active_writers), 3)
src/allmydata/test/test_upload.py
index 48ece34518d74c70de1aac9e48b42b216425e17e..b4df9f5af06f1617f3d6d2dda94349a958f89ac5 100644 (file)
@@ -85,8 +85,8 @@ class FakeStorageServer:
         d.addCallback(lambda res: _call())
         return d
 
-    def allocate_buckets(self, crypttext_hash, sharenums,
-                         share_size, canary):
+    def allocate_buckets(self, storage_index, renew_secret, cancel_secret,
+                         sharenums, share_size, canary):
         #print "FakeStorageServer.allocate_buckets(num=%d, size=%d)" % (len(sharenums), share_size)
         if self.mode == "full":
             return (set(), {},)
src/allmydata/upload.py
index 055e82cc8d038ccb32fdca5d99959e9aee974f65..1179cbb8f7eab5ec921166b1aeb8101df2a1c636 100644 (file)
@@ -33,7 +33,7 @@ EXTENSION_SIZE = 1000
 class PeerTracker:
     def __init__(self, peerid, permutedid, connection,
                  sharesize, blocksize, num_segments, num_share_hashes,
-                 crypttext_hash):
+                 storage_index):
         self.peerid = peerid
         self.permutedid = permutedid
         self.connection = connection # to an RIClient
@@ -49,9 +49,16 @@ class PeerTracker:
         self.blocksize = blocksize
         self.num_segments = num_segments
         self.num_share_hashes = num_share_hashes
-        self.crypttext_hash = crypttext_hash
+        self.storage_index = storage_index
         self._storageserver = None
 
+        h = hashutil.bucket_renewal_secret_hash
+        # XXX
+        self.my_secret = "secret"
+        self.renew_secret = h(self.my_secret, self.storage_index, self.peerid)
+        h = hashutil.bucket_cancel_secret_hash
+        self.cancel_secret = h(self.my_secret, self.storage_index, self.peerid)
+
     def query(self, sharenums):
         if not self._storageserver:
             d = self.connection.callRemote("get_service", "storageserver")
@@ -64,7 +71,9 @@ class PeerTracker:
     def _query(self, sharenums):
         #print " query", self.peerid, len(sharenums)
         d = self._storageserver.callRemote("allocate_buckets",
-                                           self.crypttext_hash,
+                                           self.storage_index,
+                                           self.renew_secret,
+                                           self.cancel_secret,
                                            sharenums,
                                            self.allocated_size,
                                            canary=Referenceable())
src/allmydata/util/hashutil.py
index 25ac24244837d4136dacae5bec62f7ca98666422..47ff0bf2b49a4eaf9a13958a617cec59e0226c15 100644 (file)
@@ -66,6 +66,34 @@ KEYLEN = 16
 def random_key():
     return os.urandom(KEYLEN)
 
+def file_renewal_secret_hash(my_secret, storage_index):
+    my_renewal_secret = tagged_hash(my_secret, "bucket_renewal_secret")
+    file_renewal_secret = tagged_pair_hash("file_renewal_secret",
+                                           my_renewal_secret, storage_index)
+    return file_renewal_secret
+
+def file_cancel_secret_hash(my_secret, storage_index):
+    my_cancel_secret = tagged_hash(my_secret, "bucket_cancel_secret")
+    file_cancel_secret = tagged_pair_hash("file_cancel_secret",
+                                          my_cancel_secret, storage_index)
+    return file_cancel_secret
+
+def bucket_renewal_secret_hash(my_secret, storage_index, peerid):
+    my_renewal_secret = tagged_hash(my_secret, "bucket_renewal_secret")
+    file_renewal_secret = tagged_pair_hash("file_renewal_secret",
+                                           my_renewal_secret, storage_index)
+    bucket_renewal_secret = tagged_pair_hash("bucket_renewal_secret",
+                                             file_renewal_secret, peerid)
+    return bucket_renewal_secret
+
+def bucket_cancel_secret_hash(my_secret, storage_index, peerid):
+    my_cancel_secret = tagged_hash(my_secret, "bucket_cancel_secret")
+    file_cancel_secret = tagged_pair_hash("file_cancel_secret",
+                                          my_cancel_secret, storage_index)
+    bucket_cancel_secret = tagged_pair_hash("bucket_cancel_secret",
+                                            file_cancel_secret, peerid)
+    return bucket_cancel_secret
+
 def dir_write_enabler_hash(write_key):
     return tagged_hash("allmydata_dir_write_enabler_v1", write_key)
 def dir_read_key_hash(write_key):