From: David-Sarah Hopwood <david-sarah@jacaranda.org>
Date: Wed, 12 Dec 2012 06:23:16 +0000 (+0000)
Subject: Changes and additions to interface documentation.
X-Git-Url: https://git.rkrishnan.org/%5B/frontends/index.php?a=commitdiff_plain;h=5a5622ce1df5193dc1f76b6a52a66a173c014e68;p=tahoe-lafs%2Ftahoe-lafs.git

Changes and additions to interface documentation.

Signed-off-by: David-Sarah Hopwood <david-sarah@jacaranda.org>
---

diff --git a/src/allmydata/interfaces.py b/src/allmydata/interfaces.py
index 28481bc6..58f5b455 100644
--- a/src/allmydata/interfaces.py
+++ b/src/allmydata/interfaces.py
@@ -99,23 +99,29 @@ class RIStorageServer(RemoteInterface):
                          sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                          allocated_size=Offset, canary=Referenceable):
         """
-        @param storage_index: the index of the bucket to be created or
-                              increfed.
-        @param sharenums: these are the share numbers (probably between 0 and
-                          99) that the sender is proposing to store on this
-                          server.
-        @param renew_secret: This is the secret used to protect bucket refresh
-                             This secret is generated by the client and
-                             stored for later comparison by the server. Each
-                             server is given a different secret.
-        @param cancel_secret: This no longer allows lease cancellation, but
-                              must still be a unique value identifying the
-                              lease. XXX stop relying on it to be unique.
-        @param canary: If the canary is lost before close(), the bucket is
-                       deleted.
+        Allocate BucketWriters for a set of shares on this server.
+
+        renew_secret and cancel_secret are ignored as of Tahoe-LAFS v1.11.0,
+        but for backward compatibility with older servers, they should be
+        calculated in the same way as by previous clients (see
+        allmydata.util.hashutil.file_{renewal,cancel}_secret_hash).
+
+        Servers that ignore renew_secret and cancel_secret in methods of
+        this interface will advertise a true value for the
+        'ignores-lease-renewal-and-cancel-secrets' key (under
+        'http://allmydata.org/tahoe/protocols/storage/v1') in their
+        version information.
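+
+        For example, a client might check for this key as follows (a
+        hypothetical sketch; get_version() is the version query defined
+        elsewhere in this interface):
+
+            d = server.callRemote("get_version")
+            def _check(version):
+                info = version['http://allmydata.org/tahoe/protocols/storage/v1']
+                return info.get('ignores-lease-renewal-and-cancel-secrets', False)
+            d.addCallback(_check)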
+
+        @param storage_index: the index of the shareset in which the shares
+                              are to be created.
+        @param sharenums: the share numbers that the sender is proposing to store
+                          on this server.
+        @param renew_secret: previously used to authorize lease renewal.
+        @param cancel_secret: previously used to authorize lease cancellation.
+        @param canary: if the canary is lost before close(), the writes are
+                       abandoned.
         @return: tuple of (alreadygot, allocated), where alreadygot is what we
                  already have and allocated is what we hereby agree to accept.
-                 New leases are added for shares in both lists.
+                 Leases are added if necessary for shares in both lists.
         """
         return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                        DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
@@ -163,9 +169,6 @@ class RIStorageServer(RemoteInterface):
         """
         return Any() # always None
 
-    # XXX add a method that allows adding/renewing leases on only some shnums.
-    # See ticket #1816.
-
     def get_buckets(storage_index=StorageIndex):
         return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)
 
@@ -199,24 +202,23 @@ class RIStorageServer(RemoteInterface):
         This method is, um, large. The goal is to allow clients to update all
         the shares associated with a mutable file in a single round trip.
 
-        @param storage_index: the index of the bucket to be created or
-                              increfed.
+        @param storage_index: the index of the shareset to be operated on.
         @param write_enabler: a secret that is stored along with the slot.
                               Writes are accepted from any caller who can
                               present the matching secret. A different secret
                               should be used for each slot*server pair.
-        @param renew_secret: This is the secret used to protect bucket refresh
-                             This secret is generated by the client and
-                             stored for later comparison by the server. Each
-                             server is given a different secret.
-        @param cancel_secret: This no longer allows lease cancellation, but
-                              must still be a unique value identifying the
-                              lease. XXX stop relying on it to be unique.
+        @param renew_secret: previously used to authorize lease renewal.
+        @param cancel_secret: previously used to authorize lease cancellation.
 
         The 'secrets' argument is a tuple of (write_enabler, renew_secret,
-        cancel_secret). The first is required to perform any write. The
-        latter two are used when allocating new shares. To simply acquire a
-        new lease on existing shares, use an empty testv and an empty writev.
+        cancel_secret). The first is required to perform any write.
+        renew_secret and cancel_secret are ignored as of Tahoe-LAFS v1.11.0,
+        but for backward compatibility with older servers, they should be
+        calculated in the same way as by previous clients (see
+        allmydata.util.hashutil.file_{renewal,cancel}_secret_hash).
+
+        To simply acquire a new lease on existing shares, use an empty testv
+        and an empty writev.
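+
+        For example, to acquire a lease without modifying any shares (a
+        hypothetical sketch of the remote call):
+
+            tw_vectors = {} # no test or write vectors; no shares modified
+            server.callRemote("slot_testv_and_readv_and_writev",
+                              storage_index, secrets, tw_vectors, [])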
 
         Each share can have a separate test vector (i.e. a list of
         comparisons to perform). If all vectors for all shares pass, then all
@@ -309,6 +311,277 @@ class RIStorageServer(RemoteInterface):
         """
 
 
+class IStorageBackend(Interface):
+    """
+    Objects of this kind live on the server side and are used by the
+    storage server object.
+    """
+    def get_available_space():
+        """
+        Returns available space for share storage in bytes, or
+        None if this information is not available or if the available
+        space is unlimited.
+
+        If the backend is configured for read-only mode then this will
+        return 0.
+        """
+
+    def get_sharesets_for_prefix(prefix):
+        """
+        Return an iterable containing IShareSet objects for all storage
+        indices matching the given base-32 prefix, for which this backend
+        holds shares.
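+
+        For example (a hypothetical sketch; on-disk backends group sharesets
+        into two-character base-32 prefix directories):
+
+            for shareset in backend.get_sharesets_for_prefix("aa"):
+                print shareset.get_storage_index_string()
+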
+        XXX This will probably need to return a Deferred, but for now it
+        is synchronous.
+        """
+
+    def get_shareset(storageindex):
+        """
+        Get an IShareSet object for the given storage index.
+        This method is synchronous.
+        """
+
+    def fill_in_space_stats(stats):
+        """
+        Fill in the 'stats' dict with space statistics for this backend, in
+        'storage_server.*' keys.
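+
+        For example, a disk backend might fill in keys such as the following
+        (a hypothetical sketch; disk_avail and reserved_space are assumed to
+        be computed by the backend):
+
+            stats['storage_server.disk_avail'] = disk_avail
+            stats['storage_server.reserved_space'] = reserved_space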
+        """
+
+    def advise_corrupt_share(storageindex, sharetype, shnum, reason):
+        """
+        Clients who discover hash failures in shares that they have
+        downloaded from me will use this method to inform me about the
+        failures. I will record their concern so that my operator can
+        manually inspect the shares in question. This method is synchronous.
+
+        'sharetype' is either 'mutable' or 'immutable'. 'shnum' is the integer
+        share number. 'reason' is a human-readable explanation of the problem,
+        probably including some expected hash values and the computed ones
+        that did not match. Corruption advisories for mutable shares should
+        include a hash of the public key (the same value that appears in the
+        mutable-file verify-cap), since the current share format does not
+        store that on disk.
+
+        @param storageindex=str
+        @param sharetype=str
+        @param shnum=int
+        @param reason=str
+        """
+
+    def must_use_tubid_as_permutation_seed():
+        """
+        Is this a disk backend with existing shares? If True, then the server
+        must assume that it was around before #466, so it must use its TubID
+        as a permutation-seed.
+        """
+
+
+class IShareSet(Interface):
+    def get_storage_index():
+        """
+        Returns the storage index for this shareset.
+        """
+
+    def get_storage_index_string():
+        """
+        Returns the base32-encoded storage index for this shareset.
+        """
+
+    def get_overhead():
+        """
+        Returns an estimate of the storage overhead, in bytes, of this shareset
+        (exclusive of the space used by its shares).
+        """
+
+    def get_shares():
+        """
+        Returns a Deferred that fires with a pair
+        (list of IShareBase objects, set of corrupted shnums).
+        The share objects include only completed shares in this shareset.
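+
+        For example (a hypothetical sketch using a Twisted Deferred;
+        process() is an assumed per-share handler):
+
+            d = shareset.get_shares()
+            def _got((shares, corrupted_shnums)):
+                for share in shares:
+                    process(share)
+            d.addCallback(_got)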
+        """
+
+    def get_share(shnum):
+        """
+        Returns a Deferred that fires with an IShareBase object if the given
+        share exists, or fails with IndexError otherwise.
+        """
+
+    def delete_share(shnum):
+        """
+        Delete a stored share. Returns a Deferred that fires when complete.
+        This does not delete incoming shares.
+        """
+
+    def has_incoming(shnum):
+        """
+        Returns True if this shareset has an incoming (partial) share with this
+        number, otherwise False.
+        """
+
+    def make_bucket_writer(account, shnum, allocated_data_length, canary):
+        """
+        Create a bucket writer that can be used to write data to a given share.
+
+        @param account=Account
+        @param shnum=int: A share number in this shareset
+        @param allocated_data_length=int: The maximum space allocated for the
+                 share, in bytes
+        @param canary=Referenceable: If the canary is lost before close(), the
+                 bucket is deleted.
+        @return: an IStorageBucketWriter for the given share
+        """
+
+    def make_bucket_reader(account, share):
+        """
+        Create a bucket reader that can be used to read data from a given share.
+
+        @param account=Account
+        @param share=IShareForReading
+        @return: an IStorageBucketReader for the given share
+        """
+
+    def readv(wanted_shnums, read_vector):
+        """
+        Read a vector from the numbered shares in this shareset. An empty
+        wanted_shnums list means to return data from all known shares.
+        Return a Deferred that fires with a dict mapping the share number
+        to the corresponding ReadData.
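+
+        For example (a hypothetical sketch; offsets and lengths are in
+        bytes):
+
+            d = shareset.readv([3, 5], [(0, 100)])
+            # fires with {3: [<first 100 bytes of sh3>],
+            #             5: [<first 100 bytes of sh5>]}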
+
+        @param wanted_shnums=ListOf(int)
+        @param read_vector=ReadVector
+        @return: DeferredOf(DictOf(int, ReadData)): shnum -> results, with
+                 one key per share
+        """
+
+    def testv_and_readv_and_writev(write_enabler, test_and_write_vectors, read_vector, account):
+        """
+        General-purpose atomic test-read-and-set operation for mutable slots.
+        Perform a bunch of comparisons against the existing shares in this
+        shareset. If they all pass: use the read vectors to extract data from
+        all the shares, then apply a bunch of write vectors to those shares.
+        Return a Deferred that fires with a pair consisting of a boolean that is
+        True iff the test vectors passed, and a dict mapping the share number
+        to the corresponding ReadData. Reads do not include any modifications
+        made by the writes.
+
+        See the similar method in RIStorageServer for more detail.
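+
+        For example (a hypothetical sketch; the vector formats follow
+        RIStorageServer.slot_testv_and_readv_and_writev):
+
+            testv = [(0, 3, "eq", "old")]  # (offset, length, operator, specimen)
+            writev = [(0, "new")]          # (offset, data)
+            tw_vectors = {0: (testv, writev, None)}  # shnum 0; keep length
+            d = shareset.testv_and_readv_and_writev(write_enabler, tw_vectors,
+                                                    [], account)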
+
+        @param write_enabler=WriteEnablerSecret
+        @param test_and_write_vectors=TestAndWriteVectorsForShares
+        @param read_vector=ReadVector
+        @param account=Account
+        @return: DeferredOf(TupleOf(bool, DictOf(int, ReadData)))
+        """
+
+
+class IShareBase(Interface):
+    """
+    I represent an immutable or mutable share stored by a particular backend.
+    I may hold some, all, or none of the share data in memory.
+    """
+    def get_storage_index():
+        """
+        Returns the storage index.
+        """
+
+    def get_storage_index_string():
+        """
+        Returns the base32-encoded storage index.
+        """
+
+    def get_shnum():
+        """
+        Returns the share number.
+        """
+
+    def get_data_length():
+        """
+        Returns the data length in bytes.
+        """
+
+    def get_size():
+        """
+        Returns the size of the share in bytes.
+        """
+
+    def get_used_space():
+        """
+        Returns the amount of backend storage used by this share, in bytes,
+        including overhead (which may have to be estimated).
+        """
+
+    def unlink():
+        """
+        Signal that this share can be removed from the backend storage. This does
+        not guarantee that the share data will be immediately inaccessible, or
+        that it will be securely erased.
+        Returns a Deferred that fires after the share has been removed.
+
+        This may be called on a share that is being written to and has not
+        yet been closed.
+        """
+
+
+class IShareForReading(IShareBase):
+    """
+    I represent an immutable share that can be read from.
+    """
+    def read_share_data(offset, length):
+        """
+        Return a Deferred that fires with the read result.
+        """
+
+    def readv(read_vector):
+        """
+        Given a list of (offset, length) pairs, return a Deferred that fires with
+        a list of read results.
+        """
+
+
+class IShareForWriting(IShareBase):
+    """
+    I represent an immutable share that is being written.
+    """
+    def get_allocated_data_length():
+        """
+        Returns the allocated data length of the share in bytes. This is the maximum
+        amount of data that can be written (not including headers and leases).
+        """
+
+    def write_share_data(offset, data):
+        """
+        Write data at the given offset. Return a Deferred that fires when we
+        are ready to accept the next write.
+
+        Data must be written with no backtracking, i.e. offset must not be before
+        the previous end-of-data.
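+
+        For example (a hypothetical sketch; each write starts exactly at the
+        previous end-of-data):
+
+            d = share.write_share_data(0, "abc")
+            d.addCallback(lambda ign: share.write_share_data(3, "def"))
+            d.addCallback(lambda ign: share.close())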
+        """
+
+    def close():
+        """
+        Complete writing to this share.
+        """
+
+
+class IMutableShare(IShareBase):
+    """
+    I represent a mutable share.
+    """
+    def check_write_enabler(write_enabler):
+        """
+        Check that the given write enabler matches the one stored with this
+        share; fail if it does not.
+
+        @param write_enabler=WriteEnablerSecret
+        """
+
+    def check_testv(test_vector):
+        """
+        Return whether this share's current contents satisfy the given test
+        vector.
+
+        @param test_vector=TestVector
+        """
+
+    def writev(datav, new_length):
+        """
+        Apply the given data vector to this share. If new_length is not
+        None, the share's data is then truncated or extended to that length.
+
+        @param datav=DataVector
+        @param new_length=ChoiceOf(None, Offset)
+        """
+
+
 class IStorageBucketWriter(Interface):
     """
     Objects of this kind live on the client side.
@@ -344,7 +617,7 @@ class IStorageBucketWriter(Interface):
         of plaintext, crypttext, and shares), as well as encoding parameters
         that are necessary to recover the data. This is a serialized dict
         mapping strings to other strings. The hash of this data is kept in
-        the URI and verified before any of the data is used. All buckets for
+        the URI and verified before any of the data is used. All shares for
         a given file contain identical copies of this data.
 
         The serialization format is specified with the following pseudocode:
@@ -357,10 +630,9 @@ class IStorageBucketWriter(Interface):
         """
 
     def close():
-        """Finish writing and close the bucket. The share is not finalized
-        until this method is called: if the uploading client disconnects
-        before calling close(), the partially-written share will be
-        discarded.
+        """
+        Finish writing and finalize the share. If the uploading client disconnects
+        before calling close(), the partially-written share will be discarded.
 
         @return: a Deferred that fires (with None) when the operation completes
         """