from zope.interface import Interface
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable

HASH_SIZE = 32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file

DEFAULT_MAX_SEGMENT_SIZE = 128*1024

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2 GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable bucket modifications
LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests

class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """
        return None

    def abort():
        """Abandon all the data that has been written.
        """
        return None

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share(),
        which is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """

TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt, nop"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
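
# An illustrative (non-normative) sketch of how a client might build these
# vectors for a basic test-and-set on share 0; 'expected' and 'new_data' are
# hypothetical byte strings prepared by the caller:
#
#   testv = [(0, len(expected), 'eq', expected)]   # TestVector
#   datav = [(0, new_data)]                        # DataVector
#   tw_vectors = {0: (testv, datav, None)}         # new_length=None: no truncation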

class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket
                             refresh. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
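
    # An illustrative client-side call (non-normative): given a foolscap
    # RemoteReference 'rref' to this server, and caller-prepared values for
    # 'si', the two lease secrets, 'share_size', and 'canary':
    #
    #   d = rref.callRemote("allocate_buckets", si,
    #                       renew_secret, cancel_secret,
    #                       sharenums=set(range(10)),
    #                       allocated_size=share_size, canary=canary)
    #   # fires with (alreadygot, allocated): a set of share numbers already
    #   # held, and a dict mapping new share numbers to RIBucketWriters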

    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        bucket)
        """
        return Any() # returns None now, but future versions might change

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket, resetting the timer to 31 days.
        Some networks will use this, some will not. If there is no bucket for
        the given storage_index, IndexError will be raised.

        For mutable shares, if the given renew_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """General-purpose test-and-set operation for mutable slots. Perform
        a bunch of comparisons against the existing shares. If they all pass,
        then apply a bunch of write vectors to those shares. Then use the
        read vectors to extract data from all the shares and return the data.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket
                             refresh. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a bool(
        (read(offset, length)) OPERATOR specimen ) and only performs the
        write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created.

        In Tahoe-LAFS v1.8.3 or later (except 1.9.0a1), if you send a write
        vector whose offset is beyond the end of the current data, the space
        between the end of the current data and the beginning of the write
        vector will be filled with zero bytes. In earlier versions the
        contents of this space was unspecified (and might end up containing
        secrets). Storage servers with the new zero-filling behavior will
        advertise a true value for the 'fills-holes-with-zero-bytes' key
        (under 'http://allmydata.org/tahoe/protocols/storage/v1') in their
        version information.

        Each write vector is accompanied by a 'new_length' argument, which
        can be used to truncate the data. If new_length is not None and it is
        less than the current size of the data (after applying all write
        vectors), then the data will be truncated to new_length. If
        new_length==0, the share will be deleted.

        In Tahoe-LAFS v1.8.2 and earlier, new_length could also be used to
        enlarge the file by sending a number larger than the size of the data
        after applying all write vectors. That behavior was not used, and as
        of Tahoe-LAFS v1.8.3 it no longer works and the new_length is ignored
        in that case.

        If a storage client knows that the server supports zero-filling, for
        example from the 'fills-holes-with-zero-bytes' key in its version
        information, it can extend the file efficiently by writing a single
        zero byte just before the new end-of-file. Otherwise it must
        explicitly write zeroes to all bytes between the old and new
        end-of-file. In any case it should avoid sending new_length larger
        than the size of the data after applying all write vectors.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same vector is used for
        all shares. This captures the state that was tested by the test
        vector.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))
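
    # An illustrative (non-normative) test-and-set call from the client side,
    # reusing the tw_vectors sketch shown with the module-level vector types;
    # 'rref', 'si', and the three secrets are caller-prepared:
    #
    #   d = rref.callRemote("slot_testv_and_readv_and_writev", si,
    #                       (write_enabler, renew_secret, cancel_secret),
    #                       tw_vectors, [(0, 100)])
    #   # fires with (wrote, read_data): wrote is a bool, read_data maps
    #   # shnum -> [data for each (offset, length) in the read vector]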

    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        which did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """

class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode::

         for k in sorted(dict.keys()):
             assert re.match(r'^[a-zA-Z_\-]+$', k)
             write(k + ':' + netstring(dict[k]))

        @return: a Deferred that fires (with None) when the operation completes
        """
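
    # A minimal runnable sketch of that serialization, assuming the standard
    # netstring encoding "%d:%s," (illustrative, not the normative encoder):
    #
    #   import re
    #   def serialize_uri_extension(d):
    #       out = []
    #       for k in sorted(d.keys()):
    #           assert re.match(r'^[a-zA-Z_\-]+$', k)
    #           v = d[k]
    #           out.append(k + ':' + '%d:%s,' % (len(v), v))
    #       return ''.join(out)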

    def close():
        """Finish writing and close the bucket. The share is not finalized
        until this method is called: if the uploading client disconnects
        before calling close(), the partially-written share will be
        discarded.

        @return: a Deferred that fires (with None) when the operation completes
        """

class IStorageBucketReader(Interface):
    def get_block_data(blocknum=int, blocksize=int, size=int):
        """Most blocks will be the same size. The last block might be shorter
        than the others.

        @return: ShareData
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_block_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(Hash)
        """

    def get_share_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(TupleOf(int, Hash))
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """

class IStorageBroker(Interface):
    def get_servers_for_psi(peer_selection_index):
        """
        @return: list of IServer instances
        """

    def get_connected_servers():
        """
        @return: frozenset of connected IServer instances
        """

    def get_known_servers():
        """
        @return: frozenset of IServer instances
        """

    def get_all_serverids():
        """
        @return: frozenset of serverid strings
        """

    def get_nickname_for_serverid(serverid):
        """
        @return: unicode nickname, or None
        """

    # methods moved from IntroducerClient, need review
    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
        each active connection we've established to a remote service. This is
        mostly useful for unit tests that need to wait until a certain number
        of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are actively
        trying to connect to. Each RemoteServiceConnector has the following
        public attributes::

          service_name: the type of service provided, like 'storage'
          announcement_time: when we first heard about this service
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection

          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, same

          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web page
        that describes connecting and connected peers.
        """

    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have a connection (to
        one or more services) established. Mostly useful for unit tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """
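
    # A minimal sketch of such a permutation, assuming SHA-256 as the hash
    # (the real implementation may differ in hash choice and details):
    #
    #   import hashlib
    #   def permute(peers, key):
    #       # sort (peerid, rref) pairs by H(key + peerid): repeatable for a
    #       # given key, but effectively random across keys
    #       return sorted(peers,
    #                     key=lambda p: hashlib.sha256(key + p[0]).digest())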

class IDisplayableServer(Interface):
    def get_nickname():
        pass

    def get_name():
        pass

    def get_longname():
        pass

class IServer(IDisplayableServer):
    """I live in the client, and represent a single server."""
    def start_connecting(tub, trigger_cb):
        pass

class IMutableSlotWriter(Interface):
    """
    The interface for a writer around a mutable slot on a remote server.
    """
    def set_checkstring(checkstring, *args):
        """
        Set the checkstring that I will pass to the remote server when
        writing.

        @param checkstring: A packed checkstring to use.

        Note that implementations can differ in which semantics they
        wish to support for set_checkstring -- they can, for example,
        build the checkstring themselves from its constituents, or
        accept a pre-packed checkstring.
        """

    def get_checkstring():
        """
        Get the checkstring that I think currently exists on the remote
        server.
        """

    def put_block(data, segnum, salt):
        """
        Add a block and salt to the share.
        """

    def put_encprivkey(encprivkey):
        """
        Add the encrypted private key to the share.
        """

    def put_blockhashes(blockhashes=list):
        """
        Add the block hash tree to the share.
        """

    def put_sharehashes(sharehashes=dict):
        """
        Add the share hash chain to the share.
        """

    def get_signable():
        """
        Return the part of the share that needs to be signed.
        """

    def put_signature(signature):
        """
        Add the signature to the share.
        """

    def put_verification_key(verification_key):
        """
        Add the verification key to the share.
        """

    def finish_publishing():
        """
        Do anything necessary to finish writing the share to a remote
        server. I require that no further publishing needs to take place
        after this method has been called.
        """

class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    # TODO: rename to get_read_cap()
    def get_readonly():
        """Return another IURI instance, which represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IVerifierURI(Interface, IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    pass

class IDirectoryURI(Interface):
    pass

class IReadonlyDirectoryURI(Interface):
    pass

class CapConstraintError(Exception):
    """A constraint on a cap was violated."""

class MustBeDeepImmutableError(CapConstraintError):
    """Mutable children cannot be added to an immutable directory.
    Also, caps obtained from an immutable directory can trigger this error
    if they are later found to refer to a mutable object and then used."""

class MustBeReadonlyError(CapConstraintError):
    """Known write caps cannot be specified in a ro_uri field. Also,
    caps obtained from a ro_uri field can trigger this error if they
    are later found to be write caps and then used."""

class MustNotBeUnknownRWError(CapConstraintError):
    """Cannot add an unknown child cap specified in a rw_uri field."""

class IReadable(Interface):
    """I represent a readable object -- either an immutable file, or a
    specific version of a mutable file.
    """

    def is_readonly():
        """Return True if this reference provides only read access to the
        given file or directory (i.e. if you cannot modify it through this
        reference), or False if it also provides write access. Note that
        even if this reference is read-only, someone else may hold a
        read-write reference to it.

        For an IReadable returned by get_best_readable_version(), this will
        always return True, but for instances of subinterfaces such as
        IMutableFileVersion, it may return False."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file.
        """

    def get_storage_index():
        """Return the storage index of the file."""

    def get_size():
        """Return the length (in bytes) of this readable object."""

    def download_to_data():
        """Download all of the file contents. I return a Deferred that fires
        with the contents as a byte string."""

    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None).

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
         (if streaming == False)::
          consumer does p.resumeProducing()
          consumer.write(data)
          consumer does p.resumeProducing()
          consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        See src/allmydata/util/consumer.py for an example of a simple
        download-to-memory consumer.
        """
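
    # A minimal sketch of such a download-to-memory consumer (modeled on
    # allmydata/util/consumer.py; illustrative only):
    #
    #   from zope.interface import implements
    #   from twisted.internet.interfaces import IConsumer
    #
    #   class MemoryConsumer:
    #       implements(IConsumer)
    #       def __init__(self):
    #           self.chunks = []
    #           self.done = False
    #       def registerProducer(self, p, streaming):
    #           if streaming:
    #               p.resumeProducing() # call once; producer pushes the rest
    #           else:
    #               while not self.done:
    #                   p.resumeProducing() # pull mode: keep asking for data
    #       def write(self, data):
    #           self.chunks.append(data)
    #       def unregisterProducer(self):
    #           self.done = True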

class IWriteable(Interface):
    """
    I define methods that callers can use to update SDMF and MDMF
    mutable files on a Tahoe-LAFS grid.
    """
    # XXX: For the moment, we have only this. It is possible that we
    # want to move overwrite() and modify() in here too.
    def update(data, offset):
        """
        I write the data from my data argument to the MDMF file,
        starting at offset. I continue writing data until my data
        argument is exhausted, appending data to the file as necessary.
        """
        # assert IMutableUploadable.providedBy(data)
        # to append data: offset=node.get_size_of_best_version()
        # do we want to support compacting MDMF?
        # for an MDMF file, this can be done with O(data.get_size())
        # memory. For an SDMF file, any modification takes
        # O(node.get_size_of_best_version()).

class IMutableFileVersion(IReadable):
    """I provide access to a particular version of a mutable file. The
    access is read/write if I was obtained from a filenode derived from
    a write cap, or read-only if the filenode was derived from a read cap.
    """

    def get_sequence_number():
        """Return the sequence number of this version."""

    def get_servermap():
        """Return the IMutableFileServerMap instance that was used to create
        this object.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    # TODO: Can this be overwrite instead of replace?
    def replace(new_contents):
        """Replace the contents of the mutable file, provided that no other
        node has published (or is attempting to publish, concurrently) a
        newer version of the file than this one.

        I will avoid modifying any share that is different than the version
        given by get_sequence_number(). However, if another node is writing
        to the file at the same time as me, I may manage to update some shares
        while they update others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all versions).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading this version,
        applying the modifier function (or bound method), then uploading
        the new version. This will succeed as long as no other node
        publishes a version between the download and the upload.
        I return a Deferred that fires (with a PublishStatus object) when
        the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_to_data(), the old contents will be from this version,
        but the modifier can use the servermap to make other decisions
        (such as refusing to apply the delta if there are multiple parallel
        versions, or if there is evidence of a newer unrecoverable version).
        'first_time' will be True the first time the modifier is called,
        and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """
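
    # An illustrative modifier callable (names are hypothetical): append a
    # line unless it is already present, returning None to signal "no change
    # needed":
    #
    #   def add_line(old_contents, first_time, servermap):
    #       if line in old_contents:
    #           return None
    #       return old_contents + line
    #
    #   d = version.modify(add_line)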

# The hierarchy looks like this:
#  IFilesystemNode
#   IFileNode
#    IMutableFileNode
#    IImmutableFileNode
#   IDirectoryNode

class IFilesystemNode(Interface):
    def get_cap():
        """Return the strongest 'cap instance' associated with this node.
        (writecap for writeable-mutable files/directories, readcap for
        immutable or readonly-mutable files/directories). To convert this
        into a string, call .to_string() on the result."""

    def get_readcap():
        """Return a readonly cap instance for this node. For immutable or
        readonly nodes, get_cap() and get_readcap() return the same thing."""

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_uri():
        """Return the URI string corresponding to the strongest cap associated
        with this node. If this node is read-only, the URI will only offer
        read-only access. If this node is read-write, the URI will offer
        read-write access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_write_uri():
        """Return the URI string that can be used by others to get write
        access to this node, if it is writeable. If this is a read-only node,
        return None."""

    def get_readonly_uri():
        """Return the URI string that can be used by others to get read-only
        access to this node. The result is a read-only URI, regardless of
        whether this node is read-only or read-write.

        If you have merely read-only access to this node, get_readonly_uri()
        will return the same thing as get_uri().
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files and directories)."""

    def is_readonly():
        """Return True if this reference provides only read access to the
        given file or directory (i.e. if you cannot modify it through this
        reference), or False if it also provides write access. Note that
        even if this reference is read-only, someone else may hold a
        read-write reference to it."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file.
        """

    def is_unknown():
        """Return True if this is an unknown node."""

    def is_allowed_in_immutable_directory():
        """Return True if this node is allowed as a child of a deep-immutable
        directory. This is true if either the node is of a known-immutable
        type, or it is unknown and read-only.
        """

    def raise_error():
        """Raise any error associated with this node."""

    # XXX: These may not be appropriate outside the context of an IReadable.
    def get_size():
        """Return the length (in bytes) of the data this node represents. For
        directory nodes, I return the size of the backing store. I return
        synchronously and do not consult the network, so for mutable objects,
        I will return the most recently observed size for the object, or None
        if I don't remember a size. Use get_current_size, which returns a
        Deferred, if you want more up-to-date information."""

    def get_current_size():
        """I return a Deferred that fires with the length (in bytes) of the
        data this node represents.
        """

class IFileNode(IFilesystemNode):
    """I am a node which represents a file: a sequence of bytes. I am not a
    container, like IDirectoryNode."""
    def get_best_readable_version():
        """Return a Deferred that fires with an IReadable for the 'best'
        available version of the file. The IReadable provides only read
        access, even if this filenode was derived from a write cap.

        For an immutable file, there is only one version. For a mutable
        file, the 'best' version is the recoverable version with the
        highest sequence number. If no uncoordinated writes have occurred,
        and if enough shares are available, then this will be the most
        recent version that has been uploaded. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def download_best_version():
        """Download the contents of the version that would be returned
        by get_best_readable_version(). This is equivalent to calling
        download_to_data() on the IReadable given by that method.

        I return a Deferred that fires with a byte string when the file
        has been fully downloaded. To support streaming download, use
        the 'read' method of IReadable. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be returned by
        get_best_readable_version().

        I return a Deferred that fires with an integer. If no version
        is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

class IImmutableFileNode(IFileNode, IReadable):
    """I am a node representing an immutable file. Immutable files have
    only one version."""

class IMutableFileNode(IFileNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node which has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.

    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """

    def get_best_mutable_version():
        """Return a Deferred that fires with an IMutableFileVersion for
        the 'best' available version of the file. The best version is
        the recoverable version with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has
        been uploaded.

        If no version is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap that was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """

    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all versions).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """
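
    # An illustrative update sequence using this servermap-based API
    # (non-normative; MODE_WRITE comes from allmydata.mutable.common):
    #
    #   d = node.get_servermap(MODE_WRITE)
    #   d.addCallback(lambda smap: node.upload(new_contents, smap))
    #   # fires with a PublishStatus object once the write is recorded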

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    def get_version():
        """Returns the mutable file protocol version."""

class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares"""

class NoSharesError(Exception):
    """Download was unable to get any shares at all."""

class DownloadStopped(Exception):
    pass

class UploadUnhappinessError(Exception):
    """Upload was unable to satisfy 'servers_of_happiness'"""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data which is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child which does not exist."""
    def __str__(self):
        # avoid UnicodeEncodeErrors when converting to str
        return self.__repr__()

class ChildOfWrongTypeError(Exception):
    """An operation was attempted on a child of the wrong type (file or directory)."""

class IDirectoryNode(IFilesystemNode):
    """I represent a filesystem node that is a container, with a
    name-to-child mapping, holding the tahoe equivalent of a directory. All
    child names are unicode strings, and all children are some sort of
    IFilesystemNode (a file, subdirectory, or unknown node).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of
        metadata."""

    def has_child(name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not. The child name must
        be a unicode string."""

    def get(name):
        """I return a Deferred that fires with a specific named child node,
        which is an IFilesystemNode. The child name must be a unicode string.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_metadata_for(name):
        """I return a Deferred that fires with the metadata dictionary for
        a specific named child node. The child name must be a unicode string.
        This metadata is stored in the *edge*, not in the child, so it is
        attached to the parent dirnode rather than the child node.
        I raise NoSuchChildError if I do not have a child by that name."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the new
        metadata. The child name must be a unicode string. This metadata is
        stored in the *edge*, not in the child, so it is attached to the
        parent dirnode rather than the child node. I return a Deferred
        (that fires with this dirnode) when the operation is complete.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_child_at_path(path):
        """Transform a child path into an IFilesystemNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IFilesystemNode and metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If you have
        both the writecap and readcap, you should provide both arguments.
        If you have only one cap and don't know whether it is read-only,
        provide it as the writecap argument and leave the readcap as None.
        If you have only one cap that is known to be read-only, provide it
        as the readcap argument and leave the writecap as None.
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_children(entries, overwrite=True):
        """Add multiple children (by writecap+readcap) to a directory node.
        Takes a dictionary, with childname as keys and (writecap, readcap)
        tuples (or (writecap, readcap, metadata) triples) as values. Returns
        a Deferred that fires (with this dirnode) when the operation
        finishes. This is equivalent to calling set_uri() multiple times, but
        is much more efficient. All child names must be unicode strings.
        """
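
    # An illustrative entries dict for set_children() (the caps shown are
    # hypothetical placeholders):
    #
    #   entries = {u"foo.txt": (writecap1, readcap1),
    #              u"bar.txt": (writecap2, readcap2, {"mtime": 1234567890})}
    #   d = dirnode.set_children(entries)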

    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IFilesystemNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple children to a directory node. Takes a dict mapping
        unicode childname to (child_node, metadata) tuples. If metadata=None,
        the original metadata is left unmodified. Returns a Deferred that
        fires (with this dirnode) when the operation finishes. This is
        equivalent to calling set_node() multiple times, but is much more
        efficient.
        """

    def add_file(name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting ImmutableFileNode to the directory at the given name. I set
        metadata the same way as set_uri and set_node. The child name must be
        a unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""

    def delete(name, must_exist=True, must_be_directory=False, must_be_file=False):
        """I remove the child at the specific name. I return a Deferred that
        fires when the operation finishes. The child name must be a unicode
        string. If must_exist is True and I do not have a child by that name,
        I raise NoSuchChildError. If must_be_directory is True and the child
        is a file, or if must_be_file is True and the child is a directory,
        I raise ChildOfWrongTypeError."""

    def create_subdirectory(name, initial_children={}, overwrite=True, metadata=None):
        """I create and attach a directory at the given name. The new
        directory can be empty, or it can be populated with children
        according to 'initial_children', which takes a dictionary in the same
        format as set_nodes (i.e. mapping unicode child name to (childnode,
        metadata) tuples). The child name must be a unicode string. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""

    def build_manifest():
        """I generate a table of everything reachable from this directory.
        I also compute deep-stats as described below.

        I return a Monitor. The Monitor's results will be a dictionary with
        the following keys::

         res['manifest']: a list of (path, cap) tuples for all nodes
                          (directories and files) reachable from this one.
                          'path' will be a tuple of unicode strings. The
                          origin dirnode will be represented by an empty path
                          tuple.
         res['verifycaps']: a list of (printable) verifycap strings, one for
                            each reachable non-LIT node. This is a set:
                            it will contain no duplicates.
         res['storage-index']: a list of (base32) storage index strings,
                               one for each reachable non-LIT node. This is
                               a set: it will contain no duplicates.
         res['stats']: a dictionary, the same that is generated by
                       start_deep_stats() below.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.
        """

    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

         count-immutable-files: count of how many CHK files are in the set
         count-mutable-files: same, for mutable files (does not include
                              directories)
         count-literal-files: same, for LIT files
         count-files: sum of the above three

         count-directories: count of directories

         size-immutable-files: total bytes for all CHK files in the set
         size-mutable-files (TODO): same, for current version of all mutable
                                    files, does not include directories
         size-literal-files: same, for LIT files
         size-directories: size of mutable files used by directories

         largest-directory: number of bytes in the largest directory
         largest-directory-children: number of children in the largest
                                     directory
         largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """

class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' argument to use less CPU). The encoding
        math will be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).

        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_params():
        """Return the 3-tuple of data_size, required_shares, max_shares"""
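
    # An illustrative parameter choice (non-normative): set_params(3000, 3, 10)
    # prepares the encoder to turn each 3000-byte block into 10 shares of
    # 1000 bytes each (data_size/required_shares), any 3 of which suffice to
    # reconstruct the block; total storage is roughly
    # max_shares*data_size/required_shares = 10000 bytes per block:
    #
    #   encoder.set_params(3000, 3, 10)
    #   assert encoder.get_params() == (3000, 3, 10)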

    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This method
        returns an index into this table; the value at this index is an
        encoder class, and this encoder is an instance of that class.
        """

    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """
1383 def encode_proposal(data, desired_share_ids=None):
1384 """Encode some data.
1386 'data' must be a string (or other buffer object), and len(data) must
1387 be equal to the 'data_size' value passed earlier to set_params().
1389 This will return a Deferred that will fire with two lists. The first
1390 is a list of shares, each of which is a string (or other buffer
1391 object) such that len(share) is the same as what get_share_size()
1392 returned earlier. The second is a list of shareids, in which each is
1393 an integer. The lengths of the two lists will always be equal to each
1394 other. The user should take care to keep each share closely
1395 associated with its shareid, as one is useless without the other.
1397 The length of this output list will normally be the same as the value
1398 provided to the 'max_shares' parameter of set_params(). This may be
1399 different if 'desired_share_ids' is provided.
1401 'desired_share_ids', if provided, is required to be a sequence of
1402 ints, each of which is required to be >= 0 and < max_shares. If not
1403 provided, encode() will produce 'max_shares' shares, as if
1404 'desired_share_ids' were set to range(max_shares). You might use this
1405 if you initially thought you were going to use 10 peers, started
1406 encoding, and then two of the peers dropped out: you could use
1407 desired_share_ids= to skip the work (both memory and CPU) of
1408 producing shares for the peers which are no longer available.
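# Example (illustrative sketch, not part of the interface): driving
# encode_proposal() with data_size=3000, k=3, N=10. make_encoder() is a
# hypothetical factory standing in for whatever concrete ICodecEncoder
# implementation is in use.
#
#   encoder = make_encoder()            # hypothetical ICodecEncoder provider
#   encoder.set_params(3000, 3, 10)     # 3000 % 3 == 0, as required
#   d = encoder.encode_proposal(b"\x00" * 3000)
#   def _done(res):
#       shares, shareids = res
#       assert len(shares) == len(shareids) == 10
#       return zip(shareids, shares)    # keep each share paired with its id
#   d.addCallback(_done)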
1412 def encode(inshares, desired_share_ids=None):
1413 """Encode some data. This may be called multiple times. Each call is
1416 inshares is a sequence of length required_shares, containing buffers
1417 (i.e. strings), where each buffer contains the next contiguous
1418 non-overlapping segment of the input data. Each buffer is required to
1419 be the same length, and the sum of the lengths of the buffers is
1420 required to be exactly the data_size promised by set_params(). (This
1421 implies that the data has to be padded before being passed to
1422 encode(), unless of course it already happens to be an even multiple
1423 of required_shares in length.)
1425 Note: the requirement to break up your data into
1426 'required_shares' chunks of exactly the right length before
calling encode() is surprising from the point of view of a user
1428 who doesn't know how FEC works. It feels like an
1429 implementation detail that has leaked outside the abstraction
1430 barrier. Is there a use case in which the data to be encoded
1431 might already be available in pre-segmented chunks, such that
1432 it is faster or less work to make encode() take a list rather
1433 than splitting a single string?
1435 Yes, there is: suppose you are uploading a file with K=64,
1436 N=128, segsize=262,144. Then each in-share will be of size
1437 4096. If you use this .encode() API then your code could first
1438 read each successive 4096-byte chunk from the file and store
1439 each one in a Python string and store each such Python string
1440 in a Python list. Then you could call .encode(), passing that
1441 list as "inshares". The encoder would generate the other 64
1442 "secondary shares" and return to you a new list containing
1443 references to the same 64 Python strings that you passed in
(as the primary shares) plus references to the new 64 Python
strings.
1447 (You could even imagine that your code could use readv() so
1448 that the operating system can arrange to get all of those
1449 bytes copied from the file into the Python list of Python
1450 strings as efficiently as possible instead of having a loop
1451 written in C or in Python to copy the next part of the file
1452 into the next string.)
1454 On the other hand if you instead use the .encode_proposal()
1455 API (above), then your code can first read in all of the
1456 262,144 bytes of the segment from the file into a Python
1457 string, then call .encode_proposal() passing the segment data
1458 as the "data" argument. The encoder would basically first
1459 split the "data" argument into a list of 64 in-shares of 4096
bytes each, and then do the same thing that .encode() does. So
1461 this would result in a little bit more copying of data and a
1462 little bit higher of a "maximum memory usage" during the
1463 process, although it might or might not make a practical
1464 difference for our current use cases.
1466 Note that "inshares" is a strange name for the parameter if
1467 you think of the parameter as being just for feeding in data
1468 to the codec. It makes more sense if you think of the result
1469 of this encoding as being the set of shares from inshares plus
1470 an extra set of "secondary shares" (or "check shares"). It is
1471 a surprising name! If the API is going to be surprising then
1472 the name should be surprising. If we switch to
encode_proposal() above then we should also switch to an
unsurprising name.
1476 'desired_share_ids', if provided, is required to be a sequence of
1477 ints, each of which is required to be >= 0 and < max_shares. If not
1478 provided, encode() will produce 'max_shares' shares, as if
1479 'desired_share_ids' were set to range(max_shares). You might use this
1480 if you initially thought you were going to use 10 peers, started
1481 encoding, and then two of the peers dropped out: you could use
1482 desired_share_ids= to skip the work (both memory and CPU) of
1483 producing shares for the peers which are no longer available.
1485 For each call, encode() will return a Deferred that fires with two
1486 lists, one containing shares and the other containing the shareids.
1487 The get_share_size() method can be used to determine the length of
1488 the share strings returned by encode(). Each shareid is a small
1489 integer, exactly as passed into 'desired_share_ids' (or
1490 range(max_shares), if desired_share_ids was not provided).
1492 The shares and their corresponding shareids are required to be kept
1493 together during storage and retrieval. Specifically, the share data is
1494 useless by itself: the decoder needs to be told which share is which
1495 by providing it with both the shareid and the actual share data.
1497 This function will allocate an amount of memory roughly equal to::
1499 (max_shares - required_shares) * get_share_size()
1501 When combined with the memory that the caller must allocate to
1502 provide the input data, this leads to a memory footprint roughly
1503 equal to the size of the resulting encoded shares (i.e. the expansion
1504 factor times the size of the input segment).
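# Example (illustrative sketch): the same segment fed through encode(),
# which takes the data pre-split into exactly 'required_shares' equal-length
# chunks, as the docstring above requires. make_encoder() is the same
# hypothetical factory as in the encode_proposal() example.
#
#   encoder = make_encoder()
#   encoder.set_params(3000, 3, 10)
#   segment = b"\x00" * 3000
#   k = 3
#   chunk = len(segment) // k           # 1000 bytes per in-share
#   inshares = [segment[i*chunk:(i+1)*chunk] for i in range(k)]
#   d = encoder.encode(inshares)        # fires with (shares, shareids)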
# rejected ideas:
# returning a list of (shareidN,shareN) tuples instead of a pair of
1510 # lists (shareids..,shares..). Brian thought the tuples would
1511 # encourage users to keep the share and shareid together throughout
1512 # later processing, Zooko pointed out that the code to iterate
1513 # through two lists is not really more complicated than using a list
1514 # of tuples and there's also a performance improvement
1516 # having 'data_size' not required to be an integral multiple of
1517 # 'required_shares'. Doing this would require encode() to perform
1518 # padding internally, and we'd prefer to have any padding be done
1519 # explicitly by the caller. Yes, it is an abstraction leak, but
1520 # hopefully not an onerous one.
1523 class ICodecDecoder(Interface):
1524 def set_params(data_size, required_shares, max_shares):
1525 """Set the params. They have to be exactly the same ones that were
1526 used for encoding."""
1528 def get_needed_shares():
1529 """Return the number of shares needed to reconstruct the data.
1530 set_params() is required to be called before this."""
1532 def decode(some_shares, their_shareids):
1533 """Decode a partial list of shares into data.
1535 'some_shares' is required to be a sequence of buffers of sharedata, a
subset of the shares returned by ICodecEncoder.encode(). Each share is
1537 required to be of the same length. The i'th element of their_shareids
1538 is required to be the shareid of the i'th buffer in some_shares.
1540 This returns a Deferred which fires with a sequence of buffers. This
1541 sequence will contain all of the segments of the original data, in
1542 order. The sum of the lengths of all of the buffers will be the
'data_size' value passed into the original ICodecEncoder.set_params()
1544 call. To get back the single original input block of data, use
1545 ''.join(output_buffers), or you may wish to simply write them in
1546 order to an output file.
1548 Note that some of the elements in the result sequence may be
1549 references to the elements of the some_shares input sequence. In
1550 particular, this means that if those share objects are mutable (e.g.
1551 arrays) and if they are changed, then both the input (the
1552 'some_shares' parameter) and the output (the value given when the
1553 deferred is triggered) will change.
The length of 'some_shares' is required to be exactly the value of
'required_shares' passed into the original ICodecEncoder.set_params()
call.
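# Example (illustrative sketch): closing the codec round trip. 'decoder' is
# any concrete ICodecDecoder matching the encoder above; any k=3 of the
# N=10 shares, paired with their shareids, recover the original segment.
#
#   decoder.set_params(3000, 3, 10)     # must match the encoding parameters
#   some_shares = [shares[9], shares[2], shares[5]]
#   their_shareids = [shareids[9], shareids[2], shareids[5]]
#   d = decoder.decode(some_shares, their_shareids)
#   d.addCallback(lambda buffers: b"".join(buffers))  # the original 3000 bytes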
1560 class IEncoder(Interface):
1561 """I take an object that provides IEncryptedUploadable, which provides
1562 encrypted data, and a list of shareholders. I then encode, hash, and
deliver shares to those shareholders. I will compute all the Merkle
hash trees necessary to validate the crypttext that
1565 eventually comes back from the shareholders. I provide the URI Extension
Block Hash, and the encoding parameters, both of which must be included
in the URI.
1569 I do not choose shareholders, that is left to the IUploader. I must be
1570 given a dict of RemoteReferences to storage buckets that are ready and
1571 willing to receive data.
1575 """Specify the number of bytes that will be encoded. This must be
1576 peformed before get_serialized_params() can be called.
1578 def set_params(params):
1579 """Override the default encoding parameters. 'params' is a tuple of
1580 (k,d,n), where 'k' is the number of required shares, 'd' is the
servers_of_happiness, and 'n' is the total number of shares that will
be created.
1584 Encoding parameters can be set in three ways. 1: The Encoder class
1585 provides defaults (3/7/10). 2: the Encoder can be constructed with
1586 an 'options' dictionary, in which the
'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
1588 set_params((k,d,n)) can be called.
1590 If you intend to use set_params(), you must call it before
1591 get_share_size or get_param are called.
1594 def set_encrypted_uploadable(u):
1595 """Provide a source of encrypted upload data. 'u' must implement
1596 IEncryptedUploadable.
1598 When this is called, the IEncryptedUploadable will be queried for its
1599 length and the storage_index that should be used.
1601 This returns a Deferred that fires with this Encoder instance.
1603 This must be performed before start() can be called.
1606 def get_param(name):
1607 """Return an encoding parameter, by name.
1609 'storage_index': return a string with the (16-byte truncated SHA-256
hash) storage index to which these shares should be
pushed.
1613 'share_counts': return a tuple describing how many shares are used:
1614 (needed_shares, servers_of_happiness, total_shares)
'num_segments': return an int with the number of segments that
will be encoded.
1619 'segment_size': return an int with the size of each segment.
1621 'block_size': return the size of the individual blocks that will
1622 be delivered to a shareholder's put_block() method. By
1623 knowing this, the shareholder will be able to keep all
1624 blocks in a single file and still provide random access
1625 when reading them. # TODO: can we avoid exposing this?
1627 'share_size': an int with the size of the data that will be stored
on each shareholder. This is the aggregate amount of data
1629 that will be sent to the shareholder, summed over all
1630 the put_block() calls I will ever make. It is useful to
1631 determine this size before asking potential
1632 shareholders whether they will grant a lease or not,
1633 since their answers will depend upon how much space we
1634 need. TODO: this might also include some amount of
1635 overhead, like the size of all the hashes. We need to
1636 decide whether this is useful or not.
1638 'serialized_params': a string with a concise description of the
1639 codec name and its parameters. This may be passed
1640 into the IUploadable to let it make sure that
1641 the same file encoded with different parameters
1642 will result in different storage indexes.
1644 Once this is called, set_size() and set_params() may not be called.
1647 def set_shareholders(shareholders, servermap):
1648 """Tell the encoder where to put the encoded shares. 'shareholders'
1649 must be a dictionary that maps share number (an integer ranging from
1650 0 to n-1) to an instance that provides IStorageBucketWriter.
1651 'servermap' is a dictionary that maps share number (as defined above)
to a set of peerids. This must be performed before start() can be
called.
def start():
1656 """Begin the encode/upload process. This involves reading encrypted
1657 data from the IEncryptedUploadable, encoding it, uploading the shares
1658 to the shareholders, then sending the hash trees.
1660 set_encrypted_uploadable() and set_shareholders() must be called
1661 before this can be invoked.
1663 This returns a Deferred that fires with a verify cap when the upload
1664 process is complete. The verifycap, plus the encryption key, is
1665 sufficient to construct the read cap.
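# Example (illustrative sketch): the calling sequence implied by the
# docstrings above, assuming 'encoder' provides IEncoder, 'eu' provides
# IEncryptedUploadable, and 'buckets'/'servermap' came from peer selection.
#
#   encoder.set_params((3, 7, 10))      # optional override: (k, happy, N)
#   d = encoder.set_encrypted_uploadable(eu)
#   def _ready(enc):
#       enc.set_shareholders(buckets, servermap)
#       return enc.start()              # fires with the verify cap
#   d.addCallback(_ready)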
1668 class IDecoder(Interface):
1669 """I take a list of shareholders and some setup information, then
1670 download, validate, decode, and decrypt data from them, writing the
1671 results to an output file.
1673 I do not locate the shareholders, that is left to the IDownloader. I must
be given a dict of RemoteReferences to storage buckets that are ready to
send data.
def setup(outfile):
1679 """I take a file-like object (providing write and close) to which all
1680 the plaintext data will be written.
1682 TODO: producer/consumer . Maybe write() should return a Deferred that
1683 indicates when it will accept more data? But probably having the
1684 IDecoder be a producer is easier to glue to IConsumer pieces.
1687 def set_shareholders(shareholders):
1688 """I take a dictionary that maps share identifiers (small integers)
to RemoteReferences that provide RIBucketReader. This must be called
before start().
def start():
1693 """I start the download. This process involves retrieving data and
1694 hash chains from the shareholders, using the hashes to validate the
1695 data, decoding the shares into segments, decrypting the segments,
1696 then writing the resulting plaintext to the output file.
I return a Deferred that will fire (with self) when the download is
complete.
1702 class IDownloadTarget(Interface):
1703 # Note that if the IDownloadTarget is also an IConsumer, the downloader
1704 # will register itself as a producer. This allows the target to invoke
1705 # downloader.pauseProducing, resumeProducing, and stopProducing.
1707 """Called before any calls to write() or close(). If an error
1708 occurs before any data is available, fail() may be called without
1709 a previous call to open().
'size' is the length of the file being downloaded, in bytes."""
def write(data):
"""Output some data to the target."""
def close():
"""Inform the target that there is no more data to be written."""
def fail(why):
"""fail() is called to indicate that the download has failed. 'why'
1719 is a Failure object indicating what went wrong. No further methods
1720 will be invoked on the IDownloadTarget after fail()."""
1721 def register_canceller(cb):
1722 """The CiphertextDownloader uses this to register a no-argument function
1723 that the target can call to cancel the download. Once this canceller
1724 is invoked, no further calls to write() or close() will be made."""
1726 """When the CiphertextDownloader is done, this finish() function will be
1727 called. Whatever it returns will be returned to the invoker of
1728 Downloader.download.
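# Example (illustrative sketch): a minimal IDownloadTarget that writes the
# plaintext to a local file. Error handling is elided, and this target
# never exercises the canceller it is given.
#
#   from zope.interface import implementer
#
#   @implementer(IDownloadTarget)
#   class FileTarget(object):
#       def __init__(self, path):
#           self._path = path
#           self._f = None
#       def open(self, size):
#           self._f = open(self._path, "wb")
#       def write(self, data):
#           self._f.write(data)
#       def close(self):
#           self._f.close()
#       def fail(self, why):
#           if self._f:
#               self._f.close()
#       def register_canceller(self, cb):
#           pass
#       def finish(self):
#           return self._path           # handed back by Downloader.download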
1731 class IDownloader(Interface):
1732 def download(uri, target):
1733 """Perform a CHK download, sending the data to the given target.
1734 'target' must provide IDownloadTarget.
1736 Returns a Deferred that fires (with the results of target.finish)
1737 when the download is finished, or errbacks if something went wrong."""
1739 class IEncryptedUploadable(Interface):
1740 def set_upload_status(upload_status):
1741 """Provide an IUploadStatus object that should be filled with status
1742 information. The IEncryptedUploadable is responsible for setting
1743 key-determination progress ('chk'), size, storage_index, and
1744 ciphertext-fetch progress. It may delegate some of this
1745 responsibility to others, in particular to the IUploadable."""
1748 """This behaves just like IUploadable.get_size()."""
1750 def get_all_encoding_parameters():
1751 """Return a Deferred that fires with a tuple of
1752 (k,happy,n,segment_size). The segment_size will be used as-is, and
1753 must match the following constraints: it must be a multiple of k, and
1754 it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored
as padding).
This usually passes through to the IUploadable method of the same
name.
1761 The encoder strictly obeys the values returned by this method. To
1762 make an upload use non-default encoding parameters, you must arrange
1763 to control the values that this method returns.
1766 def get_storage_index():
1767 """Return a Deferred that fires with a 16-byte storage index.
1770 def read_encrypted(length, hash_only):
1771 """This behaves just like IUploadable.read(), but returns crypttext
1772 instead of plaintext. If hash_only is True, then this discards the
1773 data (and returns an empty list); this improves efficiency when
1774 resuming an interrupted upload (where we need to compute the
1775 plaintext hashes, but don't need the redundant encrypted data)."""
1777 def get_plaintext_hashtree_leaves(first, last, num_segments):
1778 """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
1779 plaintext segments, i.e. get the tagged hashes of the given segments.
1780 The segment size is expected to be generated by the
1781 IEncryptedUploadable before any plaintext is read or ciphertext
produced, so that the segment hashes can be generated with only a
single pass over the plaintext.
1785 This returns a Deferred which fires with a sequence of hashes, using:
1787 tuple(segment_hashes[first:last])
1789 'num_segments' is used to assert that the number of segments that the
1790 IEncryptedUploadable handled matches the number of segments that the
1791 encoder was expecting.
1793 This method must not be called until the final byte has been read
1794 from read_encrypted(). Once this method is called, read_encrypted()
1795 can never be called again.
1798 def get_plaintext_hash():
1799 """OBSOLETE; Get the hash of the whole plaintext.
1801 This returns a Deferred which fires with a tagged SHA-256 hash of the
1802 whole plaintext, obtained from hashutil.plaintext_hash(data).
1806 """Just like IUploadable.close()."""
1808 class IUploadable(Interface):
1809 def set_upload_status(upload_status):
1810 """Provide an IUploadStatus object that should be filled with status
1811 information. The IUploadable is responsible for setting
1812 key-determination progress ('chk')."""
1814 def set_default_encoding_parameters(params):
1815 """Set the default encoding parameters, which must be a dict mapping
1816 strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1817 'max_segment_size'. These might have an influence on the final
1818 encoding parameters returned by get_all_encoding_parameters(), if the
1819 Uploadable doesn't have more specific preferences.
1821 This call is optional: if it is not used, the Uploadable will use
1822 some built-in defaults. If used, this method must be called before
1823 any other IUploadable methods to have any effect.
1827 """Return a Deferred that will fire with the length of the data to be
1828 uploaded, in bytes. This will be called before the data is actually
1829 used, to compute encoding parameters.
1832 def get_all_encoding_parameters():
1833 """Return a Deferred that fires with a tuple of
1834 (k,happy,n,segment_size). The segment_size will be used as-is, and
1835 must match the following constraints: it must be a multiple of k, and
1836 it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored
as padding).
1840 The relative values of k and n allow some IUploadables to request
better redundancy than others (in exchange for consuming more space
in the grid).
1844 Larger values of segment_size reduce hash overhead, while smaller
1845 values reduce memory footprint and cause data to be delivered in
1846 smaller pieces (which may provide a smoother and more predictable
1847 download experience).
1849 The encoder strictly obeys the values returned by this method. To
1850 make an upload use non-default encoding parameters, you must arrange
1851 to control the values that this method returns. One way to influence
1852 them may be to call set_encoding_parameters() before calling
1853 get_all_encoding_parameters().
1856 def get_encryption_key():
1857 """Return a Deferred that fires with a 16-byte AES key. This key will
be used to encrypt the data. The key will also be hashed to derive
the storage index.
1861 Uploadables which want to achieve convergence should hash their file
1862 contents and the serialized_encoding_parameters to form the key
1863 (which of course requires a full pass over the data). Uploadables can
use the upload.ConvergentUploadMixin class to achieve this
automatically.
1867 Uploadables which do not care about convergence (or do not wish to
1868 make multiple passes over the data) can simply return a
1869 strongly-random 16 byte string.
1871 get_encryption_key() may be called multiple times: the IUploadable is
1872 required to return the same value each time.
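# Example (hypothetical sketch, not the exact tagged-hash construction used
# by the real code): a convergent Uploadable could derive its key from the
# file contents plus the serialized encoding parameters, so that the same
# file encoded the same way always yields the same key.
#
#   import hashlib
#   def convergent_key(contents, serialized_params, convergence_secret):
#       h = hashlib.sha256()
#       h.update(convergence_secret)    # per-client "convergence" secret
#       h.update(serialized_params)
#       h.update(contents)              # requires a full pass over the data
#       return h.digest()[:16]          # 16-byte AES key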
1876 """Return a Deferred that fires with a list of strings (perhaps with
1877 only a single element) which, when concatenated together, contain the
1878 next 'length' bytes of data. If EOF is near, this may provide fewer
1879 than 'length' bytes. The total number of bytes provided by read()
1880 before it signals EOF must equal the size provided by get_size().
1882 If the data must be acquired through multiple internal read
1883 operations, returning a list instead of a single string may help to
1884 reduce string copies. However, the length of the concatenated strings
1885 must equal the amount of data requested, unless EOF is encountered.
1886 Long reads, or short reads without EOF, are not allowed. read()
1887 should return the same amount of data as a local disk file read, just
1888 in a different shape and asynchronously.
'length' will typically be equal to (min(get_size(),1MB)/req_shares),
so with the default req_shares=3, a 10kB file means length=3kB, a
100kB file means length=30kB, and a >=1MB file means length=300kB.
1894 This method provides for a single full pass through the data. Later
1895 use cases may desire multiple passes or access to only parts of the
1896 data (such as a mutable file making small edits-in-place). This API
1897 will be expanded once those use cases are better understood.
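# Example (illustrative sketch): draining an IUploadable under the one-pass
# read() contract above, inside a Twisted inlineCallbacks generator.
#
#   from twisted.internet import defer
#
#   @defer.inlineCallbacks
#   def drain(uploadable, chunk_size=300*1024):
#       size = yield uploadable.get_size()
#       remaining = size
#       while remaining > 0:
#           strings = yield uploadable.read(min(chunk_size, remaining))
#           data = "".join(strings)     # short only at EOF
#           remaining -= len(data)
#       uploadable.close()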
1901 """The upload is finished, and whatever filehandle was in use may be
1905 class IMutableUploadable(Interface):
1907 I represent content that is due to be uploaded to a mutable filecap.
1909 # This is somewhat simpler than the IUploadable interface above
1910 # because mutable files do not need to be concerned with possibly
1911 # generating a CHK, nor with per-file keys. It is a subset of the
1912 # methods in IUploadable, though, so we could just as well implement
1913 # the mutable uploadables as IUploadables that don't happen to use
1914 # those methods (with the understanding that the unused methods will
1915 # never be called on such objects)
def get_size():
Returns a Deferred that fires with the size of the content held
by the uploadable.
def read(length):
Returns a list of strings which, when concatenated, are the next
1925 length bytes of the file, or fewer if there are fewer bytes
1926 between the current location and the end of the file.
def close():
The process that used the Uploadable is finished using it, so
1932 the uploadable may be closed.
1935 class IUploadResults(Interface):
1936 """I am returned by immutable upload() methods and contain the results of
1939 Note that some of my methods return empty values (0 or an empty dict)
1940 when called for non-distributed LIT files."""
1942 def get_file_size():
1943 """Return the file size, in bytes."""
1945 """Return the (string) URI of the object uploaded, a CHK readcap."""
1946 def get_ciphertext_fetched():
1947 """Return the number of bytes fetched by the helpe for this upload,
1948 or 0 if the helper did not need to fetch any bytes (or if there was
1950 def get_preexisting_shares():
1951 """Return the number of shares that were already present in the grid."""
1952 def get_pushed_shares():
1953 """Return the number of shares that were uploaded."""
1955 """Return a dict mapping share identifier to set of IServer
1956 instances. This indicates which servers were given which shares. For
1957 immutable files, the shareid is an integer (the share number, from 0
1958 to N-1). For mutable files, it is a string of the form
1959 'seq%d-%s-sh%d', containing the sequence number, the roothash, and
1960 the share number."""
1961 def get_servermap():
1962 """Return dict mapping IServer instance to a set of share numbers."""
1964 """Return dict of timing information, mapping name to seconds. All
1966 total : total upload time, start to finish
1967 storage_index : time to compute the storage index
1968 peer_selection : time to decide which peers will be used
1969 contacting_helper : initial helper query to upload/no-upload decision
1970 helper_total : initial helper query to helper finished pushing
1971 cumulative_fetch : helper waiting for ciphertext requests
1972 total_fetch : helper start to last ciphertext response
1973 cumulative_encoding : just time spent in zfec
1974 cumulative_sending : just time spent waiting for storage servers
1975 hashes_and_close : last segment push to shareholder close
1976 total_encode_and_push : first encode to shareholder close
1978 def get_uri_extension_data():
1979 """Return the dict of UEB data created for this file."""
1980 def get_verifycapstr():
1981 """Return the (string) verify-cap URI for the uploaded object."""
1983 class IDownloadResults(Interface):
1984 """I am created internally by download() methods. I contain a number of
1985 public attributes which contain details about the download process.::
1987 .file_size : the size of the file, in bytes
1988 .servers_used : set of server peerids that were used during download
1989 .server_problems : dict mapping server peerid to a problem string. Only
servers that had problems (bad hashes, disconnects)
are listed here.
1992 .servermap : dict mapping server peerid to a set of share numbers. Only
1993 servers that had any shares are listed here.
1994 .timings : dict of timing information, mapping name to seconds (float)
1995 peer_selection : time to ask servers about shares
1996 servers_peer_selection : dict of peerid to DYHB-query time
1997 uri_extension : time to fetch a copy of the URI extension block
1998 hashtrees : time to fetch the hash trees
1999 segments : time to fetch, decode, and deliver segments
2000 cumulative_fetch : time spent waiting for storage servers
2001 cumulative_decode : just time spent in zfec
2002 cumulative_decrypt : just time spent in decryption
2003 total : total download time, start to finish
2004 fetch_per_server : dict of server to list of per-segment fetch times
2008 class IUploader(Interface):
2009 def upload(uploadable):
2010 """Upload the file. 'uploadable' must impement IUploadable. This
2011 returns a Deferred which fires with an IUploadResults instance, from
2012 which the URI of the file can be obtained as results.uri ."""
2014 def upload_ssk(write_capability, new_version, uploadable):
2015 """TODO: how should this work?"""
2017 class ICheckable(Interface):
2018 def check(monitor, verify=False, add_lease=False):
2019 """Check up on my health, optionally repairing any problems.
2021 This returns a Deferred that fires with an instance that provides
ICheckResults, or None if the object is non-distributed (i.e. LIT
files).
2025 The monitor will be checked periodically to see if the operation has
2026 been cancelled. If so, no new queries will be sent, and the Deferred
will fire (with an OperationCancelledError) immediately.
2029 Filenodes and dirnodes (which provide IFilesystemNode) are also
2030 checkable. Instances that represent verifier-caps will be checkable
2031 but not downloadable. Some objects (like LIT files) do not actually
2032 live in the grid, and their checkers return None (non-distributed
2033 files are always healthy).
2035 If verify=False, a relatively lightweight check will be performed: I
2036 will ask all servers if they have a share for me, and I will believe
2037 whatever they say. If there are at least N distinct shares on the
2038 grid, my results will indicate r.is_healthy()==True. This requires a
2039 roundtrip to each server, but does not transfer very much data, so
the network bandwidth consumed is fairly low.
2042 If verify=True, a more resource-intensive check will be performed:
2043 every share will be downloaded, and the hashes will be validated on
2044 every bit. I will ignore any shares that failed their hash checks. If
2045 there are at least N distinct valid shares on the grid, my results
2046 will indicate r.is_healthy()==True. This requires N/k times as much
2047 download bandwidth (and server disk IO) as a regular download. If a
2048 storage server is holding a corrupt share, or is experiencing memory
2049 failures during retrieval, or is malicious or buggy, then
2050 verification will detect the problem, but checking will not.
2052 If add_lease=True, I will ensure that an up-to-date lease is present
on each share. The lease secrets will be derived from my node secret
2054 (in BASEDIR/private/secret), so either I will add a new lease to the
2055 share, or I will merely renew the lease that I already had. In a
2056 future version of the storage-server protocol (once Accounting has
2057 been implemented), there may be additional options here to define the
2058 kind of lease that is obtained (which account number to claim, etc).
2060 TODO: any problems seen during checking will be reported to the
2061 health-manager.furl, a centralized object which is responsible for
figuring out why files are unhealthy so corrective action can be
taken.
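# Example (illustrative sketch): a cheap check first, escalating to a full
# verify only when the cheap pass looks unhealthy. 'node' is any ICheckable
# provider and 'monitor' comes from the surrounding operation framework.
#
#   d = node.check(monitor, verify=False, add_lease=True)
#   def _checked(cr):
#       if cr is None or cr.is_healthy():
#           return cr                   # LIT object, or healthy file
#       return node.check(monitor, verify=True)   # expensive full verify
#   d.addCallback(_checked)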
2066 def check_and_repair(monitor, verify=False, add_lease=False):
2067 """Like check(), but if the file/directory is not healthy, attempt to
2070 Any non-healthy result will cause an immediate repair operation, to
2071 generate and upload new shares. After repair, the file will be as
2072 healthy as we can make it. Details about what sort of repair is done
2073 will be put in the check-and-repair results. The Deferred will not
2074 fire until the repair is complete.
2076 This returns a Deferred which fires with an instance of
2077 ICheckAndRepairResults."""
2079 class IDeepCheckable(Interface):
2080 def start_deep_check(verify=False, add_lease=False):
2081 """Check upon the health of me and everything I can reach.
This is a recursive form of check(), usable only on dirnodes.
I return a Monitor, with results that are an IDeepCheckResults
object.
2088 TODO: If any of the directories I traverse are unrecoverable, the
2089 Monitor will report failure. If any of the files I check upon are
2090 unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a
failure.
2095 def start_deep_check_and_repair(verify=False, add_lease=False):
2096 """Check upon the health of me and everything I can reach. Repair
2097 anything that isn't healthy.
This is a recursive form of check_and_repair(), usable only on
dirnodes.
2102 I return a Monitor, with results that are an
2103 IDeepCheckAndRepairResults object.
2105 TODO: If any of the directories I traverse are unrecoverable, the
2106 Monitor will report failure. If any of the files I check upon are
2107 unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a
failure.
2112 class ICheckResults(Interface):
2113 """I contain the detailed results of a check/verify operation.
2116 def get_storage_index():
2117 """Return a string with the (binary) storage index."""
2118 def get_storage_index_string():
2119 """Return a string with the (printable) abbreviated storage index."""
2121 """Return the (string) URI of the object that was checked."""
2124 """Return a boolean, True if the file/dir is fully healthy, False if
it is damaged in any way. Non-distributed LIT files always return
True.
2128 def is_recoverable():
2129 """Return a boolean, True if the file/dir can be recovered, False if
2130 not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
2131 files always return True."""
2133 def needs_rebalancing():
2134 """Return a boolean, True if the file/dirs reliability could be
2135 improved by moving shares to new servers. Non-distributed LIT files
2136 always return False."""
2138 # the following methods all return None for non-distributed LIT files
2140 def get_encoding_needed():
2141 """Return 'k', the number of shares required for recovery"""
2142 def get_encoding_expected():
2143 """Return 'N', the number of total shares generated"""
2145 def get_share_counter_good():
2146 """Return the number of distinct good shares that were found. For
2147 mutable files, this counts shares for the 'best' version."""
2148 def get_share_counter_wrong():
2149 """For mutable files, return the number of shares for versions other
2150 than the 'best' one (which is defined as being the recoverable
2151 version with the highest sequence number, then the highest roothash).
2152 These are either leftover shares from an older version (perhaps on a
2153 server that was offline when an update occurred), shares from an
2154 unrecoverable newer version, or shares from an alternate current
2155 version that results from an uncoordinated write collision. For a
healthy file, this will equal 0. For immutable files, this will
always be zero.
2159 def get_corrupt_shares():
2160 """Return a list of 'share locators', one for each share that was
2161 found to be corrupt (integrity failure). Each share locator is a list
2162 of (IServer, storage_index, sharenum)."""
2164 def get_incompatible_shares():
2165 """Return a list of 'share locators', one for each share that was
2166 found to be of an unknown format. Each share locator is a list of
2167 (IServer, storage_index, sharenum)."""
2169 def get_servers_responding():
2170 """Return a list of IServer objects, one for each server which
2171 responded to the share query (even if they said they didn't have
2172 shares, and even if they said they did have shares but then didn't
2173 send them when asked, or dropped the connection, or returned a
2174 Failure, and even if they said they did have shares and sent
2175 incorrect ones when asked)"""
2177 def get_host_counter_good_shares():
2178 """Return the number of distinct storage servers with good shares. If
2179 this number is less than get_share_counters()[good], then some shares
2180 are doubled up, increasing the correlation of failures. This
2181 indicates that one or more shares should be moved to an otherwise
2182 unused server, if one is available.
2185 def get_version_counter_recoverable():
2186 """Return the number of recoverable versions of the file. For a
2187 healthy file, this will equal 1."""
2189 def get_version_counter_unrecoverable():
2190 """Return the number of unrecoverable versions of the file. For a
2191 healthy file, this will be 0."""
2194 """Return a dict mapping share identifier to list of IServer objects.
2195 This indicates which servers are holding which shares. For immutable
2196 files, the shareid is an integer (the share number, from 0 to N-1).
2197 For mutable files, it is a string of the form 'seq%d-%s-sh%d',
2198 containing the sequence number, the roothash, and the share number."""
2201 """Return a string with a brief (one-line) summary of the results."""
2204 """Return a list of strings with more detailed results."""
2206 class ICheckAndRepairResults(Interface):
2207 """I contain the detailed results of a check/verify/repair operation.
2209 The IFilesystemNode.check()/verify()/repair() methods all return
2210 instances that provide ICheckAndRepairResults.
2213 def get_storage_index():
2214 """Return a string with the (binary) storage index."""
2215 def get_storage_index_string():
2216 """Return a string with the (printable) abbreviated storage index."""
2217 def get_repair_attempted():
2218 """Return a boolean, True if a repair was attempted. We might not
2219 attempt to repair the file because it was healthy, or healthy enough
2220 (i.e. some shares were missing but not enough to exceed some
2221 threshold), or because we don't know how to repair this object."""
2222 def get_repair_successful():
2223 """Return a boolean, True if repair was attempted and the file/dir
2224 was fully healthy afterwards. False if no repair was attempted or if
2225 a repair attempt failed."""
2226 def get_pre_repair_results():
2227 """Return an ICheckResults instance that describes the state of the
2228 file/dir before any repair was attempted."""
2229 def get_post_repair_results():
2230 """Return an ICheckResults instance that describes the state of the
2231 file/dir after any repair was attempted. If no repair was attempted,
2232 the pre-repair and post-repair results will be identical."""
2235 class IDeepCheckResults(Interface):
2236 """I contain the results of a deep-check operation.
2238 This is returned by a call to ICheckable.deep_check().
2241 def get_root_storage_index_string():
2242 """Return the storage index (abbreviated human-readable string) of
2243 the first object checked."""
2245 """Return a dictionary with the following keys::
2247 count-objects-checked: count of how many objects were checked
count-objects-healthy: how many of those objects were completely
healthy
2250 count-objects-unhealthy: how many were damaged in some way
2251 count-objects-unrecoverable: how many were unrecoverable
2252 count-corrupt-shares: how many shares were found to have
2253 corruption, summed over all objects
2257 def get_corrupt_shares():
2258 """Return a set of (IServer, storage_index, sharenum) for all shares
2259 that were found to be corrupt. storage_index is binary."""
2260 def get_all_results():
2261 """Return a dictionary mapping pathname (a tuple of strings, ready to
2262 be slash-joined) to an ICheckResults instance, one for each object
2263 that was checked."""
2265 def get_results_for_storage_index(storage_index):
2266 """Retrive the ICheckResults instance for the given (binary)
2267 storage index. Raises KeyError if there are no results for that
2271 """Return a dictionary with the same keys as
2272 IDirectoryNode.deep_stats()."""
2274 class IDeepCheckAndRepairResults(Interface):
2275 """I contain the results of a deep-check-and-repair operation.
2277 This is returned by a call to ICheckable.deep_check_and_repair().
2280 def get_root_storage_index_string():
2281 """Return the storage index (abbreviated human-readable string) of
2282 the first object checked."""
2284 """Return a dictionary with the following keys::
2286 count-objects-checked: count of how many objects were checked
count-objects-healthy-pre-repair: how many of those objects were
completely healthy (before any repair)
count-objects-unhealthy-pre-repair: how many were damaged in
some way (before any repair)
count-objects-unrecoverable-pre-repair: how many were unrecoverable
count-objects-healthy-post-repair: how many of those objects were
completely healthy (after any repair)
count-objects-unhealthy-post-repair: how many were damaged in
some way (after any repair)
count-objects-unrecoverable-post-repair: how many were
unrecoverable (after any repair)
count-repairs-attempted: repairs were attempted on this many
objects. The count-repairs- keys will
always be provided, however unless
repair=true is present, they will all
be zero
count-repairs-successful: how many repairs resulted in healthy
objects
count-repairs-unsuccessful: how many repairs did not result
in completely healthy objects
count-corrupt-shares-pre-repair: how many shares were found to
have corruption, summed over all
objects examined (before any repair)
count-corrupt-shares-post-repair: how many shares were found to
have corruption, summed over all
objects examined (after any repair)
def get_stats():
2320 """Return a dictionary with the same keys as
2321 IDirectoryNode.deep_stats()."""
2323 def get_corrupt_shares():
2324 """Return a set of (IServer, storage_index, sharenum) for all shares
2325 that were found to be corrupt before any repair was attempted.
2326 storage_index is binary.
2328 def get_remaining_corrupt_shares():
2329 """Return a set of (IServer, storage_index, sharenum) for all shares
2330 that were found to be corrupt after any repair was completed.
2331 storage_index is binary. These are shares that need manual inspection
2332 and probably deletion.
2334 def get_all_results():
2335 """Return a dictionary mapping pathname (a tuple of strings, ready to
2336 be slash-joined) to an ICheckAndRepairResults instance, one for each
2337 object that was checked."""
2339 def get_results_for_storage_index(storage_index):
2340 """Retrive the ICheckAndRepairResults instance for the given (binary)
2341 storage index. Raises KeyError if there are no results for that
2345 class IRepairable(Interface):
2346 def repair(check_results):
2347 """Attempt to repair the given object. Returns a Deferred that fires
2348 with a IRepairResults object.
2350 I must be called with an object that implements ICheckResults, as
2351 proof that you have actually discovered a problem with this file. I
2352 will use the data in the checker results to guide the repair process,
2353 such as which servers provided bad data and should therefore be
avoided. The ICheckResults object is inside the
ICheckAndRepairResults object, which is returned by the
ICheckable.check_and_repair() method::
d = filenode.check_and_repair(monitor)
2359 def _got_results(check_and_repair_results):
2360 check_results = check_and_repair_results.get_pre_repair_results()
2361 return filenode.repair(check_results)
2362 d.addCallback(_got_results)
2366 class IRepairResults(Interface):
2367 """I contain the results of a repair operation."""
def get_successful():
2369 """Returns a boolean: True if the repair made the file healthy, False
2370 if not. Repair failure generally indicates a file that has been
2371 damaged beyond repair."""
2374 class IClient(Interface):
2375 def upload(uploadable):
2376 """Upload some data into a CHK, get back the UploadResults for it.
2377 @param uploadable: something that implements IUploadable
2378 @return: a Deferred that fires with the UploadResults instance.
2379 To get the URI for this file, use results.uri .
2382 def create_mutable_file(contents=""):
2383 """Create a new mutable file (with initial) contents, get back the
2386 @param contents: (bytestring, callable, or None): this provides the
2387 initial contents of the mutable file. If 'contents' is a bytestring,
2388 it will be used as-is. If 'contents' is a callable, it will be
2389 invoked with the new MutableFileNode instance and is expected to
2390 return a bytestring with the initial contents of the file (the
2391 callable can use node.get_writekey() to decide how to encrypt the
2392 initial contents, e.g. for a brand new dirnode with initial
2393 children). contents=None is equivalent to an empty string. Using
2394 content_maker= is more efficient than creating a mutable file and
2395 setting its contents in two separate operations.
2397 @return: a Deferred that fires with an IMutableFileNode instance.
2400 def create_dirnode(initial_children={}):
2401 """Create a new unattached dirnode, possibly with initial children.
2403 @param initial_children: dict with keys that are unicode child names,
2404 and values that are (childnode, metadata) tuples.
2406 @return: a Deferred that fires with the new IDirectoryNode instance.
2409 def create_node_from_uri(uri, rouri):
2410 """Create a new IFilesystemNode instance from the uri, synchronously.
2411 @param uri: a string or IURI-providing instance, or None. This could
2412 be for a LiteralFileNode, a CHK file node, a mutable file
2413 node, or a directory node
2414 @param rouri: a string or IURI-providing instance, or None. If the
2415 main uri is None, I will use the rouri instead. If I
2416 recognize the format of the main uri, I will ignore the
2417 rouri (because it can be derived from the writecap).
2419 @return: an instance that provides IFilesystemNode (or more usefully
2420 one of its subclasses). File-specifying URIs will result in
2421 IFileNode-providing instances, like ImmutableFileNode,
2422 LiteralFileNode, or MutableFileNode. Directory-specifying
URIs will result in IDirectoryNode-providing instances, like
DirectoryNode.
2427 class INodeMaker(Interface):
2428 """The NodeMaker is used to create IFilesystemNode instances. It can
2429 accept a filecap/dircap string and return the node right away. It can
2430 also create new nodes (i.e. upload a file, or create a mutable file)
2431 asynchronously. Once you have one of these nodes, you can use other
2432 methods to determine whether it is a file or directory, and to download
2433 or modify its contents.
2435 The NodeMaker encapsulates all the authorities that these
2436 IFilesystemNodes require (like references to the StorageFarmBroker). Each
2437 Tahoe process will typically have a single NodeMaker, but unit tests may
2438 create simplified/mocked forms for testing purposes.
2440 def create_from_cap(writecap, readcap=None, **kwargs):
2441 """I create an IFilesystemNode from the given writecap/readcap. I can
2442 only provide nodes for existing file/directory objects: use my other
2443 methods to create new objects. I return synchronously."""
2445 def create_mutable_file(contents=None, keysize=None):
2446 """I create a new mutable file, and return a Deferred which will fire
2447 with the IMutableFileNode instance when it is ready. If contents= is
2448 provided (a bytestring), it will be used as the initial contents of
2449 the new file, otherwise the file will contain zero bytes. keysize= is
for use by unit tests, to create mutable files that are smaller than
usual.
2453 def create_new_mutable_directory(initial_children={}):
2454 """I create a new mutable directory, and return a Deferred which will
2455 fire with the IDirectoryNode instance when it is ready. If
2456 initial_children= is provided (a dict mapping unicode child name to
2457 (childnode, metadata_dict) tuples), the directory will be populated
2458 with those children, otherwise it will be empty."""
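# Example (illustrative sketch): typical NodeMaker usage, assuming
# 'nodemaker' is the process-wide INodeMaker instance.
#
#   node = nodemaker.create_from_cap(writecap)        # synchronous
#   d = nodemaker.create_mutable_file(contents="hello")
#   def _created(filenode):
#       children = {u"file": (filenode, {})}          # name -> (node, metadata)
#       return nodemaker.create_new_mutable_directory(children)
#   d.addCallback(_created)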
2460 class IClientStatus(Interface):
2461 def list_all_uploads():
2462 """Return a list of uploader objects, one for each upload which
2463 currently has an object available (tracked with weakrefs). This is
2464 intended for debugging purposes."""
2465 def list_active_uploads():
2466 """Return a list of active IUploadStatus objects."""
2467 def list_recent_uploads():
2468 """Return a list of IUploadStatus objects for the most recently
2471 def list_all_downloads():
2472 """Return a list of downloader objects, one for each download which
2473 currently has an object available (tracked with weakrefs). This is
2474 intended for debugging purposes."""
2475 def list_active_downloads():
2476 """Return a list of active IDownloadStatus objects."""
2477 def list_recent_downloads():
2478 """Return a list of IDownloadStatus objects for the most recently
2479 started downloads."""
2481 class IUploadStatus(Interface):
2483 """Return a timestamp (float with seconds since epoch) indicating
2484 when the operation was started."""
2485 def get_storage_index():
2486 """Return a string with the (binary) storage index in use on this
upload. Returns None if the storage index has not yet been
determined.
def get_size():
"""Return an integer with the number of bytes that will eventually
2491 be uploaded for this file. Returns None if the size is not yet known.
2494 """Return True if this upload is using a Helper, False if not."""
2496 """Return a string describing the current state of the upload
2499 """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
2500 each from 0.0 to 1.0 . 'chk' describes how much progress has been
2501 made towards hashing the file to determine a CHK encryption key: if
2502 non-convergent encryption is in use, this will be trivial, otherwise
2503 the whole file must be hashed. 'ciphertext' describes how much of the
2504 ciphertext has been pushed to the helper, and is '1.0' for non-helper
2505 uploads. 'encode_and_push' describes how much of the encode-and-push
2506 process has finished: for helper uploads this is dependent upon the
2507 helper providing progress reports. It might be reasonable to add all
2508 three numbers and report the sum to the user."""
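# Example (illustrative sketch): collapsing the three progress components
# into one number for a status page, normalized to 0.0 .. 1.0 rather than
# the 0.0 .. 3.0 sum suggested above.
#
#   def overall_progress(upload_status):
#       chk, ciphertext, encode_and_push = upload_status.get_progress()
#       return (chk + ciphertext + encode_and_push) / 3.0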
2510 """Return True if the upload is currently active, False if not."""
2512 """Return an instance of UploadResults (which contains timing and
2513 sharemap information). Might return None if the upload is not yet
2516 """Each upload status gets a unique number: this method returns that
2517 number. This provides a handle to this particular upload, so a web
2518 page can generate a suitable hyperlink."""
2520 class IDownloadStatus(Interface):
2522 """Return a timestamp (float with seconds since epoch) indicating
2523 when the operation was started."""
2524 def get_storage_index():
2525 """Return a string with the (binary) storage index in use on this
download. This may be None if there is no storage index (i.e. LIT
files).
def get_size():
"""Return an integer with the number of bytes that will eventually be
2530 retrieved for this file. Returns None if the size is not yet known.
2533 """Return True if this download is using a Helper, False if not."""
2535 """Return a string describing the current state of the download
2538 """Returns a float (from 0.0 to 1.0) describing the amount of the
2539 download that has completed. This value will remain at 0.0 until the
2540 first byte of plaintext is pushed to the download target."""
2542 """Return True if the download is currently active, False if not."""
2544 """Each download status gets a unique number: this method returns
2545 that number. This provides a handle to this particular download, so a
2546 web page can generate a suitable hyperlink."""
class IServermapUpdaterStatus(Interface):
pass
class IPublishStatus(Interface):
pass
class IRetrieveStatus(Interface):
pass
2555 class NotCapableError(Exception):
2556 """You have tried to write to a read-only node."""
class BadWriteEnablerError(Exception):
pass
2561 class RIControlClient(RemoteInterface):
2563 def wait_for_client_connections(num_clients=int):
2564 """Do not return until we have connections to at least NUM_CLIENTS
2568 def upload_from_file_to_uri(filename=str,
2569 convergence=ChoiceOf(None,
2570 StringConstraint(2**20))):
2571 """Upload a file to the grid. This accepts a filename (which must be
2572 absolute) that points to a file on the node's local disk. The node will
2573 read the contents of this file, upload it to the grid, then return the
2574 URI at which it was uploaded. If convergence is None then a random
2575 encryption key will be used, else the plaintext will be hashed, then
that hash will be mixed together with the "convergence" string to form
the encryption key.
return URI
2581 def download_from_uri_to_file(uri=URI, filename=str):
2582 """Download a file from the grid, placing it on the node's local disk
2583 at the given filename (which must be absolute[?]). Returns the
absolute filename where the file was written."""
return str
2589 def get_memory_usage():
2590 """Return a dict describes the amount of memory currently in use. The
2591 keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
2592 measuring memory consupmtion in bytes."""
2593 return DictOf(str, int)
2595 def speed_test(count=int, size=int, mutable=Any()):
2596 """Write 'count' tempfiles to disk, all of the given size. Measure
2597 how long (in seconds) it takes to upload them all to the servers.
2598 Then measure how long it takes to download all of them. If 'mutable'
2599 is 'create', time creation of mutable files. If 'mutable' is
'upload', then time access to the same mutable file instead of
creating a new one each time.
2603 Returns a tuple of (upload_time, download_time).
2605 return (float, float)
2607 def measure_peer_response_time():
2608 """Send a short message to each connected peer, and measure the time
2609 it takes for them to respond to it. This is a rough measure of the
2610 application-level round trip time.
2612 @return: a dictionary mapping peerid to a float (RTT time in seconds)
2615 return DictOf(str, float)
2617 UploadResults = Any() #DictOf(str, str)
2619 class RIEncryptedUploadable(RemoteInterface):
2620 __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
def get_size():
return Offset
def get_all_encoding_parameters():
2626 return (int, int, int, long)
def read_encrypted(offset=Offset, length=ReadSize):
return ListOf(str)
def close():
return None
2635 class RICHKUploadHelper(RemoteInterface):
2636 __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
def get_version():
Return a dictionary of version information.
2642 return DictOf(str, Any())
2644 def upload(reader=RIEncryptedUploadable):
2645 return UploadResults
2648 class RIHelper(RemoteInterface):
2649 __remote_name__ = "RIHelper.tahoe.allmydata.com"
def get_version():
Return a dictionary of version information.
2655 return DictOf(str, Any())
2657 def upload_chk(si=StorageIndex):
2658 """See if a file with a given storage index needs uploading. The
2659 helper will ask the appropriate storage servers to see if the file
2660 has already been uploaded. If so, the helper will return a set of
2661 'upload results' that includes whatever hashes are needed to build
2662 the read-cap, and perhaps a truncated sharemap.
2664 If the file has not yet been uploaded (or if it was only partially
2665 uploaded), the helper will return an empty upload-results dictionary
2666 and also an RICHKUploadHelper object that will take care of the
2667 upload process. The client should call upload() on this object and
2668 pass it a reference to an RIEncryptedUploadable object that will
2669 provide ciphertext. When the upload is finished, the upload() method
2670 will finish and return the upload results.
2672 return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
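# Example (illustrative sketch): the client side of the helper protocol
# described above. 'helper' is a Foolscap RemoteReference to an RIHelper,
# and 'reader' is a local Referenceable providing RIEncryptedUploadable.
#
#   d = helper.callRemote("upload_chk", storage_index)
#   def _got(res):
#       upload_results, upload_helper = res
#       if upload_helper is None:
#           return upload_results       # already uploaded, nothing to push
#       return upload_helper.callRemote("upload", reader)
#   d.addCallback(_got)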
2675 class RIStatsProvider(RemoteInterface):
2676 __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
2678 Provides access to statistics and monitoring information.
def get_stats():
returns a dictionary containing 'counters' and 'stats', each a
2684 dictionary with string counter/stat name keys, and numeric or None values.
2685 counters are monotonically increasing measures of work done, and
stats are instantaneous measures (potentially time averaged
internally).
2689 return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None)))
2691 class RIStatsGatherer(RemoteInterface):
2692 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
2694 Provides a monitoring service for centralised collection of stats
2697 def provide(provider=RIStatsProvider, nickname=str):
2699 @param provider: a stats collector instance which should be polled
2700 periodically by the gatherer to collect stats.
2701 @param nickname: a name useful to identify the provided client
2706 class IStatsProducer(Interface):
def get_stats():
returns a dictionary, with str keys representing the names of stats
2710 to be monitored, and numeric values.
2713 class RIKeyGenerator(RemoteInterface):
2714 __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
2716 Provides a service offering to make RSA key pairs.
2719 def get_rsa_key_pair(key_size=int):
2721 @param key_size: the size of the signature key.
2722 @return: tuple(verifying_key, signing_key)
2724 return TupleOf(str, str)
class FileTooLargeError(Exception):
pass
2730 class IValidatedThingProxy(Interface):
2732 """ Acquire a thing and validate it. Return a deferred which is
2733 eventually fired with self if the thing is valid or errbacked if it
2734 can't be acquired or validated."""
2736 class InsufficientVersionError(Exception):
2737 def __init__(self, needed, got):
self.needed = needed
self.got = got
def __repr__(self):
return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
self.got)
2744 class EmptyPathnameComponentError(Exception):
2745 """The webapi disallows empty pathname components."""