from zope.interface import Interface
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable

HASH_SIZE = 32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE)  # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20)  # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300)  # kind of arbitrary

MAX_BUCKETS = 256  # per peer -- zfec offers at most 256 shares per file

DEFAULT_MAX_SEGMENT_SIZE = 128*1024

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8)  # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int  # the 'int' constraint is 2**31 == 2GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash  # used to protect mutable bucket modifications
LeaseRenewSecret = Hash    # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash   # used to protect bucket lease cancellation requests

class RIStubClient(RemoteInterface):
    """Each client publishes a service announcement for a dummy object called
    the StubClient. This object doesn't actually offer any services, but the
    announcement helps the Introducer keep track of which clients are
    subscribed (so the grid admin can keep track of things like the size of
    the grid and the client versions in use). This is the (empty)
    RemoteInterface for the StubClient."""

class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """
        return None

    def abort():
        """Abandon all the data that has been written.
        """
        return None

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share(),
        which is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """

TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset),  # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
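
# A minimal sketch (not part of the original module) of how a client might
# assemble these vector types for a single-share test-and-set; the
# `old_checkstring` and `new_data` values are hypothetical placeholders.
def _example_tw_vectors(old_checkstring, new_data):
    # TestVector: pass only if the share still starts with old_checkstring
    testv = [(0, len(old_checkstring), "eq", old_checkstring)]
    # DataVector: overwrite the share from offset 0 with the new data
    datav = [(0, new_data)]
    new_length = None  # leave the container size unchanged
    # TestAndWriteVectorsForShares: keyed by share number
    return {0: (testv, datav, new_length)}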

class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket refresh
                             requests. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
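
    # Illustrative sketch (not part of the original interface): a client
    # would invoke this over foolscap roughly like so, assuming `rref` is a
    # RemoteReference to an RIStorageServer:
    #
    #   d = rref.callRemote("allocate_buckets", storage_index,
    #                       renew_secret, cancel_secret,
    #                       set([0, 1, 2]), allocated_size, canary)
    #   # fires with (alreadygot, bucketwriters); the client then writes
    #   # into each RIBucketWriter and finishes with callRemote("close")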

    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        bucket)
        """
        return Any()  # returns None now, but future versions might change

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket, resetting the timer to 31 days.
        Some networks will use this, some will not. If there is no bucket for
        the given storage_index, IndexError will be raised.

        For mutable shares, if the given renew_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData)  # shnum -> results
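
    # Illustrative sketch (not part of the original interface): reading the
    # first 100 bytes of every known share of a mutable slot:
    #
    #   d = rref.callRemote("slot_readv", storage_index, [], [(0, 100)])
    #   # fires with a dict like {0: ["<up to 100 bytes>"], 1: [...], ...}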

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """General-purpose test-and-set operation for mutable slots. Perform
        a bunch of comparisons against the existing shares. If they all pass,
        then apply a bunch of write vectors to those shares. Then use the
        read vectors to extract data from all the shares and return the data.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket refresh
                             requests. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a bool(
        (read(offset, length)) OPERATOR specimen ) and only performs the
        write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created.

        Each write vector is accompanied by a 'new_length' argument. If
        new_length is not None, use it to set the size of the container. This
        can be used to pre-allocate space for a series of upcoming writes, or
        truncate existing data. If the container is growing, new_length will
        be applied before datav. If the container is shrinking, it will be
        applied afterwards. If new_length==0, the share will be deleted.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same vector is used for
        all shares. This captures the state that was tested by the test
        vector.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))
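
    # Illustrative sketch (not part of the original interface): a complete
    # test-and-set call, reusing _example_tw_vectors() from above; the
    # secret names are hypothetical placeholders:
    #
    #   secrets = (write_enabler, renew_secret, cancel_secret)
    #   tw_vectors = _example_tw_vectors(old_checkstring, new_data)
    #   readv = [(0, len(old_checkstring))]
    #   d = rref.callRemote("slot_testv_and_readv_and_writev",
    #                       storage_index, secrets, tw_vectors, readv)
    #   # fires with (wrote, read_data); wrote is False if any test failed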

    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        which did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """

class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode
        (see the illustrative sketch after this class)::

            for k in sorted(dict.keys()):
                assert re.match(r'^[a-zA-Z_\-]+$', k)
                write(k + ':' + netstring(dict[k]))

        @return: a Deferred that fires (with None) when the operation completes
        """
306 """Finish writing and close the bucket. The share is not finalized
307 until this method is called: if the uploading client disconnects
308 before calling close(), the partially-written share will be
311 @return: a Deferred that fires (with None) when the operation completes
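
# A minimal sketch of the serialization pseudocode in put_uri_extension()
# above (an illustration, not code from the original module; the helper
# names are hypothetical).
import re

def _example_netstring(s):
    return "%d:%s," % (len(s), s)

def _example_pack_uri_extension(d):
    pieces = []
    for k in sorted(d.keys()):
        # keys are restricted to this alphabet, so ':' is an unambiguous delimiter
        assert re.match(r'^[a-zA-Z_\-]+$', k)
        pieces.append(k + ':' + _example_netstring(d[k]))
    return ''.join(pieces)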

class IStorageBucketReader(Interface):

    def get_block_data(blocknum=int, blocksize=int, size=int):
        """Most blocks will be the same size. The last block might be shorter
        than the others.

        @return: ShareData
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_block_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(Hash)
        """

    def get_share_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(TupleOf(int, Hash))
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """

class IStorageBroker(Interface):
    def get_servers_for_psi(peer_selection_index):
        """
        @return: list of IServer instances
        """

    def get_connected_servers():
        """
        @return: frozenset of connected IServer instances
        """

    def get_known_servers():
        """
        @return: frozenset of IServer instances
        """

    def get_all_serverids():
        """
        @return: frozenset of serverid strings
        """

    def get_nickname_for_serverid(serverid):
        """
        @return: unicode nickname, or None
        """

    # methods moved from IntroducerClient, need review
    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
        each active connection we've established to a remote service. This is
        mostly useful for unit tests that need to wait until a certain number
        of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are actively
        trying to connect to. Each RemoteServiceConnector has the following
        public attributes::

          service_name: the type of service provided, like 'storage'
          announcement_time: when we first heard about this service
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection
          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, same
          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web page
        which describes connecting and connected peers.
        """

    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have a connection (to
        one or more services) established. Mostly useful for unit tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """

class IMutableSlotWriter(Interface):
    """
    The interface for a writer around a mutable slot on a remote server.
    """
    def set_checkstring(checkstring, *args):
        """
        Set the checkstring that I will pass to the remote server when
        writing.

        @param checkstring A packed checkstring to use.

        Note that implementations can differ in which semantics they
        wish to support for set_checkstring -- they can, for example,
        build the checkstring themselves from its constituents, or
        accept a packed checkstring.
        """

    def get_checkstring():
        """
        Get the checkstring that I think currently exists on the remote
        server.
        """

    def put_block(data, segnum, salt):
        """
        Add a block and salt to the share.
        """

    def put_encprivkey(encprivkey):
        """
        Add the encrypted private key to the share.
        """

    def put_blockhashes(blockhashes=list):
        """
        Add the block hash tree to the share.
        """

    def put_sharehashes(sharehashes=dict):
        """
        Add the share hash chain to the share.
        """

    def get_signable():
        """
        Return the part of the share that needs to be signed.
        """

    def put_signature(signature):
        """
        Add the signature to the share.
        """

    def put_verification_key(verification_key):
        """
        Add the verification key to the share.
        """

    def finish_publishing():
        """
        Do anything necessary to finish writing the share to a remote
        server. I require that no further publishing needs to take place
        after this method has been called.
        """

class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    # TODO: rename to get_read_cap()
    def get_readonly():
        """Return another IURI instance, which represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IVerifierURI(Interface, IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    """I am a URI which represents a mutable filenode."""
    def get_extension_params():
        """Return the extension parameters in the URI"""

    def set_extension_params():
        """Set the extension parameters that should be in the URI"""

class IDirectoryURI(Interface):
    pass

class IReadonlyDirectoryURI(Interface):
    pass

class CapConstraintError(Exception):
    """A constraint on a cap was violated."""

class MustBeDeepImmutableError(CapConstraintError):
    """Mutable children cannot be added to an immutable directory.
    Also, caps obtained from an immutable directory can trigger this error
    if they are later found to refer to a mutable object and then used."""

class MustBeReadonlyError(CapConstraintError):
    """Known write caps cannot be specified in a ro_uri field. Also,
    caps obtained from a ro_uri field can trigger this error if they
    are later found to be write caps and then used."""

class MustNotBeUnknownRWError(CapConstraintError):
    """Cannot add an unknown child cap specified in a rw_uri field."""

class IReadable(Interface):
    """I represent a readable object -- either an immutable file, or a
    specific version of a mutable file.
    """

    def is_readonly():
        """Return True if this reference is read-only (i.e. you cannot use it
        to modify the file or directory), or False if it provides read-write
        access. Note that even if this reference is read-only, someone else
        may hold a read-write reference to it.

        For an IReadable returned by get_best_readable_version(), this will
        always return True, but for instances of subinterfaces such as
        IMutableFileVersion, it may return False."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file.
        """

    def get_storage_index():
        """Return the storage index of the file."""

    def get_size():
        """Return the length (in bytes) of this readable object."""

    def download_to_data():
        """Download all of the file contents. I return a Deferred that fires
        with the contents as a byte string."""

    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None).

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
         (if streaming == False)::
          consumer does p.resumeProducing()
          consumer.write(data)
          consumer does p.resumeProducing()
          consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        See src/allmydata/util/consumer.py for an example of a simple
        download-to-memory consumer.
        """
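
# A minimal download-to-memory consumer in the spirit of the one mentioned
# above (a sketch under the assumption that read() attaches an IPullProducer,
# per the docstring; the class name is hypothetical, see
# src/allmydata/util/consumer.py for the real thing).
class _ExampleMemoryConsumer(object):
    def __init__(self):
        self.chunks = []
        self.done = False
    def registerProducer(self, p, streaming):
        self.producer = p
        if streaming:
            p.resumeProducing()  # a streaming producer pushes data itself
        else:
            # pull producer: keep asking for data until unregisterProducer()
            while not self.done:
                p.resumeProducing()
    def write(self, data):
        self.chunks.append(data)
    def unregisterProducer(self):
        self.done = True

# usage sketch: d = readable.read(_ExampleMemoryConsumer())
#               d.addCallback(lambda c: "".join(c.chunks))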

class IWriteable(Interface):
    """
    I define methods that callers can use to update SDMF and MDMF
    mutable files on a Tahoe-LAFS grid.
    """
    # XXX: For the moment, we have only this. It is possible that we
    #      want to move overwrite() and modify() in here too.
    def update(data, offset):
        """
        I write the data from my data argument to the MDMF file,
        starting at offset. I continue writing data until my data
        argument is exhausted, appending data to the file as necessary.
        """
        # assert IMutableUploadable.providedBy(data)
        # to append data: offset=node.get_size_of_best_version()
        # do we want to support compacting MDMF?
        # for an MDMF file, this can be done with O(data.get_size())
        # memory. For an SDMF file, any modification takes
        # O(node.get_size_of_best_version()).

class IMutableFileVersion(IReadable):
    """I provide access to a particular version of a mutable file. The
    access is read/write if I was obtained from a filenode derived from
    a write cap, or read-only if the filenode was derived from a read cap.
    """

    def get_sequence_number():
        """Return the sequence number of this version."""

    def get_servermap():
        """Return the IMutableFileServerMap instance that was used to create
        this object.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    # TODO: Can this be overwrite instead of replace?
    def replace(new_contents):
        """Replace the contents of the mutable file, provided that no other
        node has published (or is attempting to publish, concurrently) a
        newer version of the file than this one.

        I will avoid modifying any share that is different than the version
        given by get_sequence_number(). However, if another node is writing
        to the file at the same time as me, I may manage to update some shares
        while they update others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these versions).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading this version,
        applying the modifier function (or bound method), then uploading
        the new version. This will succeed as long as no other node
        publishes a version between the download and the upload.
        I return a Deferred that fires (with a PublishStatus object) when
        the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_to_data(), the old contents will be from this version,
        but the modifier can use the servermap to make other decisions
        (such as refusing to apply the delta if there are multiple parallel
        versions, or if there is evidence of a newer unrecoverable version).
        'first_time' will be True the first time the modifier is called,
        and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """
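
# Illustrative sketch (not part of the original interfaces): a modifier
# callable of the shape modify() describes, with arguments in the order the
# docstring lists them; the appended line is a hypothetical payload.
def _example_append_line_modifier(old_contents, first_time, servermap):
    new_line = "another line\n"
    if old_contents.endswith(new_line):
        # the desired change is already present: return None to skip the write
        return None
    return old_contents + new_line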

# The hierarchy looks like this:
#  IFilesystemNode
#   IFileNode
#    IMutableFileNode
#    IImmutableFileNode
#   IDirectoryNode

class IFilesystemNode(Interface):
    def get_cap():
        """Return the strongest 'cap instance' associated with this node.
        (writecap for writeable-mutable files/directories, readcap for
        immutable or readonly-mutable files/directories). To convert this
        into a string, call .to_string() on the result."""

    def get_readcap():
        """Return a readonly cap instance for this node. For immutable or
        readonly nodes, get_cap() and get_readcap() return the same thing."""

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """
763 """Return the URI string corresponding to the strongest cap associated
764 with this node. If this node is read-only, the URI will only offer
765 read-only access. If this node is read-write, the URI will offer
768 If you have read-write access to a node and wish to share merely
769 read-only access with others, use get_readonly_uri().
773 """Return the URI string that can be used by others to get write
774 access to this node, if it is writeable. If this is a read-only node,
777 def get_readonly_uri():
778 """Return the URI string that can be used by others to get read-only
779 access to this node. The result is a read-only URI, regardless of
780 whether this node is read-only or read-write.
782 If you have merely read-only access to this node, get_readonly_uri()
783 will return the same thing as get_uri().
786 def get_storage_index():
787 """Return a string with the (binary) storage index in use on this
788 download. This may be None if there is no storage index (i.e. LIT
789 files and directories)."""
792 """Return True if this reference provides mutable access to the given
793 file or directory (i.e. if you can modify it), or False if not. Note
794 that even if this reference is read-only, someone else may hold a
795 read-write reference to it."""
798 """Return True if this file or directory is mutable (by *somebody*,
799 not necessarily you), False if it is is immutable. Note that a file
800 might be mutable overall, but your reference to it might be
801 read-only. On the other hand, all references to an immutable file
802 will be read-only; there are no read-write references to an immutable
807 """Return True if this is an unknown node."""
809 def is_allowed_in_immutable_directory():
810 """Return True if this node is allowed as a child of a deep-immutable
811 directory. This is true if either the node is of a known-immutable type,
812 or it is unknown and read-only.
816 """Raise any error associated with this node."""
818 # XXX: These may not be appropriate outside the context of an IReadable.
820 """Return the length (in bytes) of the data this node represents. For
821 directory nodes, I return the size of the backing store. I return
822 synchronously and do not consult the network, so for mutable objects,
823 I will return the most recently observed size for the object, or None
824 if I don't remember a size. Use get_current_size, which returns a
825 Deferred, if you want more up-to-date information."""
827 def get_current_size():
828 """I return a Deferred that fires with the length (in bytes) of the
829 data this node represents.

class IFileNode(IFilesystemNode):
    """I am a node which represents a file: a sequence of bytes. I am not a
    container, like IDirectoryNode."""
    def get_best_readable_version():
        """Return a Deferred that fires with an IReadable for the 'best'
        available version of the file. The IReadable provides only read
        access, even if this filenode was derived from a write cap.

        For an immutable file, there is only one version. For a mutable
        file, the 'best' version is the recoverable version with the
        highest sequence number. If no uncoordinated writes have occurred,
        and if enough shares are available, then this will be the most
        recent version that has been uploaded. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def download_best_version():
        """Download the contents of the version that would be returned
        by get_best_readable_version(). This is equivalent to calling
        download_to_data() on the IReadable given by that method.

        I return a Deferred that fires with a byte string when the file
        has been fully downloaded. To support streaming download, use
        the 'read' method of IReadable. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be returned by
        get_best_readable_version().

        I return a Deferred that fires with an integer. If no version
        is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

class IImmutableFileNode(IFileNode, IReadable):
    """I am a node representing an immutable file. Immutable files have
    only one version."""

class IMutableFileNode(IFileNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node which has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.

    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """
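
    # Illustrative sketch (not part of the original interface): the
    # servermap-then-upload flow described above, assuming `node` provides
    # IMutableFileNode and MODE_WRITE comes from allmydata.mutable.common:
    #
    #   d = node.get_servermap(MODE_WRITE)
    #   d.addCallback(lambda smap: node.upload(new_contents, smap))
    #   # errbacks with UncoordinatedWriteError if a collision is detected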

    def get_best_mutable_version():
        """Return a Deferred that fires with an IMutableFileVersion for
        the 'best' available version of the file. The best version is
        the recoverable version with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has
        been uploaded.

        If no version is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap which was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """

    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these versions).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """
1025 """Return this filenode's writekey, or None if the node does not have
1026 write-capability. This may be used to assist with data structures
1027 that need to make certain data available only to writers, such as the
1028 read-write child caps in dirnodes. The recommended process is to have
1029 reader-visible data be submitted to the filenode in the clear (where
1030 it will be encrypted by the filenode using the readkey), but encrypt
1031 writer-visible data using this writekey.
1035 """Returns the mutable file protocol version."""

class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares"""

class NoSharesError(Exception):
    """Download was unable to get any shares at all."""

class DownloadStopped(Exception):
    pass

class UploadUnhappinessError(Exception):
    """Upload was unable to satisfy 'servers_of_happiness'"""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data which is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child which does not exist."""

class ChildOfWrongTypeError(Exception):
    """An operation was attempted on a child of the wrong type (file or
    directory)."""

class IDirectoryNode(IFilesystemNode):
    """I represent a filesystem node that is a container, with a
    name-to-child mapping, holding the tahoe equivalent of a directory. All
    child names are unicode strings, and all children are some sort of
    IFilesystemNode (a file, subdirectory, or unknown node).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of
        metadata."""

    def has_child(name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not. The child name must
        be a unicode string."""

    def get(name):
        """I return a Deferred that fires with a specific named child node,
        which is an IFilesystemNode. The child name must be a unicode string.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_metadata_for(name):
        """I return a Deferred that fires with the metadata dictionary for
        a specific named child node. The child name must be a unicode string.
        This metadata is stored in the *edge*, not in the child, so it is
        attached to the parent dirnode rather than the child node.
        I raise NoSuchChildError if I do not have a child by that name."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the new
        metadata. The child name must be a unicode string. This metadata is
        stored in the *edge*, not in the child, so it is attached to the
        parent dirnode rather than the child node. I return a Deferred
        (that fires with this dirnode) when the operation is complete.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_child_at_path(path):
        """Transform a child path into an IFilesystemNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IFilesystemNode and metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If you have
        both the writecap and readcap, you should provide both arguments.
        If you have only one cap and don't know whether it is read-only,
        provide it as the writecap argument and leave the readcap as None.
        If you have only one cap that is known to be read-only, provide it
        as the readcap argument and leave the writecap as None.
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_children(entries, overwrite=True):
        """Add multiple children (by writecap+readcap) to a directory node.
        Takes a dictionary, with childname as keys and (writecap, readcap)
        tuples (or (writecap, readcap, metadata) triples) as values. Returns
        a Deferred that fires (with this dirnode) when the operation
        finishes. This is equivalent to calling set_uri() multiple times, but
        is much more efficient. All child names must be unicode strings.
        """

    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IFilesystemNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple children to a directory node. Takes a dict mapping
        unicode childname to (child_node, metadata) tuples. If metadata=None,
        the original metadata is left unmodified. Returns a Deferred that
        fires (with this dirnode) when the operation finishes. This is
        equivalent to calling set_node() multiple times, but is much more
        efficient.
        """

    def add_file(name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting ImmutableFileNode to the directory at the given name. I set
        metadata the same way as set_uri and set_node. The child name must be
        a unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""

    def delete(name, must_exist=True, must_be_directory=False, must_be_file=False):
        """I remove the child at the specific name. I return a Deferred that
        fires when the operation finishes. The child name must be a unicode
        string. If must_exist is True and I do not have a child by that name,
        I raise NoSuchChildError. If must_be_directory is True and the child
        is a file, or if must_be_file is True and the child is a directory,
        I raise ChildOfWrongTypeError."""

    def create_subdirectory(name, initial_children={}, overwrite=True, metadata=None):
        """I create and attach a directory at the given name. The new
        directory can be empty, or it can be populated with children
        according to 'initial_children', which takes a dictionary in the same
        format as set_nodes (i.e. mapping unicode child name to (childnode,
        metadata) tuples). The child name must be a unicode string. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""

    def build_manifest():
        """I generate a table of everything reachable from this directory.
        I also compute deep-stats as described below.

        I return a Monitor. The Monitor's results will be a dictionary with
        four elements::

         res['manifest']: a list of (path, cap) tuples for all nodes
                          (directories and files) reachable from this one.
                          'path' will be a tuple of unicode strings. The
                          origin dirnode will be represented by an empty path
                          tuple.
         res['verifycaps']: a list of (printable) verifycap strings, one for
                            each reachable non-LIT node. This is a set:
                            it will contain no duplicates.
         res['storage-index']: a list of (base32) storage index strings,
                               one for each reachable non-LIT node. This is
                               a set: it will contain no duplicates.
         res['stats']: a dictionary, the same that is generated by
                       start_deep_stats() below.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.
        """

    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

           count-immutable-files: count of how many CHK files are in the set
           count-mutable-files: same, for mutable files (does not include
                                directories)
           count-literal-files: same, for LIT files
           count-files: sum of the above three

           count-directories: count of directories

           size-immutable-files: total bytes for all CHK files in the set
           size-mutable-files (TODO): same, for current version of all mutable
                                      files, does not include directories
           size-literal-files: same, for LIT files
           size-directories: size of mutable files used by directories

           largest-directory: number of bytes in the largest directory
           largest-directory-children: number of children in the largest
                                       directory
           largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """
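
# Illustrative sketch (not part of the original interfaces): typical dirnode
# usage, assuming `dirnode` provides IDirectoryNode and `uploadable` provides
# IUploadable; the helper name and child name are hypothetical.
def _example_upload_then_fetch(dirnode, uploadable):
    d = dirnode.add_file(u"report.txt", uploadable)
    # fetch the same child back; fires with the IFileNode just attached
    d.addCallback(lambda _filenode: dirnode.get(u"report.txt"))
    return d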

class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' argument to use less CPU). The encoding
        math will be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).

        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """
1341 """Return the 3-tuple of data_size, required_shares, max_shares"""
1343 def get_encoder_type():
1344 """Return a short string that describes the type of this encoder.
1346 There is required to be a global table of encoder classes. This method
1347 returns an index into this table; the value at this index is an
1348 encoder class, and this encoder is an instance of that class.
1351 def get_block_size():
1352 """Return the length of the shares that encode() will produce.

    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data) must
        be equal to the 'data_size' value passed earlier to set_params().

        This will return a Deferred that will fire with two lists. The first
        is a list of shares, each of which is a string (or other buffer
        object) such that len(share) is the same as what get_share_size()
        returned earlier. The second is a list of shareids, in which each is
        an integer. The lengths of the two lists will always be equal to each
        other. The user should take care to keep each share closely
        associated with its shareid, as one is useless without the other.

        The length of this output list will normally be the same as the value
        provided to the 'max_shares' parameter of set_params(). This may be
        different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        """
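
    # Illustrative sketch (not part of the original interface): the k-of-n
    # round trip encode_proposal() describes, shown with the zfec library
    # that backs Tahoe's CRS codec (assumes zfec is installed; the API names
    # are zfec's, not this interface's):
    #
    #   import zfec
    #   enc = zfec.Encoder(2, 4)               # required_shares=2, max_shares=4
    #   shares = enc.encode(["aaaa", "bbbb"])  # 2 primary + 2 secondary shares
    #   dec = zfec.Decoder(2, 4)
    #   dec.decode([shares[3], shares[0]], [3, 0])  # any 2 shares recover data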
1384 def encode(inshares, desired_share_ids=None):
1385 """Encode some data. This may be called multiple times. Each call is
1388 inshares is a sequence of length required_shares, containing buffers
1389 (i.e. strings), where each buffer contains the next contiguous
1390 non-overlapping segment of the input data. Each buffer is required to
1391 be the same length, and the sum of the lengths of the buffers is
1392 required to be exactly the data_size promised by set_params(). (This
1393 implies that the data has to be padded before being passed to
1394 encode(), unless of course it already happens to be an even multiple
1395 of required_shares in length.)
1397 Note: the requirement to break up your data into
1398 'required_shares' chunks of exactly the right length before
1399 calling encode() is surprising from the point of view of a user
1400 who doesn't know how FEC works. It feels like an
1401 implementation detail that has leaked outside the abstraction
1402 barrier. Is there a use case in which the data to be encoded
1403 might already be available in pre-segmented chunks, such that
1404 it is faster or less work to make encode() take a list rather
1405 than splitting a single string?
1407 Yes, there is: suppose you are uploading a file with K=64,
1408 N=128, segsize=262,144. Then each in-share will be of size
1409 4096. If you use this .encode() API then your code could first
1410 read each successive 4096-byte chunk from the file and store
1411 each one in a Python string and store each such Python string
1412 in a Python list. Then you could call .encode(), passing that
1413 list as "inshares". The encoder would generate the other 64
1414 "secondary shares" and return to you a new list containing
1415 references to the same 64 Python strings that you passed in
1416 (as the primary shares) plus references to the new 64 Python
1417 strings.
1419 (You could even imagine that your code could use readv() so
1420 that the operating system can arrange to get all of those
1421 bytes copied from the file into the Python list of Python
1422 strings as efficiently as possible instead of having a loop
1423 written in C or in Python to copy the next part of the file
1424 into the next string.)
1426 On the other hand if you instead use the .encode_proposal()
1427 API (above), then your code can first read in all of the
1428 262,144 bytes of the segment from the file into a Python
1429 string, then call .encode_proposal() passing the segment data
1430 as the "data" argument. The encoder would basically first
1431 split the "data" argument into a list of 64 in-shares of 4096
1432 bytes each, and then do the same thing that .encode() does. So
1433 this would result in a little bit more copying of data and a
1434 little bit higher of a "maximum memory usage" during the
1435 process, although it might or might not make a practical
1436 difference for our current use cases.
1438 Note that "inshares" is a strange name for the parameter if
1439 you think of the parameter as being just for feeding in data
1440 to the codec. It makes more sense if you think of the result
1441 of this encoding as being the set of shares from inshares plus
1442 an extra set of "secondary shares" (or "check shares"). It is
1443 a surprising name! If the API is going to be surprising then
1444 the name should be surprising. If we switch to
1445 encode_proposal() above then we should also switch to an
1446 unsurprising name.
1448 'desired_share_ids', if provided, is required to be a sequence of
1449 ints, each of which is required to be >= 0 and < max_shares. If not
1450 provided, encode() will produce 'max_shares' shares, as if
1451 'desired_share_ids' were set to range(max_shares). You might use this
1452 if you initially thought you were going to use 10 peers, started
1453 encoding, and then two of the peers dropped out: you could use
1454 desired_share_ids= to skip the work (both memory and CPU) of
1455 producing shares for the peers which are no longer available.
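A hypothetical sketch of that scenario::

 surviving = [i for i in range(10) if i not in (3, 7)]  # peers 3,7 gone
 d = encoder.encode(inshares, desired_share_ids=surviving)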
1457 For each call, encode() will return a Deferred that fires with two
1458 lists, one containing shares and the other containing the shareids.
1459 The get_share_size() method can be used to determine the length of
1460 the share strings returned by encode(). Each shareid is a small
1461 integer, exactly as passed into 'desired_share_ids' (or
1462 range(max_shares), if desired_share_ids was not provided).
1464 The shares and their corresponding shareids are required to be kept
1465 together during storage and retrieval. Specifically, the share data is
1466 useless by itself: the decoder needs to be told which share is which
1467 by providing it with both the shareid and the actual share data.
1469 This function will allocate an amount of memory roughly equal to::
1471 (max_shares - required_shares) * get_share_size()
1473 When combined with the memory that the caller must allocate to
1474 provide the input data, this leads to a memory footprint roughly
1475 equal to the size of the resulting encoded shares (i.e. the expansion
1476 factor times the size of the input segment).
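A typical consumer might look like this (a sketch; send_to_server()
is a hypothetical transport helper, not part of this interface)::

 d = encoder.encode(inshares)
 def _push(res):
     (shares, shareids) = res
     for shareid, share in zip(shareids, shares):
         send_to_server(shareid, share)
 d.addCallback(_push)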
1479 # rejected ideas:
1481 # returning a list of (shareidN,shareN) tuples instead of a pair of
1482 # lists (shareids..,shares..). Brian thought the tuples would
1483 # encourage users to keep the share and shareid together throughout
1484 # later processing, Zooko pointed out that the code to iterate
1485 # through two lists is not really more complicated than using a list
1486 # of tuples and there's also a performance improvement
1488 # having 'data_size' not required to be an integral multiple of
1489 # 'required_shares'. Doing this would require encode() to perform
1490 # padding internally, and we'd prefer to have any padding be done
1491 # explicitly by the caller. Yes, it is an abstraction leak, but
1492 # hopefully not an onerous one.
1495 class ICodecDecoder(Interface):
1496 def set_params(data_size, required_shares, max_shares):
1497 """Set the params. They have to be exactly the same ones that were
1498 used for encoding."""
1500 def get_needed_shares():
1501 """Return the number of shares needed to reconstruct the data.
1502 set_params() is required to be called before this."""
1504 def decode(some_shares, their_shareids):
1505 """Decode a partial list of shares into data.
1507 'some_shares' is required to be a sequence of buffers of sharedata, a
1508 subset of the shares returned by ICodecEncoder.encode(). Each share is
1509 required to be of the same length. The i'th element of their_shareids
1510 is required to be the shareid of the i'th buffer in some_shares.
1512 This returns a Deferred which fires with a sequence of buffers. This
1513 sequence will contain all of the segments of the original data, in
1514 order. The sum of the lengths of all of the buffers will be the
1515 'data_size' value passed into the original ICodecEncoder.set_params()
1516 call. To get back the single original input block of data, use
1517 ''.join(output_buffers), or you may wish to simply write them in
1518 order to an output file.
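For example (a sketch)::

 d = decoder.decode(some_shares, their_shareids)
 d.addCallback(lambda buffers: ''.join(buffers))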
1520 Note that some of the elements in the result sequence may be
1521 references to the elements of the some_shares input sequence. In
1522 particular, this means that if those share objects are mutable (e.g.
1523 arrays) and if they are changed, then both the input (the
1524 'some_shares' parameter) and the output (the value given when the
1525 deferred is triggered) will change.
1527 The length of 'some_shares' is required to be exactly the value of
1528 'required_shares' passed into the original ICodecEncoder.set_params()
1529 call."""
1532 class IEncoder(Interface):
1533 """I take an object that provides IEncryptedUploadable, which provides
1534 encrypted data, and a list of shareholders. I then encode, hash, and
1535 deliver shares to those shareholders. I will compute all the Merkle
1536 hash trees that are necessary to validate the crypttext that
1537 eventually comes back from the shareholders. I provide the URI Extension
1538 Block Hash, and the encoding parameters, both of which must be included
1539 in the URI.
1541 I do not choose shareholders, that is left to the IUploader. I must be
1542 given a dict of RemoteReferences to storage buckets that are ready and
1543 willing to receive data.
1547 """Specify the number of bytes that will be encoded. This must be
1548 peformed before get_serialized_params() can be called.
1550 def set_params(params):
1551 """Override the default encoding parameters. 'params' is a tuple of
1552 (k,d,n), where 'k' is the number of required shares, 'd' is the
1553 servers_of_happiness, and 'n' is the total number of shares that will
1556 Encoding parameters can be set in three ways. 1: The Encoder class
1557 provides defaults (3/7/10). 2: the Encoder can be constructed with
1558 an 'options' dictionary, in which the
1559 'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
1560 set_params((k,d,n)) can be called.
1562 If you intend to use set_params(), you must call it before
1563 get_share_size or get_param are called.
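For example (a sketch; 'e' is assumed to be an IEncoder provider)::

 e.set_params((3, 7, 10))  # k=3, servers_of_happiness=7, N=10
 e.set_size(file_size)     # file_size determined elsewhere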
1566 def set_encrypted_uploadable(u):
1567 """Provide a source of encrypted upload data. 'u' must implement
1568 IEncryptedUploadable.
1570 When this is called, the IEncryptedUploadable will be queried for its
1571 length and the storage_index that should be used.
1573 This returns a Deferred that fires with this Encoder instance.
1575 This must be performed before start() can be called.
1578 def get_param(name):
1579 """Return an encoding parameter, by name.
1581 'storage_index': return a string with the (16-byte truncated SHA-256
1582 hash) storage index to which these shares should be
1583 pushed.
1585 'share_counts': return a tuple describing how many shares are used:
1586 (needed_shares, servers_of_happiness, total_shares)
1588 'num_segments': return an int with the number of segments that
1589 will be encoded.
1591 'segment_size': return an int with the size of each segment.
1593 'block_size': return the size of the individual blocks that will
1594 be delivered to a shareholder's put_block() method. By
1595 knowing this, the shareholder will be able to keep all
1596 blocks in a single file and still provide random access
1597 when reading them. # TODO: can we avoid exposing this?
1599 'share_size': an int with the size of the data that will be stored
1600 on each shareholder. This is the aggregate amount of data
1601 that will be sent to the shareholder, summed over all
1602 the put_block() calls I will ever make. It is useful to
1603 determine this size before asking potential
1604 shareholders whether they will grant a lease or not,
1605 since their answers will depend upon how much space we
1606 need. TODO: this might also include some amount of
1607 overhead, like the size of all the hashes. We need to
1608 decide whether this is useful or not.
1610 'serialized_params': a string with a concise description of the
1611 codec name and its parameters. This may be passed
1612 into the IUploadable to let it make sure that
1613 the same file encoded with different parameters
1614 will result in different storage indexes.
1616 Once this is called, set_size() and set_params() may not be called.
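For example (a sketch)::

 (k, happy, n) = e.get_param('share_counts')
 share_size = e.get_param('share_size')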
1619 def set_shareholders(shareholders, servermap):
1620 """Tell the encoder where to put the encoded shares. 'shareholders'
1621 must be a dictionary that maps share number (an integer ranging from
1622 0 to n-1) to an instance that provides IStorageBucketWriter.
1623 'servermap' is a dictionary that maps share number (as defined above)
1624 to a set of peerids. This must be performed before start() can be
1625 called."""
1627 def start():
1628 """Begin the encode/upload process. This involves reading encrypted
1629 data from the IEncryptedUploadable, encoding it, uploading the shares
1630 to the shareholders, then sending the hash trees.
1632 set_encrypted_uploadable() and set_shareholders() must be called
1633 before this can be invoked.
1635 This returns a Deferred that fires with a verify cap when the upload
1636 process is complete. The verifycap, plus the encryption key, is
1637 sufficient to construct the read cap.
1640 class IDecoder(Interface):
1641 """I take a list of shareholders and some setup information, then
1642 download, validate, decode, and decrypt data from them, writing the
1643 results to an output file.
1645 I do not locate the shareholders, that is left to the IDownloader. I must
1646 be given a dict of RemoteReferences to storage buckets that are ready to
1647 receive data.
1651 """I take a file-like object (providing write and close) to which all
1652 the plaintext data will be written.
1654 TODO: producer/consumer. Maybe write() should return a Deferred that
1655 indicates when it will accept more data? But probably having the
1656 IDecoder be a producer is easier to glue to IConsumer pieces.
1659 def set_shareholders(shareholders):
1660 """I take a dictionary that maps share identifiers (small integers)
1661 to RemoteReferences that provide RIBucketReader. This must be called
1662 before start()."""
1664 def start():
1665 """I start the download. This process involves retrieving data and
1666 hash chains from the shareholders, using the hashes to validate the
1667 data, decoding the shares into segments, decrypting the segments,
1668 then writing the resulting plaintext to the output file.
1670 I return a Deferred that will fire (with self) when the download is
1671 complete."""
1674 class IDownloadTarget(Interface):
1675 # Note that if the IDownloadTarget is also an IConsumer, the downloader
1676 # will register itself as a producer. This allows the target to invoke
1677 # downloader.pauseProducing, resumeProducing, and stopProducing.
1679 """Called before any calls to write() or close(). If an error
1680 occurs before any data is available, fail() may be called without
1681 a previous call to open().
1683 'size' is the length of the file being downloaded, in bytes."""
1686 """Output some data to the target."""
1688 """Inform the target that there is no more data to be written."""
1690 """fail() is called to indicate that the download has failed. 'why'
1691 is a Failure object indicating what went wrong. No further methods
1692 will be invoked on the IDownloadTarget after fail()."""
1693 def register_canceller(cb):
1694 """The CiphertextDownloader uses this to register a no-argument function
1695 that the target can call to cancel the download. Once this canceller
1696 is invoked, no further calls to write() or close() will be made."""
1698 """When the CiphertextDownloader is done, this finish() function will be
1699 called. Whatever it returns will be returned to the invoker of
1700 Downloader.download.
1703 class IDownloader(Interface):
1704 def download(uri, target):
1705 """Perform a CHK download, sending the data to the given target.
1706 'target' must provide IDownloadTarget.
1708 Returns a Deferred that fires (with the results of target.finish)
1709 when the download is finished, or errbacks if something went wrong."""
1711 class IEncryptedUploadable(Interface):
1712 def set_upload_status(upload_status):
1713 """Provide an IUploadStatus object that should be filled with status
1714 information. The IEncryptedUploadable is responsible for setting
1715 key-determination progress ('chk'), size, storage_index, and
1716 ciphertext-fetch progress. It may delegate some of this
1717 responsibility to others, in particular to the IUploadable."""
1720 """This behaves just like IUploadable.get_size()."""
1722 def get_all_encoding_parameters():
1723 """Return a Deferred that fires with a tuple of
1724 (k,happy,n,segment_size). The segment_size will be used as-is, and
1725 must match the following constraints: it must be a multiple of k, and
1726 it shouldn't be unreasonably larger than the file size (if
1727 segment_size is larger than filesize, the difference must be stored
1728 as padding).
1730 This usually passes through to the IUploadable method of the same
1731 name.
1733 The encoder strictly obeys the values returned by this method. To
1734 make an upload use non-default encoding parameters, you must arrange
1735 to control the values that this method returns.
1738 def get_storage_index():
1739 """Return a Deferred that fires with a 16-byte storage index.
1742 def read_encrypted(length, hash_only):
1743 """This behaves just like IUploadable.read(), but returns crypttext
1744 instead of plaintext. If hash_only is True, then this discards the
1745 data (and returns an empty list); this improves efficiency when
1746 resuming an interrupted upload (where we need to compute the
1747 plaintext hashes, but don't need the redundant encrypted data)."""
1749 def get_plaintext_hashtree_leaves(first, last, num_segments):
1750 """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
1751 plaintext segments, i.e. get the tagged hashes of the given segments.
1752 The segment size is expected to be generated by the
1753 IEncryptedUploadable before any plaintext is read or ciphertext
1754 produced, so that the segment hashes can be generated with only a
1755 single pass over the plaintext.
1757 This returns a Deferred which fires with a sequence of hashes, using:
1759 tuple(segment_hashes[first:last])
1761 'num_segments' is used to assert that the number of segments that the
1762 IEncryptedUploadable handled matches the number of segments that the
1763 encoder was expecting.
1765 This method must not be called until the final byte has been read
1766 from read_encrypted(). Once this method is called, read_encrypted()
1767 can never be called again.
1770 def get_plaintext_hash():
1771 """OBSOLETE; Get the hash of the whole plaintext.
1773 This returns a Deferred which fires with a tagged SHA-256 hash of the
1774 whole plaintext, obtained from hashutil.plaintext_hash(data).
1778 """Just like IUploadable.close()."""
1780 class IUploadable(Interface):
1781 def set_upload_status(upload_status):
1782 """Provide an IUploadStatus object that should be filled with status
1783 information. The IUploadable is responsible for setting
1784 key-determination progress ('chk')."""
1786 def set_default_encoding_parameters(params):
1787 """Set the default encoding parameters, which must be a dict mapping
1788 strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1789 'max_segment_size'. These might have an influence on the final
1790 encoding parameters returned by get_all_encoding_parameters(), if the
1791 Uploadable doesn't have more specific preferences.
1793 This call is optional: if it is not used, the Uploadable will use
1794 some built-in defaults. If used, this method must be called before
1795 any other IUploadable methods to have any effect.
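For example (illustrative values only)::

 u.set_default_encoding_parameters({'k': 3, 'happy': 7, 'n': 10,
                                    'max_segment_size': 128*1024})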
1799 """Return a Deferred that will fire with the length of the data to be
1800 uploaded, in bytes. This will be called before the data is actually
1801 used, to compute encoding parameters.
1804 def get_all_encoding_parameters():
1805 """Return a Deferred that fires with a tuple of
1806 (k,happy,n,segment_size). The segment_size will be used as-is, and
1807 must match the following constraints: it must be a multiple of k, and
1808 it shouldn't be unreasonably larger than the file size (if
1809 segment_size is larger than filesize, the difference must be stored
1810 as padding).
1812 The relative values of k and n allow some IUploadables to request
1813 better redundancy than others (in exchange for consuming more space
1814 in the grid).
1816 Larger values of segment_size reduce hash overhead, while smaller
1817 values reduce memory footprint and cause data to be delivered in
1818 smaller pieces (which may provide a smoother and more predictable
1819 download experience).
1821 The encoder strictly obeys the values returned by this method. To
1822 make an upload use non-default encoding parameters, you must arrange
1823 to control the values that this method returns. One way to influence
1824 them may be to call set_encoding_parameters() before calling
1825 get_all_encoding_parameters().
1828 def get_encryption_key():
1829 """Return a Deferred that fires with a 16-byte AES key. This key will
1830 be used to encrypt the data. The key will also be hashed to derive
1831 the storage index.
1833 Uploadables which want to achieve convergence should hash their file
1834 contents and the serialized_encoding_parameters to form the key
1835 (which of course requires a full pass over the data). Uploadables can
1836 use the upload.ConvergentUploadMixin class to achieve this
1837 automatically.
1839 Uploadables which do not care about convergence (or do not wish to
1840 make multiple passes over the data) can simply return a
1841 strongly-random 16 byte string.
1843 get_encryption_key() may be called multiple times: the IUploadable is
1844 required to return the same value each time.
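A convergent derivation might look roughly like this (a sketch only;
the real derivation uses hashutil's tagged hashes, not a bare
SHA-256)::

 import hashlib
 key = hashlib.sha256(params_str + file_contents).digest()[:16]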
1848 """Return a Deferred that fires with a list of strings (perhaps with
1849 only a single element) which, when concatenated together, contain the
1850 next 'length' bytes of data. If EOF is near, this may provide fewer
1851 than 'length' bytes. The total number of bytes provided by read()
1852 before it signals EOF must equal the size provided by get_size().
1854 If the data must be acquired through multiple internal read
1855 operations, returning a list instead of a single string may help to
1856 reduce string copies. However, the length of the concatenated strings
1857 must equal the amount of data requested, unless EOF is encountered.
1858 Long reads, or short reads without EOF, are not allowed. read()
1859 should return the same amount of data as a local disk file read, just
1860 in a different shape and asynchronously.
1862 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1863 so a 10kB file means length=3kB, 100kB file means length=30kB,
1864 and >=1MB file means length=300kB.
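That is, roughly (a sketch)::

 length = min(get_size(), 1024*1024) // required_shares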
1866 This method provides for a single full pass through the data. Later
1867 use cases may desire multiple passes or access to only parts of the
1868 data (such as a mutable file making small edits-in-place). This API
1869 will be expanded once those use cases are better understood.
1873 """The upload is finished, and whatever filehandle was in use may be
1877 class IMutableUploadable(Interface):
1879 I represent content that is due to be uploaded to a mutable filecap.
1881 # This is somewhat simpler than the IUploadable interface above
1882 # because mutable files do not need to be concerned with possibly
1883 # generating a CHK, nor with per-file keys. It is a subset of the
1884 # methods in IUploadable, though, so we could just as well implement
1885 # the mutable uploadables as IUploadables that don't happen to use
1886 # those methods (with the understanding that the unused methods will
1887 # never be called on such objects)
1889 def get_size():
1890 """Returns a Deferred that fires with the size of the content held
1891 by the uploadable."""
1895 def read(length):
1896 """Returns a list of strings which, when concatenated, are the next
1897 length bytes of the file, or fewer if there are fewer bytes
1898 between the current location and the end of the file.
1902 def close():
1903 """The process that used the Uploadable is finished using it, so
1904 the uploadable may be closed.
1907 class IUploadResults(Interface):
1908 """I am returned by upload() methods. I contain a number of public
1909 attributes which can be read to determine the results of the upload. Some
1910 of these are functional, some are timing information. All of these may be
1911 None.::
1913 .file_size : the size of the file, in bytes
1914 .uri : the CHK read-cap for the file
1915 .ciphertext_fetched : how many bytes were fetched by the helper
1916 .sharemap: dict mapping share identifier to set of serverids
1917 (binary strings). This indicates which servers were given
1918 which shares. For immutable files, the shareid is an
1919 integer (the share number, from 0 to N-1). For mutable
1920 files, it is a string of the form 'seq%d-%s-sh%d',
1921 containing the sequence number, the roothash, and the
1922 share number.
1923 .servermap : dict mapping server peerid to a set of share numbers
1924 .timings : dict of timing information, mapping name to seconds (float)
1925 total : total upload time, start to finish
1926 storage_index : time to compute the storage index
1927 peer_selection : time to decide which peers will be used
1928 contacting_helper : initial helper query to upload/no-upload decision
1929 existence_check : helper pre-upload existence check
1930 helper_total : initial helper query to helper finished pushing
1931 cumulative_fetch : helper waiting for ciphertext requests
1932 total_fetch : helper start to last ciphertext response
1933 cumulative_encoding : just time spent in zfec
1934 cumulative_sending : just time spent waiting for storage servers
1935 hashes_and_close : last segment push to shareholder close
1936 total_encode_and_push : first encode to shareholder close
1940 class IDownloadResults(Interface):
1941 """I am created internally by download() methods. I contain a number of
1942 public attributes which contain details about the download process.::
1944 .file_size : the size of the file, in bytes
1945 .servers_used : set of server peerids that were used during download
1946 .server_problems : dict mapping server peerid to a problem string. Only
1947 servers that had problems (bad hashes, disconnects)
1948 are listed here.
1949 .servermap : dict mapping server peerid to a set of share numbers. Only
1950 servers that had any shares are listed here.
1951 .timings : dict of timing information, mapping name to seconds (float)
1952 peer_selection : time to ask servers about shares
1953 servers_peer_selection : dict of peerid to DYHB-query time
1954 uri_extension : time to fetch a copy of the URI extension block
1955 hashtrees : time to fetch the hash trees
1956 segments : time to fetch, decode, and deliver segments
1957 cumulative_fetch : time spent waiting for storage servers
1958 cumulative_decode : just time spent in zfec
1959 cumulative_decrypt : just time spent in decryption
1960 total : total download time, start to finish
1961 fetch_per_server : dict of peerid to list of per-segment fetch times
1965 class IUploader(Interface):
1966 def upload(uploadable):
1967 """Upload the file. 'uploadable' must impement IUploadable. This
1968 returns a Deferred which fires with an IUploadResults instance, from
1969 which the URI of the file can be obtained as results.uri ."""
1971 def upload_ssk(write_capability, new_version, uploadable):
1972 """TODO: how should this work?"""
1974 class ICheckable(Interface):
1975 def check(monitor, verify=False, add_lease=False):
1976 """Check up on my health, optionally repairing any problems.
1978 This returns a Deferred that fires with an instance that provides
1979 ICheckResults, or None if the object is non-distributed (i.e. LIT
1980 files).
1982 The monitor will be checked periodically to see if the operation has
1983 been cancelled. If so, no new queries will be sent, and the Deferred
1984 will fire (with an OperationCancelledError) immediately.
1986 Filenodes and dirnodes (which provide IFilesystemNode) are also
1987 checkable. Instances that represent verifier-caps will be checkable
1988 but not downloadable. Some objects (like LIT files) do not actually
1989 live in the grid, and their checkers return None (non-distributed
1990 files are always healthy).
1992 If verify=False, a relatively lightweight check will be performed: I
1993 will ask all servers if they have a share for me, and I will believe
1994 whatever they say. If there are at least N distinct shares on the
1995 grid, my results will indicate r.is_healthy()==True. This requires a
1996 roundtrip to each server, but does not transfer very much data, so
1997 the network bandwidth is fairly low.
1999 If verify=True, a more resource-intensive check will be performed:
2000 every share will be downloaded, and the hashes will be validated on
2001 every bit. I will ignore any shares that failed their hash checks. If
2002 there are at least N distinct valid shares on the grid, my results
2003 will indicate r.is_healthy()==True. This requires N/k times as much
2004 download bandwidth (and server disk IO) as a regular download. If a
2005 storage server is holding a corrupt share, or is experiencing memory
2006 failures during retrieval, or is malicious or buggy, then
2007 verification will detect the problem, but checking will not.
2009 If add_lease=True, I will ensure that an up-to-date lease is present
2010 on each share. The lease secrets will be derived from my node secret
2011 (in BASEDIR/private/secret), so either I will add a new lease to the
2012 share, or I will merely renew the lease that I already had. In a
2013 future version of the storage-server protocol (once Accounting has
2014 been implemented), there may be additional options here to define the
2015 kind of lease that is obtained (which account number to claim, etc).
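A typical invocation might be (a sketch; 'monitor' is the Monitor
instance described above)::

 d = filenode.check(monitor, verify=True, add_lease=True)
 d.addCallback(lambda r: r and r.is_healthy())  # r is None for LIT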
2017 TODO: any problems seen during checking will be reported to the
2018 health-manager.furl, a centralized object which is responsible for
2019 figuring out why files are unhealthy so corrective action can be
2023 def check_and_repair(monitor, verify=False, add_lease=False):
2024 """Like check(), but if the file/directory is not healthy, attempt to
2027 Any non-healthy result will cause an immediate repair operation, to
2028 generate and upload new shares. After repair, the file will be as
2029 healthy as we can make it. Details about what sort of repair is done
2030 will be put in the check-and-repair results. The Deferred will not
2031 fire until the repair is complete.
2033 This returns a Deferred which fires with an instance of
2034 ICheckAndRepairResults."""
2036 class IDeepCheckable(Interface):
2037 def start_deep_check(verify=False, add_lease=False):
2038 """Check upon the health of me and everything I can reach.
2040 This is a recursive form of check(), usable only on dirnodes.
2042 I return a Monitor, with results that are an IDeepCheckResults
2043 object.
2045 TODO: If any of the directories I traverse are unrecoverable, the
2046 Monitor will report failure. If any of the files I check upon are
2047 unrecoverable, those problems will be reported in the
2048 IDeepCheckResults as usual, and the Monitor will not report a
2049 failure.
2052 def start_deep_check_and_repair(verify=False, add_lease=False):
2053 """Check upon the health of me and everything I can reach. Repair
2054 anything that isn't healthy.
2056 This is a recursive form of check_and_repair(), usable only on
2057 dirnodes.
2059 I return a Monitor, with results that are an
2060 IDeepCheckAndRepairResults object.
2062 TODO: If any of the directories I traverse are unrecoverable, the
2063 Monitor will report failure. If any of the files I check upon are
2064 unrecoverable, those problems will be reported in the
2065 IDeepCheckResults as usual, and the Monitor will not report a
2066 failure.
2069 class ICheckResults(Interface):
2070 """I contain the detailed results of a check/verify operation.
2073 def get_storage_index():
2074 """Return a string with the (binary) storage index."""
2075 def get_storage_index_string():
2076 """Return a string with the (printable) abbreviated storage index."""
2078 """Return the (string) URI of the object that was checked."""
2081 """Return a boolean, True if the file/dir is fully healthy, False if
2082 it is damaged in any way. Non-distributed LIT files always return
2085 def is_recoverable():
2086 """Return a boolean, True if the file/dir can be recovered, False if
2087 not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
2088 files always return True."""
2090 def needs_rebalancing():
2091 """Return a boolean, True if the file/dir's reliability could be
2092 improved by moving shares to new servers. Non-distributed LIT files
2093 always return False."""
2097 """Return a dictionary that describes the state of the file/dir. LIT
2098 files always return an empty dictionary. Normal files and directories
2099 return a dictionary with the following keys (note that these use
2100 binary strings rather than base32-encoded ones) (also note that for
2101 mutable files, these counts are for the 'best' version):
2103 count-shares-good: the number of distinct good shares that were found
2104 count-shares-needed: 'k', the number of shares required for recovery
2105 count-shares-expected: 'N', the number of total shares generated
2106 count-good-share-hosts: the number of distinct storage servers with
2107 good shares. If this number is less than
2108 count-shares-good, then some shares are
2109 doubled up, increasing the correlation of
2110 failures. This indicates that one or more
2111 shares should be moved to an otherwise unused
2112 server, if one is available.
2113 count-corrupt-shares: the number of shares with integrity failures
2114 list-corrupt-shares: a list of 'share locators', one for each share
2115 that was found to be corrupt. Each share
2116 locator is a list of (serverid, storage_index,
2117 sharenum).
2118 count-incompatible-shares: the number of shares which are of a share
2119 format unknown to this checker
2120 list-incompatible-shares: a list of 'share locators', one for each
2121 share that was found to be of an unknown
2122 format. Each share locator is a list of
2123 (serverid, storage_index, sharenum).
2124 servers-responding: list of (binary) storage server identifiers,
2125 one for each server which responded to the share
2126 query (even if they said they didn't have
2127 shares, and even if they said they did have
2128 shares but then didn't send them when asked, or
2129 dropped the connection, or returned a Failure,
2130 and even if they said they did have shares and
2131 sent incorrect ones when asked)
2132 sharemap: dict mapping share identifier to list of serverids
2133 (binary strings). This indicates which servers are holding
2134 which shares. For immutable files, the shareid is an
2135 integer (the share number, from 0 to N-1). For mutable
2136 files, it is a string of the form 'seq%d-%s-sh%d',
2137 containing the sequence number, the roothash, and the
2138 share number.
2140 The following keys are most relevant for mutable files, but immutable
2141 files will provide sensible values too::
2143 count-wrong-shares: the number of shares for versions other than the
2144 'best' one (which is defined as being the
2145 recoverable version with the highest sequence
2146 number, then the highest roothash). These are
2147 either leftover shares from an older version
2148 (perhaps on a server that was offline when an
2149 update occurred), shares from an unrecoverable
2150 newer version, or shares from an alternate
2151 current version that results from an
2152 uncoordinated write collision. For a healthy
2153 file, this will equal 0.
2155 count-recoverable-versions: the number of recoverable versions of
2156 the file. For a healthy file, this will
2157 be 1.
2159 count-unrecoverable-versions: the number of unrecoverable versions
2160 of the file. For a healthy file, this
2161 will be 0.
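For example, a caller might flag doubled-up shares like this (a
sketch)::

 data = r.get_data()
 if data['count-good-share-hosts'] < data['count-shares-good']:
     rebalance(r.get_storage_index())  # hypothetical helper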
2166 """Return a string with a brief (one-line) summary of the results."""
2169 """Return a list of strings with more detailed results."""
2171 class ICheckAndRepairResults(Interface):
2172 """I contain the detailed results of a check/verify/repair operation.
2174 The IFilesystemNode.check()/verify()/repair() methods all return
2175 instances that provide ICheckAndRepairResults.
2178 def get_storage_index():
2179 """Return a string with the (binary) storage index."""
2180 def get_storage_index_string():
2181 """Return a string with the (printable) abbreviated storage index."""
2182 def get_repair_attempted():
2183 """Return a boolean, True if a repair was attempted. We might not
2184 attempt to repair the file because it was healthy, or healthy enough
2185 (i.e. some shares were missing but not enough to exceed some
2186 threshold), or because we don't know how to repair this object."""
2187 def get_repair_successful():
2188 """Return a boolean, True if repair was attempted and the file/dir
2189 was fully healthy afterwards. False if no repair was attempted or if
2190 a repair attempt failed."""
2191 def get_pre_repair_results():
2192 """Return an ICheckResults instance that describes the state of the
2193 file/dir before any repair was attempted."""
2194 def get_post_repair_results():
2195 """Return an ICheckResults instance that describes the state of the
2196 file/dir after any repair was attempted. If no repair was attempted,
2197 the pre-repair and post-repair results will be identical."""
2200 class IDeepCheckResults(Interface):
2201 """I contain the results of a deep-check operation.
2203 This is returned by a call to ICheckable.deep_check().
2206 def get_root_storage_index_string():
2207 """Return the storage index (abbreviated human-readable string) of
2208 the first object checked."""
2210 """Return a dictionary with the following keys::
2212 count-objects-checked: count of how many objects were checked
2213 count-objects-healthy: how many of those objects were completely
2214 healthy
2215 count-objects-unhealthy: how many were damaged in some way
2216 count-objects-unrecoverable: how many were unrecoverable
2217 count-corrupt-shares: how many shares were found to have
2218 corruption, summed over all objects
2219 examined
2222 def get_corrupt_shares():
2223 """Return a set of (serverid, storage_index, sharenum) for all shares
2224 that were found to be corrupt. Both serverid and storage_index are
2225 binary."""
2227 def get_all_results():
2228 """Return a dictionary mapping pathname (a tuple of strings, ready to
2229 be slash-joined) to an ICheckResults instance, one for each object
2230 that was checked."""
2232 def get_results_for_storage_index(storage_index):
2233 """Retrive the ICheckResults instance for the given (binary)
2234 storage index. Raises KeyError if there are no results for that
2238 """Return a dictionary with the same keys as
2239 IDirectoryNode.deep_stats()."""
2241 class IDeepCheckAndRepairResults(Interface):
2242 """I contain the results of a deep-check-and-repair operation.
2244 This is returned by a call to ICheckable.deep_check_and_repair().
2247 def get_root_storage_index_string():
2248 """Return the storage index (abbreviated human-readable string) of
2249 the first object checked."""
2251 """Return a dictionary with the following keys::
2253 count-objects-checked: count of how many objects were checked
2254 count-objects-healthy-pre-repair: how many of those objects were
2255 completely healthy (before any
2256 repair)
2257 count-objects-unhealthy-pre-repair: how many were damaged in
2258 some way
2259 count-objects-unrecoverable-pre-repair: how many were unrecoverable
2260 count-objects-healthy-post-repair: how many of those objects were
2261 completely healthy (after any
2262 repair)
2263 count-objects-unhealthy-post-repair: how many were damaged in
2264 some way
2265 count-objects-unrecoverable-post-repair: how many were
2266 unrecoverable (after any repair)
2267 count-repairs-attempted: repairs were attempted on this many
2268 objects. The count-repairs- keys will
2269 always be provided, however unless
2270 repair=true is present, they will all
2271 be zero.
2272 count-repairs-successful: how many repairs resulted in healthy
2273 objects
2274 count-repairs-unsuccessful: how many repairs did not result
2275 in completely healthy objects
2276 count-corrupt-shares-pre-repair: how many shares were found to
2277 have corruption, summed over all
2278 objects examined (before any
2279 repair)
2280 count-corrupt-shares-post-repair: how many shares were found to
2281 have corruption, summed over all
2282 objects examined (after any
2283 repair)
2287 """Return a dictionary with the same keys as
2288 IDirectoryNode.deep_stats()."""
2290 def get_corrupt_shares():
2291 """Return a set of (serverid, storage_index, sharenum) for all shares
2292 that were found to be corrupt before any repair was attempted. Both
2293 serverid and storage_index are binary.
2295 def get_remaining_corrupt_shares():
2296 """Return a set of (serverid, storage_index, sharenum) for all shares
2297 that were found to be corrupt after any repair was completed. Both
2298 serverid and storage_index are binary. These are shares that need
2299 manual inspection and probably deletion.
2301 def get_all_results():
2302 """Return a dictionary mapping pathname (a tuple of strings, ready to
2303 be slash-joined) to an ICheckAndRepairResults instance, one for each
2304 object that was checked."""
2306 def get_results_for_storage_index(storage_index):
2307 """Retrive the ICheckAndRepairResults instance for the given (binary)
2308 storage index. Raises KeyError if there are no results for that
2312 class IRepairable(Interface):
2313 def repair(check_results):
2314 """Attempt to repair the given object. Returns a Deferred that fires
2315 with an IRepairResults object.
2317 I must be called with an object that implements ICheckResults, as
2318 proof that you have actually discovered a problem with this file. I
2319 will use the data in the checker results to guide the repair process,
2320 such as which servers provided bad data and should therefore be
2321 avoided. The ICheckResults object is inside the
2322 ICheckAndRepairResults object, which is returned by the
2323 ICheckable.check() method::
2325 d = filenode.check(repair=False)
2326 def _got_results(check_and_repair_results):
2327 check_results = check_and_repair_results.get_pre_repair_results()
2328 return filenode.repair(check_results)
2329 d.addCallback(_got_results)
2333 class IRepairResults(Interface):
2334 """I contain the results of a repair operation."""
2335 def get_successful():
2336 """Returns a boolean: True if the repair made the file healthy, False
2337 if not. Repair failure generally indicates a file that has been
2338 damaged beyond repair."""
2341 class IClient(Interface):
2342 def upload(uploadable):
2343 """Upload some data into a CHK, get back the UploadResults for it.
2344 @param uploadable: something that implements IUploadable
2345 @return: a Deferred that fires with the UploadResults instance.
2346 To get the URI for this file, use results.uri .
2349 def create_mutable_file(contents=""):
2350 """Create a new mutable file (with initial) contents, get back the
2353 @param contents: (bytestring, callable, or None): this provides the
2354 initial contents of the mutable file. If 'contents' is a bytestring,
2355 it will be used as-is. If 'contents' is a callable, it will be
2356 invoked with the new MutableFileNode instance and is expected to
2357 return a bytestring with the initial contents of the file (the
2358 callable can use node.get_writekey() to decide how to encrypt the
2359 initial contents, e.g. for a brand new dirnode with initial
2360 children). contents=None is equivalent to an empty string. Using
2361 content_maker= is more efficient than creating a mutable file and
2362 setting its contents in two separate operations.
2364 @return: a Deferred that fires with an IMutableFileNode instance.
2367 def create_dirnode(initial_children={}):
2368 """Create a new unattached dirnode, possibly with initial children.
2370 @param initial_children: dict with keys that are unicode child names,
2371 and values that are (childnode, metadata) tuples.
2373 @return: a Deferred that fires with the new IDirectoryNode instance.
2376 def create_node_from_uri(uri, rouri):
2377 """Create a new IFilesystemNode instance from the uri, synchronously.
2378 @param uri: a string or IURI-providing instance, or None. This could
2379 be for a LiteralFileNode, a CHK file node, a mutable file
2380 node, or a directory node
2381 @param rouri: a string or IURI-providing instance, or None. If the
2382 main uri is None, I will use the rouri instead. If I
2383 recognize the format of the main uri, I will ignore the
2384 rouri (because it can be derived from the writecap).
2386 @return: an instance that provides IFilesystemNode (or more usefully
2387 one of its subclasses). File-specifying URIs will result in
2388 IFileNode-providing instances, like ImmutableFileNode,
2389 LiteralFileNode, or MutableFileNode. Directory-specifying
2390 URIs will result in IDirectoryNode-providing instances, like
2391 DirectoryNode.
2394 class INodeMaker(Interface):
2395 """The NodeMaker is used to create IFilesystemNode instances. It can
2396 accept a filecap/dircap string and return the node right away. It can
2397 also create new nodes (i.e. upload a file, or create a mutable file)
2398 asynchronously. Once you have one of these nodes, you can use other
2399 methods to determine whether it is a file or directory, and to download
2400 or modify its contents.
2402 The NodeMaker encapsulates all the authorities that these
2403 IFilesystemNodes require (like references to the StorageFarmBroker). Each
2404 Tahoe process will typically have a single NodeMaker, but unit tests may
2405 create simplified/mocked forms for testing purposes.
2407 def create_from_cap(writecap, readcap=None, **kwargs):
2408 """I create an IFilesystemNode from the given writecap/readcap. I can
2409 only provide nodes for existing file/directory objects: use my other
2410 methods to create new objects. I return synchronously."""
2412 def create_mutable_file(contents=None, keysize=None):
2413 """I create a new mutable file, and return a Deferred which will fire
2414 with the IMutableFileNode instance when it is ready. If contents= is
2415 provided (a bytestring), it will be used as the initial contents of
2416 the new file, otherwise the file will contain zero bytes. keysize= is
2417 for use by unit tests, to create mutable files that are smaller than
2420 def create_new_mutable_directory(initial_children={}):
2421 """I create a new mutable directory, and return a Deferred which will
2422 fire with the IDirectoryNode instance when it is ready. If
2423 initial_children= is provided (a dict mapping unicode child name to
2424 (childnode, metadata_dict) tuples), the directory will be populated
2425 with those children, otherwise it will be empty."""
2427 class IClientStatus(Interface):
2428 def list_all_uploads():
2429 """Return a list of uploader objects, one for each upload which
2430 currently has an object available (tracked with weakrefs). This is
2431 intended for debugging purposes."""
2432 def list_active_uploads():
2433 """Return a list of active IUploadStatus objects."""
2434 def list_recent_uploads():
2435 """Return a list of IUploadStatus objects for the most recently
2438 def list_all_downloads():
2439 """Return a list of downloader objects, one for each download which
2440 currently has an object available (tracked with weakrefs). This is
2441 intended for debugging purposes."""
2442 def list_active_downloads():
2443 """Return a list of active IDownloadStatus objects."""
2444 def list_recent_downloads():
2445 """Return a list of IDownloadStatus objects for the most recently
2446 started downloads."""
2448 class IUploadStatus(Interface):
2450 """Return a timestamp (float with seconds since epoch) indicating
2451 when the operation was started."""
2452 def get_storage_index():
2453 """Return a string with the (binary) storage index in use on this
2454 upload. Returns None if the storage index has not yet been
2455 determined."""
2456 def get_size():
2457 """Return an integer with the number of bytes that will eventually
2458 be uploaded for this file. Returns None if the size is not yet known.
2461 """Return True if this upload is using a Helper, False if not."""
2463 """Return a string describing the current state of the upload
2466 """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
2467 each from 0.0 to 1.0 . 'chk' describes how much progress has been
2468 made towards hashing the file to determine a CHK encryption key: if
2469 non-convergent encryption is in use, this will be trivial, otherwise
2470 the whole file must be hashed. 'ciphertext' describes how much of the
2471 ciphertext has been pushed to the helper, and is '1.0' for non-helper
2472 uploads. 'encode_and_push' describes how much of the encode-and-push
2473 process has finished: for helper uploads this is dependent upon the
2474 helper providing progress reports. It might be reasonable to add all
2475 three numbers and report the sum to the user."""
2477 """Return True if the upload is currently active, False if not."""
2479 """Return an instance of UploadResults (which contains timing and
2480 sharemap information). Might return None if the upload is not yet
2483 """Each upload status gets a unique number: this method returns that
2484 number. This provides a handle to this particular upload, so a web
2485 page can generate a suitable hyperlink."""
2487 class IDownloadStatus(Interface):
2489 """Return a timestamp (float with seconds since epoch) indicating
2490 when the operation was started."""
2491 def get_storage_index():
2492 """Return a string with the (binary) storage index in use on this
2493 download. This may be None if there is no storage index (i.e. LIT
2494 files)."""
2496 """Return an integer with the number of bytes that will eventually be
2497 retrieved for this file. Returns None if the size is not yet known.
2500 """Return True if this download is using a Helper, False if not."""
2502 """Return a string describing the current state of the download
2505 """Returns a float (from 0.0 to 1.0) describing the amount of the
2506 download that has completed. This value will remain at 0.0 until the
2507 first byte of plaintext is pushed to the download target."""
2509 """Return True if the download is currently active, False if not."""
2511 """Each download status gets a unique number: this method returns
2512 that number. This provides a handle to this particular download, so a
2513 web page can generate a suitable hyperlink."""
2515 class IServermapUpdaterStatus(Interface):
2517 class IPublishStatus(Interface):
2519 class IRetrieveStatus(Interface):
2522 class NotCapableError(Exception):
2523 """You have tried to write to a read-only node."""
2525 class BadWriteEnablerError(Exception):
2528 class RIControlClient(RemoteInterface):
2530 def wait_for_client_connections(num_clients=int):
2531 """Do not return until we have connections to at least NUM_CLIENTS
2535 def upload_from_file_to_uri(filename=str,
2536 convergence=ChoiceOf(None,
2537 StringConstraint(2**20))):
2538 """Upload a file to the grid. This accepts a filename (which must be
2539 absolute) that points to a file on the node's local disk. The node will
2540 read the contents of this file, upload it to the grid, then return the
2541 URI at which it was uploaded. If convergence is None then a random
2542 encryption key will be used, else the plaintext will be hashed, then
2543 that hash will be mixed together with the "convergence" string to form
2544 the encryption key.
2548 def download_from_uri_to_file(uri=URI, filename=str):
2549 """Download a file from the grid, placing it on the node's local disk
2550 at the given filename (which must be absolute[?]). Returns the
2551 absolute filename where the file was written."""
2556 def get_memory_usage():
2557 """Return a dict describes the amount of memory currently in use. The
2558 keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
2559 measuring memory consupmtion in bytes."""
2560 return DictOf(str, int)
2562 def speed_test(count=int, size=int, mutable=Any()):
2563 """Write 'count' tempfiles to disk, all of the given size. Measure
2564 how long (in seconds) it takes to upload them all to the servers.
2565 Then measure how long it takes to download all of them. If 'mutable'
2566 is 'create', time creation of mutable files. If 'mutable' is
2567 'upload', then time access to the same mutable file instead of
2568 creating new ones.
2570 Returns a tuple of (upload_time, download_time).
2572 return (float, float)
2574 def measure_peer_response_time():
2575 """Send a short message to each connected peer, and measure the time
2576 it takes for them to respond to it. This is a rough measure of the
2577 application-level round trip time.
2579 @return: a dictionary mapping peerid to a float (RTT time in seconds)
2582 return DictOf(str, float)
2584 UploadResults = Any() #DictOf(str, str)
2586 class RIEncryptedUploadable(RemoteInterface):
2587 __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
2592 def get_all_encoding_parameters():
2593 return (int, int, int, long)
2595 def read_encrypted(offset=Offset, length=ReadSize):
2602 class RICHKUploadHelper(RemoteInterface):
2603 __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
2607 Return a dictionary of version information.
2609 return DictOf(str, Any())
2611 def upload(reader=RIEncryptedUploadable):
2612 return UploadResults
2615 class RIHelper(RemoteInterface):
2616 __remote_name__ = "RIHelper.tahoe.allmydata.com"
2620 Return a dictionary of version information.
2622 return DictOf(str, Any())
2624 def upload_chk(si=StorageIndex):
2625 """See if a file with a given storage index needs uploading. The
2626 helper will ask the appropriate storage servers to see if the file
2627 has already been uploaded. If so, the helper will return a set of
2628 'upload results' that includes whatever hashes are needed to build
2629 the read-cap, and perhaps a truncated sharemap.
2631 If the file has not yet been uploaded (or if it was only partially
2632 uploaded), the helper will return an empty upload-results dictionary
2633 and also an RICHKUploadHelper object that will take care of the
2634 upload process. The client should call upload() on this object and
2635 pass it a reference to an RIEncryptedUploadable object that will
2636 provide ciphertext. When the upload is finished, the upload() method
2637 will finish and return the upload results.
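From the client side the flow is roughly (a sketch, in
inlineCallbacks style)::

 (res, upload_helper) = yield helper.callRemote("upload_chk", si)
 if upload_helper is not None:
     res = yield upload_helper.callRemote("upload", reader)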
2639 return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
2642 class RIStatsProvider(RemoteInterface):
2643 __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
2645 Provides access to statistics and monitoring information.
2649 def get_stats():
2650 """returns a dictionary containing 'counters' and 'stats', each a
2651 dictionary with string counter/stat name keys, and numeric or None values.
2652 counters are monotonically increasing measures of work done, and
2653 stats are instantaneous measures (potentially time averaged
2654 internally)."""
2656 return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None)))
2658 class RIStatsGatherer(RemoteInterface):
2659 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
2661 Provides a monitoring service for centralised collection of stats
2664 def provide(provider=RIStatsProvider, nickname=str):
2666 @param provider: a stats collector instance which should be polled
2667 periodically by the gatherer to collect stats.
2668 @param nickname: a name useful to identify the provided client
2673 class IStatsProducer(Interface):
2675 def get_stats():
2676 """returns a dictionary, with str keys representing the names of stats
2677 to be monitored, and numeric values.
2680 class RIKeyGenerator(RemoteInterface):
2681 __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
2683 Provides a service offering to make RSA key pairs.
2686 def get_rsa_key_pair(key_size=int):
2688 @param key_size: the size of the signature key.
2689 @return: tuple(verifying_key, signing_key)
2691 return TupleOf(str, str)
2694 class FileTooLargeError(Exception):
2697 class IValidatedThingProxy(Interface):
2699 """ Acquire a thing and validate it. Return a deferred which is
2700 eventually fired with self if the thing is valid or errbacked if it
2701 can't be acquired or validated."""
2703 class InsufficientVersionError(Exception):
2704 def __init__(self, needed, got):
2705 self.needed = needed
2706 self.got = got
2707 def __repr__(self):
2708 return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
2709 self.got)
2711 class EmptyPathnameComponentError(Exception):
2712 """The webapi disallows empty pathname components."""