from zope.interface import Interface, Attribute
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable
HASH_SIZE = 32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file

DEFAULT_MAX_SEGMENT_SIZE = 128*1024

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16 EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2 GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable share modifications
LeaseRenewSecret = Hash # used to protect lease renewal requests
LeaseCancelSecret = Hash # was used to protect lease cancellation requests

class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """

    def abort():
        """Abandon all the data that has been written.
        """

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share()
        that is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """

TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt, nop"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset))) # new_length
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
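
# Illustrative sketch, not part of the original interface definitions: how a
# client might assemble a TestAndWriteVectorsForShares value for a basic
# test-and-set on share 0. The variable names here are hypothetical.
def _example_tw_vectors(expected_prefix, new_share_data):
    # Test: the share must currently begin with expected_prefix.
    testv = [(0, len(expected_prefix), "eq", expected_prefix)]
    # Write: replace the share contents starting at offset 0.
    datav = [(0, new_share_data)]
    # new_length=None means "do not truncate after writing".
    return {0: (testv, datav, None)}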

class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              augmented.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket refresh
                             requests. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: This no longer allows lease cancellation, but
                              must still be a unique value identifying the
                              lease. XXX stop relying on it to be unique.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
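
    # A hypothetical client-side sketch (assuming a foolscap RemoteReference
    # 'rref' for this server): ask to allocate shares 0-2 of 1000 bytes each.
    #
    #   d = rref.callRemote("allocate_buckets", storage_index,
    #                       renew_secret, cancel_secret,
    #                       set([0, 1, 2]),  # sharenums
    #                       1000,            # allocated_size
    #                       canary)
    #   # fires with (alreadygot, allocated), e.g. (set([0]), {1: w1, 2: w2})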

    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        bucket).
        """
        return Any() # returns None now, but future versions might change

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket, resetting the timer to 31 days.
        Some networks will use this, some will not. If there is no bucket for
        the given storage_index, IndexError will be raised.

        For mutable shares, if the given renew_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed. For immutable shares, IndexError (without the note)
        will be raised.
        """

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results
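
    # Hypothetical sketch: read the first 8 bytes of every known share for a
    # given storage index, via a foolscap RemoteReference 'rref'.
    #
    #   d = rref.callRemote("slot_readv", storage_index, [], [(0, 8)])
    #   # fires with a dict mapping shnum -> results, one string per
    #   # read-vector element, e.g. {0: [data0], 3: [data3]}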

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """
        General-purpose test-read-and-set operation for mutable slots:
        (1) For submitted shnums, compare the test vectors against extant
            shares, or against an empty share for shnums that do not exist.
        (2) Use the read vectors to extract "old data" from extant shares.
        (3) If all tests in (1) passed, then apply the write vectors
            (possibly creating new shares).
        (4) Return whether the tests passed, and the "old data", which does
            not include any modifications made by the writes.

        The operation does not interleave with other operations on the same
        shareset.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              augmented.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket refresh
                             requests. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: This no longer allows lease cancellation, but
                              must still be a unique value identifying the
                              lease. XXX stop relying on it to be unique.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a
        bool( (read(offset, length)) OPERATOR specimen ) and only performs
        the write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created. Write vectors
        must not overlap (if they do, this will either cause an error or
        apply them in an unspecified order). Duplicate write vectors, with
        the same offset and data, are currently tolerated but are not
        recommended.

        In Tahoe-LAFS v1.8.3 or later (except 1.9.0a1), if you send a write
        vector whose offset is beyond the end of the current data, the space
        between the end of the current data and the beginning of the write
        vector will be filled with zero bytes. In earlier versions the
        contents of this space was unspecified (and might end up containing
        secrets). Storage servers with the new zero-filling behavior will
        advertise a true value for the 'fills-holes-with-zero-bytes' key
        (under 'http://allmydata.org/tahoe/protocols/storage/v1') in their
        version information.

        Each write vector is accompanied by a 'new_length' argument, which
        can be used to truncate the data. If new_length is not None and it is
        less than the current size of the data (after applying all write
        vectors), then the data will be truncated to new_length. If
        new_length==0, the share will be deleted.

        In Tahoe-LAFS v1.8.2 and earlier, new_length could also be used to
        enlarge the file by sending a number larger than the size of the data
        after applying all write vectors. That behavior was not used, and as
        of Tahoe-LAFS v1.8.3 it no longer works and the new_length is ignored
        in that case.

        If a storage client knows that the server supports zero-filling, for
        example from the 'fills-holes-with-zero-bytes' key in its version
        information, it can extend the file efficiently by writing a single
        zero byte just before the new end-of-file. Otherwise it must
        explicitly write zeroes to all bytes between the old and new
        end-of-file. In any case it should avoid sending new_length larger
        than the size of the data after applying all write vectors.
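
        (For example, on a zero-filling server a share can be extended to
        new_size bytes with the single write vector (new_size-1, '\x00').)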

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same read vector is used
        for all shares. This captures the state that was tested by the test
        vector, for extant shares.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))
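
    # Hypothetical client-side sketch of a test-and-set call and its result:
    #
    #   d = rref.callRemote("slot_testv_and_readv_and_writev",
    #                       storage_index,
    #                       (write_enabler, renew_secret, cancel_secret),
    #                       {0: (testv, datav, None)},  # tw_vectors
    #                       [(0, 8)])                   # read vector
    #   def _done(result):
    #       wrote, read_data = result
    #       # wrote is False if any test vector failed; read_data maps
    #       # shnum -> [pre-write data for each read-vector element]
    #   d.addCallback(_done)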

    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        that did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """

class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum, data):
        """
        @param segmentnum=int
        @param data=ShareData: For most segments, this data will be 'blocksize'
                               bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes):
        """@param hashes=ListOf(Hash)
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes):
        """@param blockhashes=ListOf(Hash)
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes):
        """@param sharehashes=ListOf(TupleOf(int, Hash))
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode::

            for k in sorted(dict.keys()):
                assert re.match(r'^[a-zA-Z_\-]+$', k)
                write(k + ':' + netstring(dict[k]))
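
        For example, a minimal (hypothetical) implementation of this
        serialization, where netstring(s) encodes a string s as
        '%d:%s,' % (len(s), s)::

            def serialize_uri_extension(d):
                def netstring(s):
                    return '%d:%s,' % (len(s), s)
                return ''.join([k + ':' + netstring(d[k])
                                for k in sorted(d.keys())])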

        @param data=URIExtensionData
        @return: a Deferred that fires (with None) when the operation completes
        """

    def close():
        """Finish writing and close the bucket. The share is not finalized
        until this method is called: if the uploading client disconnects
        before calling close(), the partially-written share will be
        discarded.

        @return: a Deferred that fires (with None) when the operation completes
        """

class IStorageBucketReader(Interface):
    def get_block_data(blocknum, blocksize, size):
        """Most blocks will be the same size. The last block might be shorter
        than the others.
        @return: ShareData
        """

    def get_crypttext_hashes():
        """@return: ListOf(Hash)"""

    def get_block_hashes(at_least_these=()):
        """@param at_least_these=SetOf(int)
        @return: ListOf(Hash)
        """

    def get_share_hashes():
        """@return: ListOf(TupleOf(int, Hash))"""

    def get_uri_extension():
        """@return: URIExtensionData"""

class IStorageBroker(Interface):
    def get_servers_for_psi(peer_selection_index):
        """@return: list of IServer instances"""

    def get_connected_servers():
        """@return: frozenset of connected IServer instances"""

    def get_known_servers():
        """@return: frozenset of IServer instances"""

    def get_all_serverids():
        """@return: frozenset of serverid strings"""

    def get_nickname_for_serverid(serverid):
        """@return: unicode nickname, or None"""

    # methods moved from IntroducerClient, need review
    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
        each active connection we've established to a remote service. This is
        mostly useful for unit tests that need to wait until a certain number
        of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are actively
        trying to connect to. Each RemoteServiceConnector has the following
        public attributes::

          service_name: the type of service provided, like 'storage'
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection
          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, same
          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web page
        that describes connecting and connected peers.
        """

    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have a connection (to
        one or more services) established. Mostly useful for unit tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """
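
    # A hypothetical sketch of such a hash-based permutation (the real
    # implementation lives elsewhere; the names here are illustrative):
    #
    #   import hashlib
    #   def permute(peers, key):
    #       # peers is a list of (peerid, rref) tuples; sorting by the hash
    #       # of key+peerid gives a repeatable, key-dependent ordering.
    #       return sorted(peers,
    #                     key=lambda p: hashlib.sha256(key + p[0]).digest())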

class IDisplayableServer(Interface):

class IServer(IDisplayableServer):
    """I live in the client, and represent a single server."""
    def start_connecting(tub, trigger_cb):

    def get_rref():
        """Once a server is connected, I return a RemoteReference.
        Before a server is connected for the first time, I return None.

        Note that the rref I return will start producing DeadReferenceErrors
        once the connection is lost.
        """

class IMutableSlotWriter(Interface):
    """
    The interface for a writer around a mutable slot on a remote server.
    """
    def set_checkstring(seqnum_or_checkstring, root_hash=None, salt=None):
        """
        Set the checkstring that I will pass to the remote server when
        writing.

        @param checkstring A packed checkstring to use.

        Note that implementations can differ in which semantics they
        wish to support for set_checkstring -- they can, for example,
        build the checkstring themselves from its constituents, or
        accept a pre-packed checkstring.
        """

    def get_checkstring():
        """
        Get the checkstring that I think currently exists on the remote
        server.
        """

    def put_block(data, segnum, salt):
        """Add a block and salt to the share."""

    def put_encprivkey(encprivkey):
        """Add the encrypted private key to the share."""

    def put_blockhashes(blockhashes):
        """@param blockhashes=list
        Add the block hash tree to the share.
        """

    def put_sharehashes(sharehashes):
        """@param sharehashes=dict
        Add the share hash chain to the share.
        """

    def get_signable():
        """Return the part of the share that needs to be signed."""

    def put_signature(signature):
        """Add the signature to the share."""

    def put_verification_key(verification_key):
        """Add the verification key to the share."""

    def finish_publishing():
        """
        Do anything necessary to finish writing the share to a remote
        server. I require that no further publishing needs to take place
        after this method has been called.
        """
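
    # Hypothetical sketch of the intended calling sequence for a writer 'w'
    # (illustrative only; exact arguments depend on the implementation):
    #
    #   w.set_checkstring(checkstring)
    #   for segnum, (block, salt) in enumerate(blocks_and_salts):
    #       w.put_block(block, segnum, salt)
    #   w.put_encprivkey(encrypted_private_key)
    #   w.put_blockhashes(block_hash_tree)
    #   w.put_sharehashes(share_hash_chain)
    #   w.put_signature(sign(w.get_signable()))
    #   w.put_verification_key(verification_key)
    #   w.finish_publishing()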

class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    # TODO: rename to get_read_cap()
    def get_readonly():
        """Return another IURI instance that represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IVerifierURI(Interface, IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IDirnodeURI(Interface):
    """I am a URI that represents a dirnode."""

class IFileURI(Interface):
    """I am a URI that represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    pass

class IDirectoryURI(Interface):
    pass

class IReadonlyDirectoryURI(Interface):
    pass

class CapConstraintError(Exception):
    """A constraint on a cap was violated."""

class MustBeDeepImmutableError(CapConstraintError):
    """Mutable children cannot be added to an immutable directory.
    Also, caps obtained from an immutable directory can trigger this error
    if they are later found to refer to a mutable object and then used."""

class MustBeReadonlyError(CapConstraintError):
    """Known write caps cannot be specified in a ro_uri field. Also,
    caps obtained from a ro_uri field can trigger this error if they
    are later found to be write caps and then used."""

class MustNotBeUnknownRWError(CapConstraintError):
    """Cannot add an unknown child cap specified in a rw_uri field."""

class IProgress(Interface):
    """
    Remembers progress measured in arbitrary units. Users of these
    instances must call ``set_progress_total`` at least once before
    progress can be valid, and must use the same units for both
    ``set_progress_total`` and ``set_progress`` calls.

    See also:
    :class:`allmydata.util.progress.PercentProgress`
    """

    progress = Attribute(
        "Current amount of progress (in percentage)"
    )

    def set_progress(self, value):
        """
        Sets the current amount of progress.

        Arbitrary units, but must match units used for
        ``set_progress_total``.
        """

    def set_progress_total(self, value):
        """
        Sets the total amount of expected progress.

        Arbitrary units, but must be the same units as used when calling
        ``set_progress()`` on this instance.
        """

class IReadable(Interface):
    """I represent a readable object -- either an immutable file, or a
    specific version of a mutable file.
    """

    def is_readonly():
        """Return False if this reference provides mutable access to the
        given file or directory (i.e. if you can modify it), or True if not.
        Note that even if this reference is read-only, someone else may hold
        a read-write reference to it.

        For an IReadable returned by get_best_readable_version(), this will
        always return True, but for instances of subinterfaces such as
        IMutableFileVersion, it may return False."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file."""

    def get_storage_index():
        """Return the storage index of the file."""

    def get_size():
        """Return the length (in bytes) of this readable object."""

    def download_to_data(progress=None):
        """Download all of the file contents. I return a Deferred that fires
        with the contents as a byte string.

        :param progress: None or IProgress implementer
        """

    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None). It
        is an error to read beyond the end of the file: callers must use
        get_size() and clip any non-default offset= and size= parameters. It
        is permissible to read zero bytes.

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
         (if streaming == False)::
          consumer does p.resumeProducing()
          consumer.write(data)
          consumer does p.resumeProducing()
          consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        See src/allmydata/util/consumer.py for an example of a simple
        download-to-memory consumer.
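
        A rough sketch of such a consumer (hedged; see consumer.py for the
        real one)::

            from zope.interface import implementer
            from twisted.internet.interfaces import IConsumer

            @implementer(IConsumer)
            class MemoryConsumer(object):
                def __init__(self):
                    self.chunks = []
                    self.done = False
                def registerProducer(self, producer, streaming):
                    self.producer = producer
                    if streaming:
                        producer.resumeProducing() # kick things off
                    else:
                        # pull mode: keep asking for data until unregistered
                        while not self.done:
                            producer.resumeProducing()
                def write(self, data):
                    self.chunks.append(data)
                def unregisterProducer(self):
                    self.done = True
        """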

class IWriteable(Interface):
    """
    I define methods that callers can use to update SDMF and MDMF
    mutable files on a Tahoe-LAFS grid.
    """
    # XXX: For the moment, we have only this. It is possible that we
    #      want to move overwrite() and modify() in here too.
    def update(data, offset):
        """
        I write the data from my data argument to the MDMF file,
        starting at offset. I continue writing data until my data
        argument is exhausted, appending data to the file as necessary.
        """
        # assert IMutableUploadable.providedBy(data)
        # to append data: offset=node.get_size_of_best_version()
        # do we want to support compacting MDMF?
        # for an MDMF file, this can be done with O(data.get_size())
        # memory. For an SDMF file, any modification takes
        # O(node.get_size_of_best_version()).

class IMutableFileVersion(IReadable):
    """I provide access to a particular version of a mutable file. The
    access is read/write if I was obtained from a filenode derived from
    a write cap, or read-only if the filenode was derived from a read cap.
    """

    def get_sequence_number():
        """Return the sequence number of this version."""

    def get_servermap():
        """Return the IMutableFileServerMap instance that was used to create
        this object.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    def overwrite(new_contents):
        """Replace the contents of the mutable file, provided that no other
        node has published (or is attempting to publish, concurrently) a
        newer version of the file than this one.

        I will avoid modifying any share that is different than the version
        given by get_sequence_number(). However, if another node is writing
        to the file at the same time as me, I may manage to update some shares
        while they update others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading this version,
        applying the modifier function (or bound method), then uploading
        the new version. This will succeed as long as no other node
        publishes a version between the download and the upload.
        I return a Deferred that fires (with a PublishStatus object) when
        the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_to_data(), the old contents will be from this version,
        but the modifier can use the servermap to make other decisions
        (such as refusing to apply the delta if there are multiple parallel
        versions, or if there is evidence of a newer unrecoverable version).
        'first_time' will be True the first time the modifier is called,
        and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
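
        For example, a modifier that idempotently appends a line might look
        like this (a hypothetical sketch; argument order as described above)::

            def add_line(old_contents, first_time, servermap):
                if old_contents.endswith("new line\n"):
                    return None # change is already present
                return old_contents + "new line\n"
        """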

# The hierarchy looks like this:
#  IFilesystemNode
#   IFileNode
#    IMutableFileNode
#    IImmutableFileNode
#   IDirectoryNode

class IFilesystemNode(Interface):
    def get_cap():
        """Return the strongest 'cap instance' associated with this node.
        (writecap for writeable-mutable files/directories, readcap for
        immutable or readonly-mutable files/directories). To convert this
        into a string, call .to_string() on the result."""

    def get_readcap():
        """Return a readonly cap instance for this node. For immutable or
        readonly nodes, get_cap() and get_readcap() return the same thing."""

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_uri():
        """Return the URI string corresponding to the strongest cap associated
        with this node. If this node is read-only, the URI will only offer
        read-only access. If this node is read-write, the URI will offer
        read-write access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_write_uri():
        """Return the URI string that can be used by others to get write
        access to this node, if it is writeable. If this is a read-only node,
        return None."""

    def get_readonly_uri():
        """Return the URI string that can be used by others to get read-only
        access to this node. The result is a read-only URI, regardless of
        whether this node is read-only or read-write.

        If you have merely read-only access to this node, get_readonly_uri()
        will return the same thing as get_uri().
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files and directories)."""

    def is_readonly():
        """Return False if this reference provides mutable access to the
        given file or directory (i.e. if you can modify it), or True if not.
        Note that even if this reference is read-only, someone else may hold
        a read-write reference to it."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file."""

    def is_unknown():
        """Return True if this is an unknown node."""

    def is_allowed_in_immutable_directory():
        """Return True if this node is allowed as a child of a deep-immutable
        directory. This is true if either the node is of a known-immutable
        type, or it is unknown and read-only.
        """

    def raise_error():
        """Raise any error associated with this node."""

    # XXX: These may not be appropriate outside the context of an IReadable.
    def get_size():
        """Return the length (in bytes) of the data this node represents. For
        directory nodes, I return the size of the backing store. I return
        synchronously and do not consult the network, so for mutable objects,
        I will return the most recently observed size for the object, or None
        if I don't remember a size. Use get_current_size, which returns a
        Deferred, if you want more up-to-date information."""

    def get_current_size():
        """I return a Deferred that fires with the length (in bytes) of the
        data this node represents.
        """

class IFileNode(IFilesystemNode):
    """I am a node that represents a file: a sequence of bytes. I am not a
    container, like IDirectoryNode."""
    def get_best_readable_version():
        """Return a Deferred that fires with an IReadable for the 'best'
        available version of the file. The IReadable provides only read
        access, even if this filenode was derived from a write cap.

        For an immutable file, there is only one version. For a mutable
        file, the 'best' version is the recoverable version with the
        highest sequence number. If no uncoordinated writes have occurred,
        and if enough shares are available, then this will be the most
        recent version that has been uploaded. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def download_best_version(progress=None):
        """Download the contents of the version that would be returned
        by get_best_readable_version(). This is equivalent to calling
        download_to_data() on the IReadable given by that method.

        progress is anything that implements IProgress

        I return a Deferred that fires with a byte string when the file
        has been fully downloaded. To support streaming download, use
        the 'read' method of IReadable. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be returned by
        get_best_readable_version().

        I return a Deferred that fires with an integer. If no version
        is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

class IImmutableFileNode(IFileNode, IReadable):
    """I am a node representing an immutable file. Immutable files have
    only one version."""

class IMutableFileNode(IFileNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node that has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation are
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.

    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """

    def get_best_mutable_version():
        """Return a Deferred that fires with an IMutableFileVersion for
        the 'best' available version of the file. The best version is
        the recoverable version with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has
        been uploaded.

        If no version is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap that was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """

    def upload(new_contents, servermap, progress=None):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        ``progress`` is either None or an IProgress provider

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    def get_version():
        """Returns the mutable file protocol version."""

class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares."""

class NoSharesError(Exception):
    """Download was unable to get any shares at all."""

class DownloadStopped(Exception):
    pass

class UploadUnhappinessError(Exception):
    """Upload was unable to satisfy 'servers_of_happiness'."""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data that is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child that does not exist."""
    def __str__(self):
        # avoid UnicodeEncodeErrors when converting to str
        return self.__repr__()

class ChildOfWrongTypeError(Exception):
    """An operation was attempted on a child of the wrong type (file or directory)."""

class IDirectoryNode(IFilesystemNode):
    """I represent a filesystem node that is a container, with a
    name-to-child mapping, holding the tahoe equivalent of a directory. All
    child names are unicode strings, and all children are some sort of
    IFilesystemNode (a file, subdirectory, or unknown node).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of
        metadata."""

    def has_child(name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not. The child name must
        be a unicode string."""

    def get(name):
        """I return a Deferred that fires with a specific named child node,
        which is an IFilesystemNode. The child name must be a unicode string.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_metadata_for(name):
        """I return a Deferred that fires with the metadata dictionary for
        a specific named child node. The child name must be a unicode string.
        This metadata is stored in the *edge*, not in the child, so it is
        attached to the parent dirnode rather than the child node.
        I raise NoSuchChildError if I do not have a child by that name."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the new
        metadata. The child name must be a unicode string. This metadata is
        stored in the *edge*, not in the child, so it is attached to the
        parent dirnode rather than the child node. I return a Deferred
        (that fires with this dirnode) when the operation is complete.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_child_at_path(path):
        """Transform a child path into an IFilesystemNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
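
        For example (hypothetical), get_child_at_path(u"subdir/file.txt") and
        get_child_at_path([u"subdir", u"file.txt"]) find the same node.
        """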

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IFilesystemNode and metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If you have
        both the writecap and readcap, you should provide both arguments.
        If you have only one cap and don't know whether it is read-only,
        provide it as the writecap argument and leave the readcap as None.
        If you have only one cap that is known to be read-only, provide it
        as the readcap argument and leave the writecap as None.
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_children(entries, overwrite=True):
        """Add multiple children (by writecap+readcap) to a directory node.
        Takes a dictionary, with childname as keys and (writecap, readcap)
        tuples (or (writecap, readcap, metadata) triples) as values. Returns
        a Deferred that fires (with this dirnode) when the operation
        finishes. This is equivalent to calling set_uri() multiple times, but
        is much more efficient. All child names must be unicode strings.
        """

    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IFilesystemNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple children to a directory node. Takes a dict mapping
        unicode childname to (child_node, metadata) tuples. If metadata=None,
        the original metadata is left unmodified. Returns a Deferred that
        fires (with this dirnode) when the operation finishes. This is
        equivalent to calling set_node() multiple times, but is much more
        efficient."""

    def add_file(name, uploadable, metadata=None, overwrite=True, progress=None):
        """I upload a file (using the given IUploadable), then attach the
        resulting ImmutableFileNode to the directory at the given name. I set
        metadata the same way as set_uri and set_node. The child name must be
        a unicode string.

        ``progress`` either provides IProgress or is None

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""

    def delete(name, must_exist=True, must_be_directory=False, must_be_file=False):
        """I remove the child at the specific name. I return a Deferred that
        fires when the operation finishes. The child name must be a unicode
        string. If must_exist is True and I do not have a child by that name,
        I raise NoSuchChildError. If must_be_directory is True and the child
        is a file, or if must_be_file is True and the child is a directory,
        I raise ChildOfWrongTypeError."""

    def create_subdirectory(name, initial_children={}, overwrite=True,
                            mutable=True, mutable_version=None, metadata=None):
        """I create and attach a directory at the given name. The new
        directory can be empty, or it can be populated with children
        according to 'initial_children', which takes a dictionary in the same
        format as set_nodes (i.e. mapping unicode child name to (childnode,
        metadata) tuples). The child name must be a unicode string. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""

    def build_manifest():
        """I generate a table of everything reachable from this directory.
        I also compute deep-stats as described below.

        I return a Monitor. The Monitor's results will be a dictionary with
        four elements::

         res['manifest']: a list of (path, cap) tuples for all nodes
                          (directories and files) reachable from this one.
                          'path' will be a tuple of unicode strings. The
                          origin dirnode will be represented by an empty path
                          tuple.
         res['verifycaps']: a list of (printable) verifycap strings, one for
                            each reachable non-LIT node. This is a set:
                            it will contain no duplicates.
         res['storage-index']: a list of (base32) storage index strings,
                               one for each reachable non-LIT node. This is
                               a set: it will contain no duplicates.
         res['stats']: a dictionary, the same that is generated by
                       start_deep_stats() below.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.
        """

    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

         count-immutable-files: count of how many CHK files are in the set
         count-mutable-files: same, for mutable files (does not include
                              directories)
         count-literal-files: same, for LIT files
         count-files: sum of the above three

         count-directories: count of directories

         size-immutable-files: total bytes for all CHK files in the set
         size-mutable-files (TODO): same, for current version of all mutable
                                    files, does not include directories
         size-literal-files: same, for LIT files
         size-directories: size of mutable files used by directories

         largest-directory: number of bytes in the largest directory
         largest-directory-children: number of children in the largest
                                     directory
         largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """

class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' to use less CPU). The encoding math will
        be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).
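
        For example, set_params(6000, 3, 10) splits each 6000-byte block
        into 10 shares of 2000 bytes each (6000/3), any 3 of which suffice
        to recover the block; the total storage consumed is roughly
        10*6000/3 == 20000 bytes, a ~3.3x expansion.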
1447 In addition, 'max_shares' must be equal to or greater than
1448 'required_shares'. Of course, setting them to be equal causes
encode() to degenerate into a particularly slow form of the 'split' utility.
1452 See encode() for more details about how these parameters are used.
set_params() must be called before any other ICodecEncoder methods may be invoked.
1459 """Return the 3-tuple of data_size, required_shares, max_shares"""
1461 def get_encoder_type():
1462 """Return a short string that describes the type of this encoder.
1464 There is required to be a global table of encoder classes. This method
1465 returns an index into this table; the value at this index is an
1466 encoder class, and this encoder is an instance of that class.
1469 def get_block_size():
1470 """Return the length of the shares that encode() will produce.
1473 def encode_proposal(data, desired_share_ids=None):
1474 """Encode some data.
1476 'data' must be a string (or other buffer object), and len(data) must
1477 be equal to the 'data_size' value passed earlier to set_params().
1479 This will return a Deferred that will fire with two lists. The first
1480 is a list of shares, each of which is a string (or other buffer
1481 object) such that len(share) is the same as what get_share_size()
1482 returned earlier. The second is a list of shareids, in which each is
1483 an integer. The lengths of the two lists will always be equal to each
1484 other. The user should take care to keep each share closely
1485 associated with its shareid, as one is useless without the other.
1487 The length of this output list will normally be the same as the value
1488 provided to the 'max_shares' parameter of set_params(). This may be
1489 different if 'desired_share_ids' is provided.
1491 'desired_share_ids', if provided, is required to be a sequence of
1492 ints, each of which is required to be >= 0 and < max_shares. If not
1493 provided, encode() will produce 'max_shares' shares, as if
1494 'desired_share_ids' were set to range(max_shares). You might use this
1495 if you initially thought you were going to use 10 peers, started
1496 encoding, and then two of the peers dropped out: you could use
1497 desired_share_ids= to skip the work (both memory and CPU) of
1498 producing shares for the peers that are no longer available.
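# Illustrative sketch (not part of the interface): a single
# encode_proposal() call. make_encoder() is a hypothetical factory for an
# ICodecEncoder provider (the real codec lives in allmydata.codec).
#
#   enc = make_encoder()
#   enc.set_params(1000, 2, 4)        # 1000-byte blocks, k=2, n=4
#   d = enc.encode_proposal("\x00" * 1000)
#   def _done((shares, shareids)):
#       assert len(shares) == 4       # n shares, since no desired_share_ids
#       assert len(shares[0]) == 500  # data_size/k bytes per share (zfec)
#   d.addCallback(_done)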
1502 def encode(inshares, desired_share_ids=None):
1503 """Encode some data. This may be called multiple times. Each call is
1506 inshares is a sequence of length required_shares, containing buffers
1507 (i.e. strings), where each buffer contains the next contiguous
1508 non-overlapping segment of the input data. Each buffer is required to
1509 be the same length, and the sum of the lengths of the buffers is
1510 required to be exactly the data_size promised by set_params(). (This
1511 implies that the data has to be padded before being passed to
1512 encode(), unless of course it already happens to be an even multiple
1513 of required_shares in length.)
1515 Note: the requirement to break up your data into
1516 'required_shares' chunks of exactly the right length before
calling encode() is surprising from the point of view of a user
1518 who doesn't know how FEC works. It feels like an
1519 implementation detail that has leaked outside the abstraction
1520 barrier. Is there a use case in which the data to be encoded
1521 might already be available in pre-segmented chunks, such that
1522 it is faster or less work to make encode() take a list rather
1523 than splitting a single string?
1525 Yes, there is: suppose you are uploading a file with K=64,
1526 N=128, segsize=262,144. Then each in-share will be of size
1527 4096. If you use this .encode() API then your code could first
1528 read each successive 4096-byte chunk from the file and store
1529 each one in a Python string and store each such Python string
1530 in a Python list. Then you could call .encode(), passing that
1531 list as "inshares". The encoder would generate the other 64
1532 "secondary shares" and return to you a new list containing
1533 references to the same 64 Python strings that you passed in
(as the primary shares) plus references to the new 64 Python strings.
1537 (You could even imagine that your code could use readv() so
1538 that the operating system can arrange to get all of those
1539 bytes copied from the file into the Python list of Python
1540 strings as efficiently as possible instead of having a loop
1541 written in C or in Python to copy the next part of the file
1542 into the next string.)
1544 On the other hand if you instead use the .encode_proposal()
1545 API (above), then your code can first read in all of the
1546 262,144 bytes of the segment from the file into a Python
1547 string, then call .encode_proposal() passing the segment data
1548 as the "data" argument. The encoder would basically first
1549 split the "data" argument into a list of 64 in-shares of 4096
bytes each, and then do the same thing that .encode() does. So
1551 this would result in a little bit more copying of data and a
1552 little bit higher of a "maximum memory usage" during the
1553 process, although it might or might not make a practical
1554 difference for our current use cases.
1556 Note that "inshares" is a strange name for the parameter if
1557 you think of the parameter as being just for feeding in data
1558 to the codec. It makes more sense if you think of the result
1559 of this encoding as being the set of shares from inshares plus
1560 an extra set of "secondary shares" (or "check shares"). It is
1561 a surprising name! If the API is going to be surprising then
1562 the name should be surprising. If we switch to
encode_proposal() above then we should also switch to an
unsurprising name for this method.
1566 'desired_share_ids', if provided, is required to be a sequence of
1567 ints, each of which is required to be >= 0 and < max_shares. If not
1568 provided, encode() will produce 'max_shares' shares, as if
1569 'desired_share_ids' were set to range(max_shares). You might use this
1570 if you initially thought you were going to use 10 peers, started
1571 encoding, and then two of the peers dropped out: you could use
1572 desired_share_ids= to skip the work (both memory and CPU) of
1573 producing shares for the peers that are no longer available.
1575 For each call, encode() will return a Deferred that fires with two
1576 lists, one containing shares and the other containing the shareids.
1577 The get_share_size() method can be used to determine the length of
1578 the share strings returned by encode(). Each shareid is a small
1579 integer, exactly as passed into 'desired_share_ids' (or
1580 range(max_shares), if desired_share_ids was not provided).
1582 The shares and their corresponding shareids are required to be kept
1583 together during storage and retrieval. Specifically, the share data is
1584 useless by itself: the decoder needs to be told which share is which
1585 by providing it with both the shareid and the actual share data.
1587 This function will allocate an amount of memory roughly equal to::
1589 (max_shares - required_shares) * get_share_size()
1591 When combined with the memory that the caller must allocate to
1592 provide the input data, this leads to a memory footprint roughly
1593 equal to the size of the resulting encoded shares (i.e. the expansion
1594 factor times the size of the input segment).
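# Worked example of the estimate above (illustrative numbers): with
# set_params(131072, 3, 10), each share is 131072/3 ~= 43691 bytes, so
# encode() allocates roughly (10-3) * 43691 ~= 306 kB for the new shares.
# Together with the 128 KiB input segment that is ~437 kB, i.e. the
# expansion factor (10/3) times the segment size.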
# Rejected ideas:
#
# returning a list of (shareidN,shareN) tuples instead of a pair of
1600 # lists (shareids..,shares..). Brian thought the tuples would
1601 # encourage users to keep the share and shareid together throughout
1602 # later processing, Zooko pointed out that the code to iterate
1603 # through two lists is not really more complicated than using a list
1604 # of tuples and there's also a performance improvement
1606 # having 'data_size' not required to be an integral multiple of
1607 # 'required_shares'. Doing this would require encode() to perform
1608 # padding internally, and we'd prefer to have any padding be done
1609 # explicitly by the caller. Yes, it is an abstraction leak, but
1610 # hopefully not an onerous one.
1613 class ICodecDecoder(Interface):
1614 def set_params(data_size, required_shares, max_shares):
1615 """Set the params. They have to be exactly the same ones that were
1616 used for encoding."""
1618 def get_needed_shares():
1619 """Return the number of shares needed to reconstruct the data.
1620 set_params() is required to be called before this."""
1622 def decode(some_shares, their_shareids):
1623 """Decode a partial list of shares into data.
1625 'some_shares' is required to be a sequence of buffers of sharedata, a
subset of the shares returned by ICodecEncoder.encode(). Each share is
1627 required to be of the same length. The i'th element of their_shareids
1628 is required to be the shareid of the i'th buffer in some_shares.
1630 This returns a Deferred that fires with a sequence of buffers. This
1631 sequence will contain all of the segments of the original data, in
1632 order. The sum of the lengths of all of the buffers will be the
'data_size' value passed into the original ICodecEncoder.set_params()
1634 call. To get back the single original input block of data, use
1635 ''.join(output_buffers), or you may wish to simply write them in
1636 order to an output file.
1638 Note that some of the elements in the result sequence may be
1639 references to the elements of the some_shares input sequence. In
1640 particular, this means that if those share objects are mutable (e.g.
1641 arrays) and if they are changed, then both the input (the
1642 'some_shares' parameter) and the output (the value given when the
1643 deferred is triggered) will change.
The length of 'some_shares' is required to be exactly the value of
'required_shares' passed into the original ICodecEncoder.set_params() call.
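# Illustrative sketch (not part of the interface), continuing the encoder
# example earlier: any k of the shares suffice. 'dec' is assumed to
# provide ICodecDecoder; shares/shareids come from a previous encode call.
#
#   dec.set_params(1000, 2, 4)   # must match the encoder's parameters
#   d = dec.decode([shares[1], shares[3]], [shareids[1], shareids[3]])
#   def _done(buffers):
#       assert "".join(buffers) == original_data   # hypothetical variable
#   d.addCallback(_done)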
1651 class IEncoder(Interface):
1652 """I take an object that provides IEncryptedUploadable, which provides
1653 encrypted data, and a list of shareholders. I then encode, hash, and
1654 deliver shares to those shareholders. I will compute all the necessary
1655 Merkle hash trees that are necessary to validate the crypttext that
1656 eventually comes back from the shareholders. I provide the URI Extension
Block Hash, and the encoding parameters, both of which must be included
in the URI.
1660 I do not choose shareholders, that is left to the IUploader. I must be
1661 given a dict of RemoteReferences to storage buckets that are ready and
1662 willing to receive data.
1666 """Specify the number of bytes that will be encoded. This must be
performed before get_serialized_params() can be called.
1670 def set_encrypted_uploadable(u):
1671 """Provide a source of encrypted upload data. 'u' must implement
1672 IEncryptedUploadable.
1674 When this is called, the IEncryptedUploadable will be queried for its
1675 length and the storage_index that should be used.
1677 This returns a Deferred that fires with this Encoder instance.
1679 This must be performed before start() can be called.
1682 def get_param(name):
1683 """Return an encoding parameter, by name.
1685 'storage_index': return a string with the (16-byte truncated SHA-256
hash) storage index to which these shares should be pushed.
1689 'share_counts': return a tuple describing how many shares are used:
1690 (needed_shares, servers_of_happiness, total_shares)
'num_segments': return an int with the number of segments that will be encoded.
1695 'segment_size': return an int with the size of each segment.
1697 'block_size': return the size of the individual blocks that will
1698 be delivered to a shareholder's put_block() method. By
1699 knowing this, the shareholder will be able to keep all
1700 blocks in a single file and still provide random access
1701 when reading them. # TODO: can we avoid exposing this?
1703 'share_size': an int with the size of the data that will be stored
on each shareholder. This is the aggregate amount of data
1705 that will be sent to the shareholder, summed over all
1706 the put_block() calls I will ever make. It is useful to
1707 determine this size before asking potential
1708 shareholders whether they will grant a lease or not,
1709 since their answers will depend upon how much space we
1710 need. TODO: this might also include some amount of
1711 overhead, like the size of all the hashes. We need to
1712 decide whether this is useful or not.
1714 'serialized_params': a string with a concise description of the
1715 codec name and its parameters. This may be passed
1716 into the IUploadable to let it make sure that
1717 the same file encoded with different parameters
1718 will result in different storage indexes.
1720 Once this is called, set_size() and set_params() may not be called.
1723 def set_shareholders(shareholders, servermap):
1724 """Tell the encoder where to put the encoded shares. 'shareholders'
1725 must be a dictionary that maps share number (an integer ranging from
1726 0 to n-1) to an instance that provides IStorageBucketWriter.
1727 'servermap' is a dictionary that maps share number (as defined above)
to a set of peerids. This must be performed before start() can be called.
1732 """Begin the encode/upload process. This involves reading encrypted
1733 data from the IEncryptedUploadable, encoding it, uploading the shares
1734 to the shareholders, then sending the hash trees.
1736 set_encrypted_uploadable() and set_shareholders() must be called
1737 before this can be invoked.
1739 This returns a Deferred that fires with a verify cap when the upload
1740 process is complete. The verifycap, plus the encryption key, is
1741 sufficient to construct the read cap.
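# Illustrative sketch (not part of the interface) of driving an encoder.
# 'encoder', 'encrypted_uploadable', 'buckets', and 'servermap' are
# hypothetical: buckets maps share numbers to IStorageBucketWriter
# providers chosen by the uploader.
#
#   d = encoder.set_encrypted_uploadable(encrypted_uploadable)
#   def _ready(enc):                  # fires with the Encoder instance
#       enc.set_shareholders(buckets, servermap)
#       return enc.start()            # Deferred that fires with a verifycap
#   d.addCallback(_ready)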
1745 class IDecoder(Interface):
1746 """I take a list of shareholders and some setup information, then
1747 download, validate, decode, and decrypt data from them, writing the
1748 results to an output file.
1750 I do not locate the shareholders, that is left to the IDownloader. I must
be given a dict of RemoteReferences to storage buckets that are ready to
send data.
1756 """I take a file-like object (providing write and close) to which all
1757 the plaintext data will be written.
TODO: producer/consumer. Maybe write() should return a Deferred that
1760 indicates when it will accept more data? But probably having the
1761 IDecoder be a producer is easier to glue to IConsumer pieces.
1764 def set_shareholders(shareholders):
1765 """I take a dictionary that maps share identifiers (small integers)
to RemoteReferences that provide RIBucketReader. This must be called before start().
1770 """I start the download. This process involves retrieving data and
1771 hash chains from the shareholders, using the hashes to validate the
1772 data, decoding the shares into segments, decrypting the segments,
1773 then writing the resulting plaintext to the output file.
I return a Deferred that will fire (with self) when the download is complete.
1780 class IDownloadTarget(Interface):
1781 # Note that if the IDownloadTarget is also an IConsumer, the downloader
1782 # will register itself as a producer. This allows the target to invoke
1783 # downloader.pauseProducing, resumeProducing, and stopProducing.
1785 """Called before any calls to write() or close(). If an error
1786 occurs before any data is available, fail() may be called without
1787 a previous call to open().
1789 'size' is the length of the file being downloaded, in bytes."""
1792 """Output some data to the target."""
1795 """Inform the target that there is no more data to be written."""
1798 """fail() is called to indicate that the download has failed. 'why'
1799 is a Failure object indicating what went wrong. No further methods
1800 will be invoked on the IDownloadTarget after fail()."""
1802 def register_canceller(cb):
1803 """The CiphertextDownloader uses this to register a no-argument function
1804 that the target can call to cancel the download. Once this canceller
1805 is invoked, no further calls to write() or close() will be made."""
1808 """When the CiphertextDownloader is done, this finish() function will be
1809 called. Whatever it returns will be returned to the invoker of
1810 Downloader.download.
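# Illustrative sketch (not part of the interface): a minimal target that
# writes plaintext to a local file. Producer/consumer support is omitted,
# and 'implements' comes from zope.interface.
#
#   class FileTarget:
#       implements(IDownloadTarget)
#       def __init__(self, filename):
#           self.filename = filename
#       def open(self, size):
#           self.f = open(self.filename, "wb")
#       def write(self, data):
#           self.f.write(data)
#       def close(self):
#           self.f.close()
#       def fail(self, why):
#           self.f.close()
#       def register_canceller(self, cb):
#           pass   # this simple target never cancels
#       def finish(self):
#           return self.filename  # handed to Downloader.download's caller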
1814 class IDownloader(Interface):
1815 def download(uri, target):
1816 """Perform a CHK download, sending the data to the given target.
1817 'target' must provide IDownloadTarget.
1819 Returns a Deferred that fires (with the results of target.finish)
1820 when the download is finished, or errbacks if something went wrong."""
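# Illustrative usage (assuming the hypothetical FileTarget sketched above
# and a 'downloader' that provides IDownloader):
#
#   d = downloader.download(uri, FileTarget("out.dat"))
#   # ...fires with FileTarget.finish()'s return value, here "out.dat"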
1823 class IEncryptedUploadable(Interface):
1824 def set_upload_status(upload_status):
1825 """Provide an IUploadStatus object that should be filled with status
1826 information. The IEncryptedUploadable is responsible for setting
1827 key-determination progress ('chk'), size, storage_index, and
1828 ciphertext-fetch progress. It may delegate some of this
1829 responsibility to others, in particular to the IUploadable."""
1832 """This behaves just like IUploadable.get_size()."""
1834 def get_all_encoding_parameters():
1835 """Return a Deferred that fires with a tuple of
1836 (k,happy,n,segment_size). The segment_size will be used as-is, and
1837 must match the following constraints: it must be a multiple of k, and
1838 it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored as padding).
This usually passes through to the IUploadable method of the same name.
1845 The encoder strictly obeys the values returned by this method. To
1846 make an upload use non-default encoding parameters, you must arrange
1847 to control the values that this method returns.
1850 def get_storage_index():
1851 """Return a Deferred that fires with a 16-byte storage index.
1854 def read_encrypted(length, hash_only):
1855 """This behaves just like IUploadable.read(), but returns crypttext
1856 instead of plaintext. If hash_only is True, then this discards the
1857 data (and returns an empty list); this improves efficiency when
1858 resuming an interrupted upload (where we need to compute the
1859 plaintext hashes, but don't need the redundant encrypted data)."""
1862 """Just like IUploadable.close()."""
1865 class IUploadable(Interface):
1866 def set_upload_status(upload_status):
1867 """Provide an IUploadStatus object that should be filled with status
1868 information. The IUploadable is responsible for setting
1869 key-determination progress ('chk')."""
1871 def set_default_encoding_parameters(params):
1872 """Set the default encoding parameters, which must be a dict mapping
1873 strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1874 'max_segment_size'. These might have an influence on the final
1875 encoding parameters returned by get_all_encoding_parameters(), if the
1876 Uploadable doesn't have more specific preferences.
1878 This call is optional: if it is not used, the Uploadable will use
1879 some built-in defaults. If used, this method must be called before
1880 any other IUploadable methods to have any effect.
1884 """Return a Deferred that will fire with the length of the data to be
1885 uploaded, in bytes. This will be called before the data is actually
1886 used, to compute encoding parameters.
1889 def get_all_encoding_parameters():
1890 """Return a Deferred that fires with a tuple of
1891 (k,happy,n,segment_size). The segment_size will be used as-is, and
1892 must match the following constraints: it must be a multiple of k, and
1893 it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored as padding).
1897 The relative values of k and n allow some IUploadables to request
better redundancy than others (in exchange for consuming more space in the grid).
1901 Larger values of segment_size reduce hash overhead, while smaller
1902 values reduce memory footprint and cause data to be delivered in
1903 smaller pieces (which may provide a smoother and more predictable
1904 download experience).
1906 The encoder strictly obeys the values returned by this method. To
1907 make an upload use non-default encoding parameters, you must arrange
1908 to control the values that this method returns. One way to influence
1909 them may be to call set_encoding_parameters() before calling
1910 get_all_encoding_parameters().
1913 def get_encryption_key():
1914 """Return a Deferred that fires with a 16-byte AES key. This key will
be used to encrypt the data. The key will also be hashed to derive the
storage index.
1918 Uploadables that want to achieve convergence should hash their file
1919 contents and the serialized_encoding_parameters to form the key
1920 (which of course requires a full pass over the data). Uploadables can
use the upload.ConvergentUploadMixin class to achieve this automatically.
1924 Uploadables that do not care about convergence (or do not wish to
1925 make multiple passes over the data) can simply return a
1926 strongly-random 16 byte string.
1928 get_encryption_key() may be called multiple times: the IUploadable is
1929 required to return the same value each time.
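# Illustrative sketch (not part of the interface): a much-simplified
# convergent key derivation. The real implementation uses tagged SHA-256d
# hashes and a per-client convergence secret (see allmydata.util.hashutil);
# this only shows the idea of hashing parameters plus content down to a
# 16-byte AES key.
#
#   import hashlib
#   def simplistic_convergent_key(params_str, file_contents):
#       h = hashlib.sha256()
#       h.update(params_str)      # serialized encoding parameters
#       h.update(file_contents)   # requires a full pass over the data
#       return h.digest()[:16]    # 16-byte AES key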
1933 """Return a Deferred that fires with a list of strings (perhaps with
1934 only a single element) that, when concatenated together, contain the
1935 next 'length' bytes of data. If EOF is near, this may provide fewer
1936 than 'length' bytes. The total number of bytes provided by read()
1937 before it signals EOF must equal the size provided by get_size().
1939 If the data must be acquired through multiple internal read
1940 operations, returning a list instead of a single string may help to
1941 reduce string copies. However, the length of the concatenated strings
1942 must equal the amount of data requested, unless EOF is encountered.
1943 Long reads, or short reads without EOF, are not allowed. read()
1944 should return the same amount of data as a local disk file read, just
1945 in a different shape and asynchronously.
1947 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1948 so a 10kB file means length=3kB, 100kB file means length=30kB,
1949 and >=1MB file means length=300kB.
1951 This method provides for a single full pass through the data. Later
1952 use cases may desire multiple passes or access to only parts of the
1953 data (such as a mutable file making small edits-in-place). This API
1954 will be expanded once those use cases are better understood.
1958 """The upload is finished, and whatever filehandle was in use may be
1962 class IMutableUploadable(Interface):
1964 I represent content that is due to be uploaded to a mutable filecap.
1966 # This is somewhat simpler than the IUploadable interface above
1967 # because mutable files do not need to be concerned with possibly
1968 # generating a CHK, nor with per-file keys. It is a subset of the
1969 # methods in IUploadable, though, so we could just as well implement
1970 # the mutable uploadables as IUploadables that don't happen to use
1971 # those methods (with the understanding that the unused methods will
1972 # never be called on such objects)
Returns a Deferred that fires with the size of the content held by the uploadable.
1981 Returns a list of strings that, when concatenated, are the next
1982 length bytes of the file, or fewer if there are fewer bytes
1983 between the current location and the end of the file.
1988 The process that used the Uploadable is finished using it, so
1989 the uploadable may be closed.
1993 class IUploadResults(Interface):
1994 """I am returned by immutable upload() methods and contain the results of
1997 Note that some of my methods return empty values (0 or an empty dict)
1998 when called for non-distributed LIT files."""
2000 def get_file_size():
2001 """Return the file size, in bytes."""
2004 """Return the (string) URI of the object uploaded, a CHK readcap."""
2006 def get_ciphertext_fetched():
2007 """Return the number of bytes fetched by the helpe for this upload,
or 0 if the helper did not need to fetch any bytes (or if there was
no helper at all).
2011 def get_preexisting_shares():
2012 """Return the number of shares that were already present in the grid."""
2014 def get_pushed_shares():
2015 """Return the number of shares that were uploaded."""
2018 """Return a dict mapping share identifier to set of IServer
2019 instances. This indicates which servers were given which shares. For
2020 immutable files, the shareid is an integer (the share number, from 0
2021 to N-1). For mutable files, it is a string of the form
2022 'seq%d-%s-sh%d', containing the sequence number, the roothash, and
2023 the share number."""
2025 def get_servermap():
2026 """Return dict mapping IServer instance to a set of share numbers."""
2029 """Return dict of timing information, mapping name to seconds. All
2031 total : total upload time, start to finish
2032 storage_index : time to compute the storage index
2033 peer_selection : time to decide which peers will be used
2034 contacting_helper : initial helper query to upload/no-upload decision
2035 helper_total : initial helper query to helper finished pushing
2036 cumulative_fetch : helper waiting for ciphertext requests
2037 total_fetch : helper start to last ciphertext response
2038 cumulative_encoding : just time spent in zfec
2039 cumulative_sending : just time spent waiting for storage servers
2040 hashes_and_close : last segment push to shareholder close
2041 total_encode_and_push : first encode to shareholder close
2044 def get_uri_extension_data():
2045 """Return the dict of UEB data created for this file."""
2047 def get_verifycapstr():
2048 """Return the (string) verify-cap URI for the uploaded object."""
2051 class IDownloadResults(Interface):
2052 """I am created internally by download() methods. I contain a number of
2053 public attributes that contain details about the download process.::
2055 .file_size : the size of the file, in bytes
2056 .servers_used : set of server peerids that were used during download
2057 .server_problems : dict mapping server peerid to a problem string. Only
servers that had problems (bad hashes, disconnects) are listed here.
2060 .servermap : dict mapping server peerid to a set of share numbers. Only
2061 servers that had any shares are listed here.
2062 .timings : dict of timing information, mapping name to seconds (float)
2063 peer_selection : time to ask servers about shares
2064 servers_peer_selection : dict of peerid to DYHB-query time
2065 uri_extension : time to fetch a copy of the URI extension block
2066 hashtrees : time to fetch the hash trees
2067 segments : time to fetch, decode, and deliver segments
2068 cumulative_fetch : time spent waiting for storage servers
2069 cumulative_decode : just time spent in zfec
2070 cumulative_decrypt : just time spent in decryption
2071 total : total download time, start to finish
2072 fetch_per_server : dict of server to list of per-segment fetch times
2076 class IUploader(Interface):
2077 def upload(uploadable):
2078 """Upload the file. 'uploadable' must impement IUploadable. This
2079 returns a Deferred that fires with an IUploadResults instance, from
2080 which the URI of the file can be obtained as results.uri ."""
2083 class ICheckable(Interface):
2084 def check(monitor, verify=False, add_lease=False):
2085 """Check up on my health, optionally repairing any problems.
2087 This returns a Deferred that fires with an instance that provides
ICheckResults, or None if the object is non-distributed (i.e. LIT files).
2091 The monitor will be checked periodically to see if the operation has
2092 been cancelled. If so, no new queries will be sent, and the Deferred
will fire (with an OperationCancelledError) immediately.
2095 Filenodes and dirnodes (which provide IFilesystemNode) are also
2096 checkable. Instances that represent verifier-caps will be checkable
2097 but not downloadable. Some objects (like LIT files) do not actually
2098 live in the grid, and their checkers return None (non-distributed
2099 files are always healthy).
2101 If verify=False, a relatively lightweight check will be performed: I
2102 will ask all servers if they have a share for me, and I will believe
2103 whatever they say. If there are at least N distinct shares on the
2104 grid, my results will indicate r.is_healthy()==True. This requires a
2105 roundtrip to each server, but does not transfer very much data, so
2106 the network bandwidth is fairly low.
2108 If verify=True, a more resource-intensive check will be performed:
2109 every share will be downloaded, and the hashes will be validated on
2110 every bit. I will ignore any shares that failed their hash checks. If
2111 there are at least N distinct valid shares on the grid, my results
2112 will indicate r.is_healthy()==True. This requires N/k times as much
2113 download bandwidth (and server disk IO) as a regular download. If a
2114 storage server is holding a corrupt share, or is experiencing memory
2115 failures during retrieval, or is malicious or buggy, then
2116 verification will detect the problem, but checking will not.
2118 If add_lease=True, I will ensure that an up-to-date lease is present
on each share. The lease secrets will be derived from my node secret
2120 (in BASEDIR/private/secret), so either I will add a new lease to the
2121 share, or I will merely renew the lease that I already had. In a
2122 future version of the storage-server protocol (once Accounting has
2123 been implemented), there may be additional options here to define the
2124 kind of lease that is obtained (which account number to claim, etc).
2126 TODO: any problems seen during checking will be reported to the
2127 health-manager.furl, a centralized object that is responsible for
figuring out why files are unhealthy so corrective action can be taken.
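# Illustrative sketch (not part of the interface): a cheap health probe,
# escalating to a full verify only when something looks wrong. 'monitor'
# is a Monitor instance supplied by the caller.
#
#   d = filenode.check(monitor, verify=False, add_lease=True)
#   def _maybe_verify(r):
#       if r is None or r.is_healthy():   # None means non-distributed LIT
#           return r
#       return filenode.check(monitor, verify=True)
#   d.addCallback(_maybe_verify)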
2132 def check_and_repair(monitor, verify=False, add_lease=False):
2133 """Like check(), but if the file/directory is not healthy, attempt to
2136 Any non-healthy result will cause an immediate repair operation, to
2137 generate and upload new shares. After repair, the file will be as
2138 healthy as we can make it. Details about what sort of repair is done
2139 will be put in the check-and-repair results. The Deferred will not
2140 fire until the repair is complete.
2142 This returns a Deferred that fires with an instance of
2143 ICheckAndRepairResults."""
2146 class IDeepCheckable(Interface):
2147 def start_deep_check(verify=False, add_lease=False):
2148 """Check upon the health of me and everything I can reach.
This is a recursive form of check(), usable only on dirnodes.
I return a Monitor, with results that are an IDeepCheckResults object.
2155 TODO: If any of the directories I traverse are unrecoverable, the
2156 Monitor will report failure. If any of the files I check upon are
2157 unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a failure.
2162 def start_deep_check_and_repair(verify=False, add_lease=False):
2163 """Check upon the health of me and everything I can reach. Repair
2164 anything that isn't healthy.
This is a recursive form of check_and_repair(), usable only on dirnodes.
2169 I return a Monitor, with results that are an
2170 IDeepCheckAndRepairResults object.
2172 TODO: If any of the directories I traverse are unrecoverable, the
2173 Monitor will report failure. If any of the files I check upon are
2174 unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a failure.
2180 class ICheckResults(Interface):
2181 """I contain the detailed results of a check/verify operation.
2184 def get_storage_index():
2185 """Return a string with the (binary) storage index."""
2187 def get_storage_index_string():
2188 """Return a string with the (printable) abbreviated storage index."""
2191 """Return the (string) URI of the object that was checked."""
2194 """Return a boolean, True if the file/dir is fully healthy, False if
it is damaged in any way. Non-distributed LIT files always return True.
2198 def is_recoverable():
2199 """Return a boolean, True if the file/dir can be recovered, False if
2200 not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
2201 files always return True."""
2203 # the following methods all return None for non-distributed LIT files
2205 def get_happiness():
2206 """Return the happiness count of the file."""
2208 def get_encoding_needed():
2209 """Return 'k', the number of shares required for recovery."""
2211 def get_encoding_expected():
2212 """Return 'N', the number of total shares generated."""
2214 def get_share_counter_good():
2215 """Return the number of distinct good shares that were found. For
2216 mutable files, this counts shares for the 'best' version."""
2218 def get_share_counter_wrong():
2219 """For mutable files, return the number of shares for versions other
2220 than the 'best' one (which is defined as being the recoverable
2221 version with the highest sequence number, then the highest roothash).
2222 These are either leftover shares from an older version (perhaps on a
2223 server that was offline when an update occurred), shares from an
2224 unrecoverable newer version, or shares from an alternate current
2225 version that results from an uncoordinated write collision. For a
healthy file, this will equal 0. For immutable files, this will always
equal 0.
2229 def get_corrupt_shares():
2230 """Return a list of 'share locators', one for each share that was
2231 found to be corrupt (integrity failure). Each share locator is a list
2232 of (IServer, storage_index, sharenum)."""
2234 def get_incompatible_shares():
2235 """Return a list of 'share locators', one for each share that was
2236 found to be of an unknown format. Each share locator is a list of
2237 (IServer, storage_index, sharenum)."""
2239 def get_servers_responding():
2240 """Return a list of IServer objects, one for each server that
2241 responded to the share query (even if they said they didn't have
2242 shares, and even if they said they did have shares but then didn't
2243 send them when asked, or dropped the connection, or returned a
2244 Failure, and even if they said they did have shares and sent
2245 incorrect ones when asked)"""
2247 def get_host_counter_good_shares():
2248 """Return the number of distinct storage servers with good shares. If
this number is less than get_share_counter_good(), then some shares
2250 are doubled up, increasing the correlation of failures. This
2251 indicates that one or more shares should be moved to an otherwise
2252 unused server, if one is available.
2255 def get_version_counter_recoverable():
2256 """Return the number of recoverable versions of the file. For a
2257 healthy file, this will equal 1."""
2259 def get_version_counter_unrecoverable():
2260 """Return the number of unrecoverable versions of the file. For a
2261 healthy file, this will be 0."""
2264 """Return a dict mapping share identifier to list of IServer objects.
2265 This indicates which servers are holding which shares. For immutable
2266 files, the shareid is an integer (the share number, from 0 to N-1).
2267 For mutable files, it is a string of the form 'seq%d-%s-sh%d',
2268 containing the sequence number, the roothash, and the share number."""
2271 """Return a string with a brief (one-line) summary of the results."""
2274 """Return a list of strings with more detailed results."""
2277 class ICheckAndRepairResults(Interface):
2278 """I contain the detailed results of a check/verify/repair operation.
The ICheckable.check_and_repair() method returns
instances that provide ICheckAndRepairResults.
2284 def get_storage_index():
2285 """Return a string with the (binary) storage index."""
2287 def get_storage_index_string():
2288 """Return a string with the (printable) abbreviated storage index."""
2290 def get_repair_attempted():
2291 """Return a boolean, True if a repair was attempted. We might not
2292 attempt to repair the file because it was healthy, or healthy enough
2293 (i.e. some shares were missing but not enough to exceed some
2294 threshold), or because we don't know how to repair this object."""
2296 def get_repair_successful():
2297 """Return a boolean, True if repair was attempted and the file/dir
2298 was fully healthy afterwards. False if no repair was attempted or if
2299 a repair attempt failed."""
2301 def get_pre_repair_results():
2302 """Return an ICheckResults instance that describes the state of the
2303 file/dir before any repair was attempted."""
2305 def get_post_repair_results():
2306 """Return an ICheckResults instance that describes the state of the
2307 file/dir after any repair was attempted. If no repair was attempted,
2308 the pre-repair and post-repair results will be identical."""
2311 class IDeepCheckResults(Interface):
2312 """I contain the results of a deep-check operation.
This is returned (as the Monitor's results) by a call to
IDeepCheckable.start_deep_check().
2317 def get_root_storage_index_string():
2318 """Return the storage index (abbreviated human-readable string) of
2319 the first object checked."""
2322 """Return a dictionary with the following keys::
2324 count-objects-checked: count of how many objects were checked
count-objects-healthy: how many of those objects were completely healthy
2327 count-objects-unhealthy: how many were damaged in some way
2328 count-objects-unrecoverable: how many were unrecoverable
2329 count-corrupt-shares: how many shares were found to have
corruption, summed over all objects examined
2334 def get_corrupt_shares():
2335 """Return a set of (IServer, storage_index, sharenum) for all shares
2336 that were found to be corrupt. storage_index is binary."""
2338 def get_all_results():
2339 """Return a dictionary mapping pathname (a tuple of strings, ready to
2340 be slash-joined) to an ICheckResults instance, one for each object
2341 that was checked."""
2343 def get_results_for_storage_index(storage_index):
2344 """Retrive the ICheckResults instance for the given (binary)
2345 storage index. Raises KeyError if there are no results for that
2349 """Return a dictionary with the same keys as
2350 IDirectoryNode.deep_stats()."""
2353 class IDeepCheckAndRepairResults(Interface):
2354 """I contain the results of a deep-check-and-repair operation.
This is returned (as the Monitor's results) by a call to
IDeepCheckable.start_deep_check_and_repair().
2359 def get_root_storage_index_string():
2360 """Return the storage index (abbreviated human-readable string) of
2361 the first object checked."""
2364 """Return a dictionary with the following keys::
count-objects-checked: count of how many objects were checked
count-objects-healthy-pre-repair: how many of those objects were
completely healthy (before any repair)
count-objects-unhealthy-pre-repair: how many were damaged in some
way (before any repair)
count-objects-unrecoverable-pre-repair: how many were unrecoverable
(before any repair)
count-objects-healthy-post-repair: how many of those objects were
completely healthy (after any repair)
count-objects-unhealthy-post-repair: how many were damaged in some
way (after any repair)
count-objects-unrecoverable-post-repair: how many were unrecoverable
(after any repair)
count-repairs-attempted: repairs were attempted on this many
objects. The count-repairs- keys will always be provided, however
unless repair=true is present, they will all be zero.
count-repairs-successful: how many repairs resulted in healthy
objects
count-repairs-unsuccessful: how many repairs did not result in
completely healthy objects
count-corrupt-shares-pre-repair: how many shares were found to
have corruption, summed over all objects examined (before any
repair)
count-corrupt-shares-post-repair: how many shares were found to
have corruption, summed over all objects examined (after any
repair)
2400 """Return a dictionary with the same keys as
2401 IDirectoryNode.deep_stats()."""
2403 def get_corrupt_shares():
2404 """Return a set of (IServer, storage_index, sharenum) for all shares
2405 that were found to be corrupt before any repair was attempted.
2406 storage_index is binary.
2408 def get_remaining_corrupt_shares():
2409 """Return a set of (IServer, storage_index, sharenum) for all shares
2410 that were found to be corrupt after any repair was completed.
2411 storage_index is binary. These are shares that need manual inspection
2412 and probably deletion.
2414 def get_all_results():
2415 """Return a dictionary mapping pathname (a tuple of strings, ready to
2416 be slash-joined) to an ICheckAndRepairResults instance, one for each
2417 object that was checked."""
2419 def get_results_for_storage_index(storage_index):
2420 """Retrive the ICheckAndRepairResults instance for the given (binary)
2421 storage index. Raises KeyError if there are no results for that
2425 class IRepairable(Interface):
2426 def repair(check_results):
2427 """Attempt to repair the given object. Returns a Deferred that fires
2428 with a IRepairResults object.
2430 I must be called with an object that implements ICheckResults, as
2431 proof that you have actually discovered a problem with this file. I
2432 will use the data in the checker results to guide the repair process,
2433 such as which servers provided bad data and should therefore be
avoided. An ICheckResults instance is returned directly by the
ICheckable.check() method (a pre-repair copy is also available via
ICheckAndRepairResults.get_pre_repair_results())::

d = filenode.check(monitor, verify=False)
def _got_results(check_results):
    if not check_results.is_healthy():
        return filenode.repair(check_results)
d.addCallback(_got_results)
2447 class IRepairResults(Interface):
2448 """I contain the results of a repair operation."""
2449 def get_successful():
2450 """Returns a boolean: True if the repair made the file healthy, False
2451 if not. Repair failure generally indicates a file that has been
2452 damaged beyond repair."""
2455 class IClient(Interface):
2456 def upload(uploadable):
2457 """Upload some data into a CHK, get back the UploadResults for it.
2458 @param uploadable: something that implements IUploadable
2459 @return: a Deferred that fires with the UploadResults instance.
2460 To get the URI for this file, use results.uri .
2463 def create_mutable_file(contents=""):
2464 """Create a new mutable file (with initial) contents, get back the
2467 @param contents: (bytestring, callable, or None): this provides the
2468 initial contents of the mutable file. If 'contents' is a bytestring,
2469 it will be used as-is. If 'contents' is a callable, it will be
2470 invoked with the new MutableFileNode instance and is expected to
2471 return a bytestring with the initial contents of the file (the
2472 callable can use node.get_writekey() to decide how to encrypt the
2473 initial contents, e.g. for a brand new dirnode with initial
children). contents=None is equivalent to an empty string. Passing a
callable for contents= is more efficient than creating a mutable file
and setting its contents in two separate operations.
2478 @return: a Deferred that fires with an IMutableFileNode instance.
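# Illustrative example (with a hypothetical helper name) of the callable
# form described above:
#
#   def _make_contents(node):
#       # node.get_writekey() is available here, e.g. to encrypt initial
#       # children for a brand-new dirnode
#       return pack_children_somehow(node)
#   d = client.create_mutable_file(contents=_make_contents)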
2481 def create_dirnode(initial_children={}):
2482 """Create a new unattached dirnode, possibly with initial children.
2484 @param initial_children: dict with keys that are unicode child names,
2485 and values that are (childnode, metadata) tuples.
2487 @return: a Deferred that fires with the new IDirectoryNode instance.
2490 def create_node_from_uri(uri, rouri):
2491 """Create a new IFilesystemNode instance from the uri, synchronously.
2492 @param uri: a string or IURI-providing instance, or None. This could
2493 be for a LiteralFileNode, a CHK file node, a mutable file
2494 node, or a directory node
2495 @param rouri: a string or IURI-providing instance, or None. If the
2496 main uri is None, I will use the rouri instead. If I
2497 recognize the format of the main uri, I will ignore the
2498 rouri (because it can be derived from the writecap).
2500 @return: an instance that provides IFilesystemNode (or more usefully
2501 one of its subclasses). File-specifying URIs will result in
2502 IFileNode-providing instances, like ImmutableFileNode,
2503 LiteralFileNode, or MutableFileNode. Directory-specifying
URIs will result in IDirectoryNode-providing instances, like DirectoryNode.
2509 class INodeMaker(Interface):
2510 """The NodeMaker is used to create IFilesystemNode instances. It can
2511 accept a filecap/dircap string and return the node right away. It can
2512 also create new nodes (i.e. upload a file, or create a mutable file)
2513 asynchronously. Once you have one of these nodes, you can use other
2514 methods to determine whether it is a file or directory, and to download
2515 or modify its contents.
2517 The NodeMaker encapsulates all the authorities that these
2518 IFilesystemNodes require (like references to the StorageFarmBroker). Each
2519 Tahoe process will typically have a single NodeMaker, but unit tests may
2520 create simplified/mocked forms for testing purposes.
2523 def create_from_cap(writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"):
2524 """I create an IFilesystemNode from the given writecap/readcap. I can
2525 only provide nodes for existing file/directory objects: use my other
2526 methods to create new objects. I return synchronously."""
2528 def create_mutable_file(contents=None, keysize=None):
2529 """I create a new mutable file, and return a Deferred that will fire
2530 with the IMutableFileNode instance when it is ready. If contents= is
2531 provided (a bytestring), it will be used as the initial contents of
2532 the new file, otherwise the file will contain zero bytes. keysize= is
for use by unit tests, to create mutable files that are smaller than usual.
2536 def create_new_mutable_directory(initial_children={}):
2537 """I create a new mutable directory, and return a Deferred that will
2538 fire with the IDirectoryNode instance when it is ready. If
2539 initial_children= is provided (a dict mapping unicode child name to
2540 (childnode, metadata_dict) tuples), the directory will be populated
2541 with those children, otherwise it will be empty."""
2544 class IClientStatus(Interface):
2545 def list_all_uploads():
2546 """Return a list of uploader objects, one for each upload that
2547 currently has an object available (tracked with weakrefs). This is
2548 intended for debugging purposes."""
2550 def list_active_uploads():
2551 """Return a list of active IUploadStatus objects."""
2553 def list_recent_uploads():
2554 """Return a list of IUploadStatus objects for the most recently
2557 def list_all_downloads():
2558 """Return a list of downloader objects, one for each download that
2559 currently has an object available (tracked with weakrefs). This is
2560 intended for debugging purposes."""
2562 def list_active_downloads():
2563 """Return a list of active IDownloadStatus objects."""
2565 def list_recent_downloads():
2566 """Return a list of IDownloadStatus objects for the most recently
2567 started downloads."""
2570 class IUploadStatus(Interface):
2572 """Return a timestamp (float with seconds since epoch) indicating
2573 when the operation was started."""
2575 def get_storage_index():
2576 """Return a string with the (binary) storage index in use on this
upload. Returns None if the storage index has not yet been calculated.
2581 """Return an integer with the number of bytes that will eventually
2582 be uploaded for this file. Returns None if the size is not yet known.
2585 """Return True if this upload is using a Helper, False if not."""
2588 """Return a string describing the current state of the upload
2592 """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
each from 0.0 to 1.0. 'chk' describes how much progress has been
2594 made towards hashing the file to determine a CHK encryption key: if
2595 non-convergent encryption is in use, this will be trivial, otherwise
2596 the whole file must be hashed. 'ciphertext' describes how much of the
2597 ciphertext has been pushed to the helper, and is '1.0' for non-helper
2598 uploads. 'encode_and_push' describes how much of the encode-and-push
2599 process has finished: for helper uploads this is dependent upon the
2600 helper providing progress reports. It might be reasonable to add all
2601 three numbers and report the sum to the user."""
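# Illustrative example of the suggestion above: collapse the three
# components into one number for display.
#
#   chk, ciphertext, push = upload_status.get_progress()
#   percent = 100.0 * (chk + ciphertext + push) / 3.0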
2604 """Return True if the upload is currently active, False if not."""
2607 """Return an instance of UploadResults (which contains timing and
sharemap information). Might return None if the upload is not yet finished.
2612 """Each upload status gets a unique number: this method returns that
2613 number. This provides a handle to this particular upload, so a web
2614 page can generate a suitable hyperlink."""
2617 class IDownloadStatus(Interface):
2619 """Return a timestamp (float with seconds since epoch) indicating
2620 when the operation was started."""
2622 def get_storage_index():
2623 """Return a string with the (binary) storage index in use on this
download. This may be None if there is no storage index (i.e. LIT files).
2628 """Return an integer with the number of bytes that will eventually be
2629 retrieved for this file. Returns None if the size is not yet known.
2633 """Return True if this download is using a Helper, False if not."""
2636 """Return a string describing the current state of the download
2640 """Returns a float (from 0.0 to 1.0) describing the amount of the
2641 download that has completed. This value will remain at 0.0 until the
2642 first byte of plaintext is pushed to the download target."""
2645 """Return True if the download is currently active, False if not."""
2648 """Each download status gets a unique number: this method returns
2649 that number. This provides a handle to this particular download, so a
2650 web page can generate a suitable hyperlink."""
2653 class IServermapUpdaterStatus(Interface):
2656 class IPublishStatus(Interface):
2659 class IRetrieveStatus(Interface):
2663 class NotCapableError(Exception):
2664 """You have tried to write to a read-only node."""
2666 class BadWriteEnablerError(Exception):
2670 class RIControlClient(RemoteInterface):
2671 def wait_for_client_connections(num_clients=int):
2672 """Do not return until we have connections to at least NUM_CLIENTS
2678 def upload_random_data_from_file(size=int, convergence=str):
2681 def download_to_tempfile_and_delete(uri=str):
2684 def get_memory_usage():
2685 """Return a dict describes the amount of memory currently in use. The
2686 keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
2687 measuring memory consupmtion in bytes."""
2688 return DictOf(str, int)
2690 def speed_test(count=int, size=int, mutable=Any()):
2691 """Write 'count' tempfiles to disk, all of the given size. Measure
2692 how long (in seconds) it takes to upload them all to the servers.
2693 Then measure how long it takes to download all of them. If 'mutable'
2694 is 'create', time creation of mutable files. If 'mutable' is
'upload', then time access to the same mutable file instead of creating
new ones.
2698 Returns a tuple of (upload_time, download_time).
2700 return (float, float)
2702 def measure_peer_response_time():
2703 """Send a short message to each connected peer, and measure the time
2704 it takes for them to respond to it. This is a rough measure of the
2705 application-level round trip time.
2707 @return: a dictionary mapping peerid to a float (RTT time in seconds)
2710 return DictOf(str, float)
2713 UploadResults = Any() #DictOf(str, str)
2716 class RIEncryptedUploadable(RemoteInterface):
2717 __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
2722 def get_all_encoding_parameters():
2723 return (int, int, int, long)
2725 def read_encrypted(offset=Offset, length=ReadSize):
2732 class RICHKUploadHelper(RemoteInterface):
2733 __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
2737 Return a dictionary of version information.
2739 return DictOf(str, Any())
2741 def upload(reader=RIEncryptedUploadable):
2742 return UploadResults
2745 class RIHelper(RemoteInterface):
2746 __remote_name__ = "RIHelper.tahoe.allmydata.com"
2750 Return a dictionary of version information.
2752 return DictOf(str, Any())
2754 def upload_chk(si=StorageIndex):
2755 """See if a file with a given storage index needs uploading. The
2756 helper will ask the appropriate storage servers to see if the file
2757 has already been uploaded. If so, the helper will return a set of
2758 'upload results' that includes whatever hashes are needed to build
2759 the read-cap, and perhaps a truncated sharemap.
2761 If the file has not yet been uploaded (or if it was only partially
2762 uploaded), the helper will return an empty upload-results dictionary
2763 and also an RICHKUploadHelper object that will take care of the
2764 upload process. The client should call upload() on this object and
2765 pass it a reference to an RIEncryptedUploadable object that will
2766 provide ciphertext. When the upload is finished, the upload() method
2767 will finish and return the upload results.
2769 return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
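# Illustrative sketch (not part of the interface): the client side of the
# helper protocol. 'helper' is an RIHelper remote reference and 'eu' is a
# local object made available as an RIEncryptedUploadable.
#
#   d = helper.callRemote("upload_chk", si=storage_index)
#   def _got((upload_results, upload_helper)):
#       if upload_helper is None:
#           return upload_results               # already uploaded
#       return upload_helper.callRemote("upload", reader=eu)
#   d.addCallback(_got)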
2772 class RIStatsProvider(RemoteInterface):
2773 __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
2775 Provides access to statistics and monitoring information.
2780 returns a dictionary containing 'counters' and 'stats', each a
2781 dictionary with string counter/stat name keys, and numeric or None values.
2782 counters are monotonically increasing measures of work done, and
stats are instantaneous measures (potentially time averaged internally).
2786 return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None)))
2789 class RIStatsGatherer(RemoteInterface):
2790 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
2792 Provides a monitoring service for centralised collection of stats
2795 def provide(provider=RIStatsProvider, nickname=str):
2797 @param provider: a stats collector instance that should be polled
2798 periodically by the gatherer to collect stats.
2799 @param nickname: a name useful to identify the provided client
2804 class IStatsProducer(Interface):
2807 returns a dictionary, with str keys representing the names of stats
2808 to be monitored, and numeric values.
2811 class RIKeyGenerator(RemoteInterface):
2812 __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
2814 Provides a service offering to make RSA key pairs.
2817 def get_rsa_key_pair(key_size=int):
2819 @param key_size: the size of the signature key.
2820 @return: tuple(verifying_key, signing_key)
2822 return TupleOf(str, str)
2825 class FileTooLargeError(Exception):
2829 class IValidatedThingProxy(Interface):
2831 """ Acquire a thing and validate it. Return a deferred that is
2832 eventually fired with self if the thing is valid or errbacked if it
2833 can't be acquired or validated."""
2836 class InsufficientVersionError(Exception):
2837 def __init__(self, needed, got):
self.needed = needed
self.got = got

def __repr__(self):
    return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
                                                            self.got)
2845 class EmptyPathnameComponentError(Exception):
2846 """The webapi disallows empty pathname components."""