from zope.interface import Interface
from foolscap.schema import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint
from foolscap import RemoteInterface, Referenceable
HASH_SIZE = 32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable bucket modifications
LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests
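# Illustrative sketch (not part of this interface): one plausible way for a
# client to derive the per-server lease secrets described above. The master
# secret, tag strings, and helper name are hypothetical; only the "derive a
# distinct value per (secret, server)" shape is implied by the comments above.
#
#   import hashlib
#   def _derive_secret(tag, master_secret, peerid):
#       # 32-byte value, matching the Hash constraint (HASH_SIZE == 32)
#       return hashlib.sha256(tag + master_secret + peerid).digest()
#
#   renew_secret  = _derive_secret("lease-renew:",  master, server_nodeid)
#   cancel_secret = _derive_secret("lease-cancel:", master, server_nodeid)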
class RIStubClient(RemoteInterface):
    """Each client publishes a service announcement for a dummy object called
    the StubClient. This object doesn't actually offer any services, but the
    announcement helps the Introducer keep track of which clients are
    subscribed (so the grid admin can keep track of things like the size of
    the grid and the client versions in use). This is the (empty)
    RemoteInterface for the StubClient."""
class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        readers.
        """

    def abort():
        """Abandon all the data that has been written.
        """
class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share(),
        which is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """
TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
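# Illustrative sketch (not from the original source): building the vectors for
# a simple test-and-set write of share 0. 'my_data' is hypothetical.
#
#   test_vector = [(0, 1, 'eq', '')]    # passes only if the share is empty
#   data_vector = [(0, my_data)]        # write my_data at offset 0
#   tw_vectors = {0: (test_vector, data_vector, None)}  # None: keep length
#   read_vector = []                    # nothing to read back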
class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_versions():
        """
        Return a tuple of (my_version, oldest_supported) strings. Each string can be parsed by
        a pyutil.version_class.Version instance or a distutils.version.LooseVersion instance,
        and then compared. The first goal is to make sure that nodes are not confused by
        speaking to an incompatible peer. The second goal is to enable the development of
        backwards-compatibility code.

        The meaning of the oldest_supported element is that if you treat this storage server as
        though it were of that version, then you will not be disappointed.

        The precise meaning of this method might change in incompatible ways until we get the
        whole compatibility scheme nailed down.
        """
        return TupleOf(str, str)
    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket refresh.
                             This secret is generated by the client and
                             stored for later comparison by the server. Each
                             server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
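    # Illustrative sketch (not from the original source): invoking this method
    # over foolscap from the client side. 'server' is a hypothetical
    # RemoteReference to an RIStorageServer; the other names are assumptions.
    #
    #   d = server.callRemote("allocate_buckets", storage_index,
    #                         renew_secret, cancel_secret,
    #                         sharenums=set([0, 1, 2]),
    #                         allocated_size=share_size, canary=canary)
    #   d.addCallback(lambda (alreadygot, writers): writers)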
    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead.
        """

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket. Some networks will use this, some
        will not.
        """

    def cancel_lease(storage_index=StorageIndex,
                     cancel_secret=LeaseCancelSecret):
        """
        Cancel the lease on a given bucket. If this was the last lease on the
        bucket, the bucket will be deleted.
        """
    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results
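    # Illustrative sketch (not from the original source): reading the first
    # 100 bytes of every known share for a storage index. Names are
    # hypothetical.
    #
    #   d = server.callRemote("slot_readv", storage_index,
    #                         [],          # empty list: all known shares
    #                         [(0, 100)])  # one (offset, length) pair
    #   # fires with e.g. {0: ["..."], 1: ["..."]}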
    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """General-purpose test-and-set operation for mutable slots. Perform
        a bunch of comparisons against the existing shares. If they all pass,
        then apply a bunch of write vectors to those shares. Then use the
        read vectors to extract data from all the shares and return the data.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket refresh.
                             This secret is generated by the client and
                             stored for later comparison by the server. Each
                             server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a bool(
        (read(offset, length)) OPERATOR specimen ) and only performs the
        write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created.

        Each write vector is accompanied by a 'new_length' argument. If
        new_length is not None, use it to set the size of the container. This
        can be used to pre-allocate space for a series of upcoming writes, or
        truncate existing data. If the container is growing, new_length will
        be applied before datav. If the container is shrinking, it will be
        applied afterwards.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same vector is used for
        all shares. This captures the state that was tested by the test
        vector.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))
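    # Illustrative sketch (not from the original source): issuing the call
    # with the vectors sketched near TestAndWriteVectorsForShares above, then
    # checking whether the test-and-set succeeded. Names are hypothetical.
    #
    #   d = server.callRemote("slot_testv_and_readv_and_writev",
    #                         storage_index,
    #                         (write_enabler, renew_secret, cancel_secret),
    #                         tw_vectors, read_vector)
    #   def _done((wrote, read_data)):
    #       if not wrote:
    #           raise RuntimeError("test vector failed")
    #       return read_data   # dict: shnum -> list of strings
    #   d.addCallback(_done)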
    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        which did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """
class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """
    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode::

         for k in sorted(dict.keys()):
             assert re.match(r'^[a-zA-Z_\-]+$', k)
             write(k + ':' + netstring(dict[k]))

        @return: a Deferred that fires (with None) when the operation completes
        """
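    # Illustrative sketch (not from the original source) of the serialization
    # described above. The netstring framing ("%d:%s,") is an assumption
    # matching the common netstring convention.
    #
    #   import re
    #   def serialize_uri_extension(d):
    #       def netstring(s):
    #           return "%d:%s," % (len(s), s)
    #       pieces = []
    #       for k in sorted(d.keys()):
    #           assert re.match(r'^[a-zA-Z_\-]+$', k)
    #           pieces.append(k + ':' + netstring(d[k]))
    #       return ''.join(pieces)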
309 """Finish writing and close the bucket. The share is not finalized
310 until this method is called: if the uploading client disconnects
311 before calling close(), the partially-written share will be
314 @return: a Deferred that fires (with None) when the operation completes
class IStorageBucketReader(Interface):

    def get_block(blocknum=int):
        """Most blocks will be the same size. The last block might be shorter
        than the others.

        @return: ShareData
        """

    def get_plaintext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_block_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_share_hashes():
        """
        @return: ListOf(TupleOf(int, Hash))
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """
# hm, we need a solution for forward references in schemas
from foolscap.schema import Any

FileNode_ = Any() # TODO: foolscap needs constraints on copyables
DirectoryNode_ = Any() # TODO: same
AnyNode_ = ChoiceOf(FileNode_, DirectoryNode_)
class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    def get_readonly():
        """Return another IURI instance, which represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verifier():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""
class IVerifierURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IVerifierURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""
class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IMutableFileURI(Interface):
    """I am a URI which represents a mutable filenode."""

class INewDirectoryURI(Interface):
    pass

class IReadonlyNewDirectoryURI(Interface):
    pass
class IFilesystemNode(Interface):
    def get_uri():
        """
        Return the URI that can be used by others to get access to this
        node. If this node is read-only, the URI will only offer read-only
        access. If this node is read-write, the URI will offer read-write
        access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_readonly_uri():
        """Return the directory URI that can be used by others to get
        read-only access to this directory node. The result is a read-only
        URI, regardless of whether this dirnode is read-only or read-write.

        If you have merely read-only access to this dirnode,
        get_readonly_uri() will return the same thing as get_uri().
        """

    def get_verifier():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files).
        """

    def is_readonly():
        """Return False if this reference provides mutable access to the
        given file or directory (i.e. if you can modify it), or True if not.
        Note that even if this reference is read-only, someone else may hold
        a read-write reference to it."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file.
        """
class IMutableFilesystemNode(IFilesystemNode):
    pass

class IFileNode(IFilesystemNode):
    def download(target):
        """Download the file's contents to a given IDownloadTarget"""

    def download_to_data():
        """Download the file's contents. Return a Deferred that fires
        with those contents."""

    def get_size():
        """Return the length (in bytes) of the data this node represents."""
class IMutableFileNode(IFileNode, IMutableFilesystemNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node which has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.

    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """
    def download_best_version():
        """Download the 'best' available version of the file, meaning one of
        the recoverable versions with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has been
        uploaded.

        I update an internal servermap with MODE_READ, determine which
        version of the file is indicated by
        servermap.best_recoverable_version(), and return a Deferred that
        fires with its contents. If no version is recoverable, the Deferred
        will errback with UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be downloaded with
        download_best_version(), without actually downloading the whole file.

        I return a Deferred that fires with an integer.
        """
    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """
    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given two arguments: a string (with the
        old contents) and a servermap. As with download_best_version(), the
        old contents will be from the best recoverable version, but the
        modifier can use the servermap to make other decisions (such as
        refusing to apply the delta if there are multiple parallel versions,
        or if there is evidence of a newer unrecoverable version).

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """
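    # Illustrative sketch (not from the original source): an idempotent
    # modifier that appends a line only when it is missing, per the
    # requirements above. 'new_line' and 'filenode' are hypothetical.
    #
    #   def _append_line(old_contents, servermap):
    #       if new_line in old_contents:
    #           return None                  # change already present
    #       return old_contents + new_line   # new contents to upload
    #   d = filenode.modify(_append_line)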
    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap which was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """
    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """
643 """Return this filenode's writekey, or None if the node does not have
644 write-capability. This may be used to assist with data structures
645 that need to make certain data available only to writers, such as the
646 read-write child caps in dirnodes. The recommended process is to have
647 reader-visible data be submitted to the filenode in the clear (where
648 it will be encrypted by the filenode using the readkey), but encrypt
649 writer-visible data using this writekey.
class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child which does not exist."""
class IDirectoryNode(IMutableFilesystemNode):
    """I represent a name-to-child mapping, holding the tahoe equivalent of a
    directory. All child names are unicode strings, and all children are some
    sort of IFilesystemNode (either files or subdirectories).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is either an IFileNode or IDirectoryNode, and 'metadata_dict'
        is a dictionary of metadata."""

    def has_child(name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not. The child name must
        be a unicode string."""
695 """I return a Deferred that fires with a specific named child node,
696 either an IFileNode or an IDirectoryNode. The child name must be a
697 unicode string. I raise NoSuchChildError if I do not have a child by
700 def get_metadata_for(name):
701 """I return a Deferred that fires with the metadata dictionary for a
702 specific named child node. This metadata is stored in the *edge*, not
703 in the child, so it is attached to the parent dirnode rather than the
704 child dir-or-file-node. The child name must be a unicode string. I
705 raise NoSuchChildError if I do not have a child by that name."""
707 def set_metadata_for(name, metadata):
708 """I replace any existing metadata for the named child with the new
709 metadata. The child name must be a unicode string. This metadata is
710 stored in the *edge*, not in the child, so it is attached to the
711 parent dirnode rather than the child dir-or-file-node. I return a
712 Deferred (that fires with this dirnode) when the operation is
713 complete. I raise NoSuchChildError if I do not have a child by that
    def get_child_at_path(path):
        """Transform a child path into an IDirectoryNode or IFileNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """
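    # Illustrative sketch (not from the original source): both accepted path
    # forms resolve to the same descendant. 'dirnode' is hypothetical.
    #
    #   d1 = dirnode.get_child_at_path(u"subdir/file.txt")
    #   d2 = dirnode.get_child_at_path([u"subdir", u"file.txt"])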
    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IDirectoryNode/IFileNode and
        metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, child_uri, metadata=None, overwrite=True):
        """I add a child (by URI) at the specific name. I return a Deferred
        that fires when the operation finishes. If overwrite= is True, I will
        replace any existing child of the same name, otherwise an existing
        child will cause me to return ExistingChildError. The child name must
        be a unicode string.

        The child_uri could be for a file, or for a directory (either
        read-write or read-only, using a URI that came from get_uri() ).

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""
    def set_children(entries, overwrite=True):
        """Add multiple (name, child_uri) pairs (or (name, child_uri,
        metadata) triples) to a directory node. Returns a Deferred that fires
        (with None) when the operation finishes. This is equivalent to
        calling set_uri() multiple times, but is much more efficient. All
        child names must be unicode strings.
        """

    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IDirectoryNode or IFileNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple (name, child_node) pairs (or (name, child_node,
        metadata) triples) to a directory node. Returns a Deferred that fires
        (with None) when the operation finishes. This is equivalent to
        calling set_node() multiple times, but is much more efficient. All
        child names must be unicode strings."""
    def add_file(name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I set metadata
        the same way as set_uri and set_node. The child name must be a
        unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""

    def delete(name):
        """I remove the child at the specific name. I return a Deferred that
        fires when the operation finishes. The child name must be a unicode
        string. I raise NoSuchChildError if I do not have a child by that
        name."""

    def create_empty_directory(name, overwrite=True):
        """I create and attach an empty directory at the given name. The
        child name must be a unicode string. I return a Deferred that fires
        when the operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""
    def build_manifest():
        """Return a Monitor. The Monitor's results will be a list of (path,
        cap) tuples for nodes (directories and files) reachable from this
        one. 'path' will be a tuple of unicode strings. The origin dirnode
        will be represented by an empty path tuple. The Monitor will also
        have an .origin_si attribute with the (binary) storage index of the
        starting point.
        """

    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

           count-immutable-files: count of how many CHK files are in the set
           count-mutable-files: same, for mutable files (does not include
                                directories)
           count-literal-files: same, for LIT files
           count-files: sum of the above three

           count-directories: count of directories

           size-immutable-files: total bytes for all CHK files in the set
           size-mutable-files (TODO): same, for current version of all mutable
                                      files, does not include directories
           size-literal-files: same, for LIT files
           size-directories: size of mutable files used by directories

           largest-directory: number of bytes in the largest directory
           largest-directory-children: number of children in the largest
                                       directory
           largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """
class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' to use less CPU). The encoding math will
        be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).
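
        As a worked example (illustrative numbers, not from the original
        text): set_params(1000, 5, 10) yields shares of 1000/5 == 200 bytes
        each, and the ten shares together occupy roughly 10*200 == 2000
        bytes, i.e. a 2x expansion of the original 1000 bytes.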
        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This method
        returns an index into this table; the value at this index is an
        encoder class, and this encoder is an instance of that class.
        """

    def get_serialized_params(): # TODO: maybe, maybe not
        """Return a string that describes the parameters of this encoder.

        This string can be passed to the decoder to prepare it for handling
        the encoded shares we create. It might contain more information than
        was presented to set_params(), if there is some flexibility of
        implementation.

        This string is intended to be embedded in the URI, so there are
        several restrictions on its contents. At the moment I'm thinking that
        this means it may contain hex digits and hyphens, and nothing else.
        The idea is that the URI contains something like '%s:%s:%s' %
        (encoder.get_encoder_name(), encoder.get_serialized_params(),
        b2a(crypttext_hash)), and this is enough information to construct a
        compatible decoder.
        """
    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """

    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data) must
        be equal to the 'data_size' value passed earlier to set_params().

        This will return a Deferred that will fire with two lists. The first
        is a list of shares, each of which is a string (or other buffer
        object) such that len(share) is the same as what get_share_size()
        returned earlier. The second is a list of shareids, in which each is
        an integer. The lengths of the two lists will always be equal to each
        other. The user should take care to keep each share closely
        associated with its shareid, as one is useless without the other.

        The length of this output list will normally be the same as the value
        provided to the 'max_shares' parameter of set_params(). This may be
        different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        """
    def encode(inshares, desired_share_ids=None):
        """Encode some data. This may be called multiple times. Each call is
        independent of all previous calls.

        inshares is a sequence of length required_shares, containing buffers
        (i.e. strings), where each buffer contains the next contiguous
        non-overlapping segment of the input data. Each buffer is required to
        be the same length, and the sum of the lengths of the buffers is
        required to be exactly the data_size promised by set_params(). (This
        implies that the data has to be padded before being passed to
        encode(), unless of course it already happens to be an even multiple
        of required_shares in length.)

        ALSO: the requirement to break up your data into 'required_shares'
        chunks before calling encode() feels a bit surprising, at least from
        the point of view of a user who doesn't know how FEC works. It feels
        like an implementation detail that has leaked outside the
        abstraction barrier. Can you imagine a use case in which the data to
        be encoded might already be available in pre-segmented chunks, such
        that it is faster or less work to make encode() take a list rather
        than splitting a single string?

        ALSO ALSO: I think 'inshares' is a misleading term, since encode()
        is supposed to *produce* shares, so what it *accepts* should be
        something other than shares. Other places in this interface use the
        word 'data' for that-which-is-not-shares.. maybe we should use that
        term here too.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        For each call, encode() will return a Deferred that fires with two
        lists, one containing shares and the other containing the shareids.
        The get_share_size() method can be used to determine the length of
        the share strings returned by encode(). Each shareid is a small
        integer, exactly as passed into 'desired_share_ids' (or
        range(max_shares), if desired_share_ids was not provided).

        The shares and their corresponding shareids are required to be kept
        together during storage and retrieval. Specifically, the share data is
        useless by itself: the decoder needs to be told which share is which
        by providing it with both the shareid and the actual share data.

        This function will allocate an amount of memory roughly equal to::

         (max_shares - required_shares) * get_share_size()

        When combined with the memory that the caller must allocate to
        provide the input data, this leads to a memory footprint roughly
        equal to the size of the resulting encoded shares (i.e. the expansion
        factor times the size of the input segment).
        """
    # rejected ideas:
    #
    #  returning a list of (shareidN,shareN) tuples instead of a pair of
    #  lists (shareids..,shares..). Brian thought the tuples would
    #  encourage users to keep the share and shareid together throughout
    #  later processing, Zooko pointed out that the code to iterate
    #  through two lists is not really more complicated than using a list
    #  of tuples and there's also a performance improvement
    #
    #  having 'data_size' not required to be an integral multiple of
    #  'required_shares'. Doing this would require encode() to perform
    #  padding internally, and we'd prefer to have any padding be done
    #  explicitly by the caller. Yes, it is an abstraction leak, but
    #  hopefully not an onerous one.
class ICodecDecoder(Interface):
    def set_serialized_params(params):
        """Set up the parameters of this decoder, from a string returned by
        encoder.get_serialized_params()."""

    def get_needed_shares():
        """Return the number of shares needed to reconstruct the data.
        set_serialized_params() is required to be called before this."""

    def decode(some_shares, their_shareids):
        """Decode a partial list of shares into data.

        'some_shares' is required to be a sequence of buffers of sharedata, a
        subset of the shares returned by ICodecEncoder.encode(). Each share is
        required to be of the same length. The i'th element of their_shareids
        is required to be the shareid of the i'th buffer in some_shares.

        This returns a Deferred which fires with a sequence of buffers. This
        sequence will contain all of the segments of the original data, in
        order. The sum of the lengths of all of the buffers will be the
        'data_size' value passed into the original ICodecEncoder.set_params()
        call. To get back the single original input block of data, use
        ''.join(output_buffers), or you may wish to simply write them in
        order to an output file.

        Note that some of the elements in the result sequence may be
        references to the elements of the some_shares input sequence. In
        particular, this means that if those share objects are mutable (e.g.
        arrays) and if they are changed, then both the input (the
        'some_shares' parameter) and the output (the value given when the
        deferred is triggered) will change.

        The length of 'some_shares' is required to be exactly the value of
        'required_shares' passed into the original ICodecEncoder.set_params()
        call.
        """
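    # Illustrative sketch (not from the original source): decoding with any
    # 'required_shares'-sized subset, keeping shares paired with shareids.
    # 'k', 'shares', and 'shareids' are hypothetical.
    #
    #   d = decoder.decode(shares[:k], shareids[:k])
    #   d.addCallback(lambda buffers: ''.join(buffers))  # original data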
class IEncoder(Interface):
    """I take an object that provides IEncryptedUploadable, which provides
    encrypted data, and a list of shareholders. I then encode, hash, and
    deliver shares to those shareholders. I will compute all the necessary
    Merkle hash trees that are necessary to validate the crypttext that
    eventually comes back from the shareholders. I provide the URI Extension
    Block Hash, and the encoding parameters, both of which must be included
    in the URI.

    I do not choose shareholders, that is left to the IUploader. I must be
    given a dict of RemoteReferences to storage buckets that are ready and
    willing to receive data.
    """

    def set_size(size):
        """Specify the number of bytes that will be encoded. This must be
        performed before get_serialized_params() can be called.
        """

    def set_params(params):
        """Override the default encoding parameters. 'params' is a tuple of
        (k,d,n), where 'k' is the number of required shares, 'd' is the
        shares_of_happiness, and 'n' is the total number of shares that will
        be created.

        Encoding parameters can be set in three ways. 1: The Encoder class
        provides defaults (3/7/10). 2: the Encoder can be constructed with
        an 'options' dictionary, in which the
        'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
        set_params((k,d,n)) can be called.

        If you intend to use set_params(), you must call it before
        get_share_size or get_param are called.
        """

    def set_encrypted_uploadable(u):
        """Provide a source of encrypted upload data. 'u' must implement
        IEncryptedUploadable.

        When this is called, the IEncryptedUploadable will be queried for its
        length and the storage_index that should be used.

        This returns a Deferred that fires with this Encoder instance.

        This must be performed before start() can be called.
        """
    def get_param(name):
        """Return an encoding parameter, by name.

        'storage_index': return a string with the (16-byte truncated SHA-256
                         hash) storage index to which these shares should be
                         pushed.

        'share_counts': return a tuple describing how many shares are used:
                        (needed_shares, shares_of_happiness, total_shares)

        'num_segments': return an int with the number of segments that
                        will be encoded.

        'segment_size': return an int with the size of each segment.

        'block_size': return the size of the individual blocks that will
                      be delivered to a shareholder's put_block() method. By
                      knowing this, the shareholder will be able to keep all
                      blocks in a single file and still provide random access
                      when reading them. # TODO: can we avoid exposing this?

        'share_size': an int with the size of the data that will be stored
                      on each shareholder. This is aggregate amount of data
                      that will be sent to the shareholder, summed over all
                      the put_block() calls I will ever make. It is useful to
                      determine this size before asking potential
                      shareholders whether they will grant a lease or not,
                      since their answers will depend upon how much space we
                      need. TODO: this might also include some amount of
                      overhead, like the size of all the hashes. We need to
                      decide whether this is useful or not.

        'serialized_params': a string with a concise description of the
                             codec name and its parameters. This may be passed
                             into the IUploadable to let it make sure that
                             the same file encoded with different parameters
                             will result in different storage indexes.

        Once this is called, set_size() and set_params() may not be called.
        """
    def set_shareholders(shareholders):
        """Tell the encoder where to put the encoded shares. 'shareholders'
        must be a dictionary that maps share number (an integer ranging from
        0 to n-1) to an instance that provides IStorageBucketWriter. This
        must be performed before start() can be called."""

    def start():
        """Begin the encode/upload process. This involves reading encrypted
        data from the IEncryptedUploadable, encoding it, uploading the shares
        to the shareholders, then sending the hash trees.

        set_encrypted_uploadable() and set_shareholders() must be called
        before this can be invoked.

        This returns a Deferred that fires with a tuple of
        (uri_extension_hash, needed_shares, total_shares, size) when the
        upload process is complete. This information, plus the encryption
        key, is sufficient to construct the URI.
        """
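    # Illustrative sketch (not from the original source) of the calling
    # sequence implied above. 'encoder', 'uploadable', and 'writers' are
    # hypothetical.
    #
    #   d = encoder.set_encrypted_uploadable(uploadable)
    #   d.addCallback(lambda enc: enc.set_shareholders(writers))
    #   d.addCallback(lambda ign: encoder.start())
    #   # fires with (uri_extension_hash, needed_shares, total_shares, size)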
class IDecoder(Interface):
    """I take a list of shareholders and some setup information, then
    download, validate, decode, and decrypt data from them, writing the
    results to an output file.

    I do not locate the shareholders, that is left to the IDownloader. I must
    be given a dict of RemoteReferences to storage buckets that are ready to
    send data.
    """

    def setup(outfile):
        """I take a file-like object (providing write and close) to which all
        the plaintext data will be written.

        TODO: producer/consumer . Maybe write() should return a Deferred that
        indicates when it will accept more data? But probably having the
        IDecoder be a producer is easier to glue to IConsumer pieces.
        """

    def set_shareholders(shareholders):
        """I take a dictionary that maps share identifiers (small integers)
        to RemoteReferences that provide RIBucketReader. This must be called
        before start()."""

    def start():
        """I start the download. This process involves retrieving data and
        hash chains from the shareholders, using the hashes to validate the
        data, decoding the shares into segments, decrypting the segments,
        then writing the resulting plaintext to the output file.

        I return a Deferred that will fire (with self) when the download is
        complete.
        """
class IDownloadTarget(Interface):
    # Note that if the IDownloadTarget is also an IConsumer, the downloader
    # will register itself as a producer. This allows the target to invoke
    # downloader.pauseProducing, resumeProducing, and stopProducing.
    def open(size):
        """Called before any calls to write() or close(). If an error
        occurs before any data is available, fail() may be called without
        a previous call to open().

        'size' is the length of the file being downloaded, in bytes."""

    def write(data):
        """Output some data to the target."""

    def close():
        """Inform the target that there is no more data to be written."""

    def fail(why):
        """fail() is called to indicate that the download has failed. 'why'
        is a Failure object indicating what went wrong. No further methods
        will be invoked on the IDownloadTarget after fail()."""

    def register_canceller(cb):
        """The FileDownloader uses this to register a no-argument function
        that the target can call to cancel the download. Once this canceller
        is invoked, no further calls to write() or close() will be made."""

    def finish():
        """When the FileDownloader is done, this finish() function will be
        called. Whatever it returns will be returned to the invoker of
        Downloader.download.
        """
class IDownloader(Interface):
    def download(uri, target):
        """Perform a CHK download, sending the data to the given target.
        'target' must provide IDownloadTarget.

        Returns a Deferred that fires (with the results of target.finish)
        when the download is finished, or errbacks if something went wrong."""
class IEncryptedUploadable(Interface):
    def set_upload_status(upload_status):
        """Provide an IUploadStatus object that should be filled with status
        information. The IEncryptedUploadable is responsible for setting
        key-determination progress ('chk'), size, storage_index, and
        ciphertext-fetch progress. It may delegate some of this
        responsibility to others, in particular to the IUploadable."""

    def get_size():
        """This behaves just like IUploadable.get_size()."""

    def get_all_encoding_parameters():
        """Return a Deferred that fires with a tuple of
        (k,happy,n,segment_size). The segment_size will be used as-is, and
        must match the following constraints: it must be a multiple of k, and
        it shouldn't be unreasonably larger than the file size (if
        segment_size is larger than filesize, the difference must be stored
        as padding).

        This usually passes through to the IUploadable method of the same
        name.

        The encoder strictly obeys the values returned by this method. To
        make an upload use non-default encoding parameters, you must arrange
        to control the values that this method returns.
        """

    def get_storage_index():
        """Return a Deferred that fires with a 16-byte storage index.
        """
    def read_encrypted(length, hash_only):
        """This behaves just like IUploadable.read(), but returns crypttext
        instead of plaintext. If hash_only is True, then this discards the
        data (and returns an empty list); this improves efficiency when
        resuming an interrupted upload (where we need to compute the
        plaintext hashes, but don't need the redundant encrypted data)."""

    def get_plaintext_hashtree_leaves(first, last, num_segments):
        """Get the leaf nodes of a merkle hash tree over the plaintext
        segments, i.e. get the tagged hashes of the given segments. The
        segment size is expected to be generated by the IEncryptedUploadable
        before any plaintext is read or ciphertext produced, so that the
        segment hashes can be generated with only a single pass.

        This returns a Deferred which fires with a sequence of hashes, using:

         tuple(segment_hashes[first:last])

        'num_segments' is used to assert that the number of segments that the
        IEncryptedUploadable handled matches the number of segments that the
        encoder was expecting.

        This method must not be called until the final byte has been read
        from read_encrypted(). Once this method is called, read_encrypted()
        can never be called again.
        """

    def get_plaintext_hash():
        """Get the hash of the whole plaintext.

        This returns a Deferred which fires with a tagged SHA-256 hash of the
        whole plaintext, obtained from hashutil.plaintext_hash(data).
        """

    def close():
        """Just like IUploadable.close()."""
class IUploadable(Interface):
    def set_upload_status(upload_status):
        """Provide an IUploadStatus object that should be filled with status
        information. The IUploadable is responsible for setting
        key-determination progress ('chk')."""

    def set_default_encoding_parameters(params):
        """Set the default encoding parameters, which must be a dict mapping
        strings to ints. The meaningful keys are 'k', 'happy', 'n', and
        'max_segment_size'. These might have an influence on the final
        encoding parameters returned by get_all_encoding_parameters(), if the
        Uploadable doesn't have more specific preferences.

        This call is optional: if it is not used, the Uploadable will use
        some built-in defaults. If used, this method must be called before
        any other IUploadable methods to have any effect.
        """

    def get_size():
        """Return a Deferred that will fire with the length of the data to be
        uploaded, in bytes. This will be called before the data is actually
        used, to compute encoding parameters.
        """
1343 def get_all_encoding_parameters():
1344 """Return a Deferred that fires with a tuple of
1345 (k,happy,n,segment_size). The segment_size will be used as-is, and
1346 must match the following constraints: it must be a multiple of k, and
1347 it shouldn't be unreasonably larger than the file size (if
1348 segment_size is larger than filesize, the difference must be stored
1351 The relative values of k and n allow some IUploadables to request
1352 better redundancy than others (in exchange for consuming more space
1355 Larger values of segment_size reduce hash overhead, while smaller
1356 values reduce memory footprint and cause data to be delivered in
1357 smaller pieces (which may provide a smoother and more predictable
1358 download experience).
1360 The encoder strictly obeys the values returned by this method. To
1361 make an upload use non-default encoding parameters, you must arrange
1362 to control the values that this method returns. One way to influence
1363 them may be to call set_encoding_parameters() before calling
1364 get_all_encoding_parameters().

    def get_encryption_key():
        """Return a Deferred that fires with a 16-byte AES key. This key will
        be used to encrypt the data. The key will also be hashed to derive
        the storage index.

        Uploadables which want to achieve convergence should hash their file
        contents and the serialized_encoding_parameters to form the key
        (which of course requires a full pass over the data). Uploadables can
        use the upload.ConvergentUploadMixin class to achieve this
        automatically.

        Uploadables which do not care about convergence (or do not wish to
        make multiple passes over the data) can simply return a
        strongly-random 16 byte string.

        get_encryption_key() may be called multiple times: the IUploadable is
        required to return the same value each time.
        """
1387 """Return a Deferred that fires with a list of strings (perhaps with
1388 only a single element) which, when concatenated together, contain the
1389 next 'length' bytes of data. If EOF is near, this may provide fewer
1390 than 'length' bytes. The total number of bytes provided by read()
1391 before it signals EOF must equal the size provided by get_size().
1393 If the data must be acquired through multiple internal read
1394 operations, returning a list instead of a single string may help to
1395 reduce string copies.
1397 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1398 so a 10kB file means length=3kB, 100kB file means length=30kB,
1399 and >=1MB file means length=300kB.
1401 This method provides for a single full pass through the data. Later
1402 use cases may desire multiple passes or access to only parts of the
1403 data (such as a mutable file making small edits-in-place). This API
1404 will be expanded once those use cases are better understood.
1408 """The upload is finished, and whatever filehandle was in use may be

class IUploadResults(Interface):
    """I am returned by upload() methods. I contain a number of public
    attributes which can be read to determine the results of the upload. Some
    of these are functional, some are timing information. All of these may be
    None.::

     .file_size : the size of the file, in bytes
     .uri : the CHK read-cap for the file
     .ciphertext_fetched : how many bytes were fetched by the helper
     .sharemap : dict mapping share number to placement string
     .servermap : dict mapping server peerid to a set of share numbers
     .timings : dict of timing information, mapping name to seconds (float)
       total : total upload time, start to finish
       storage_index : time to compute the storage index
       peer_selection : time to decide which peers will be used
       contacting_helper : initial helper query to upload/no-upload decision
       existence_check : helper pre-upload existence check
       helper_total : initial helper query to helper finished pushing
       cumulative_fetch : helper waiting for ciphertext requests
       total_fetch : helper start to last ciphertext response
       cumulative_encoding : just time spent in zfec
       cumulative_sending : just time spent waiting for storage servers
       hashes_and_close : last segment push to shareholder close
       total_encode_and_push : first encode to shareholder close
    """
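
# Illustrative sketch: a callback that consumes the attributes listed
# above once an upload() Deferred fires. The attribute names follow the
# docstring; the summary format is an assumption for this example.
def _example_show_upload(results):
    lines = ["size: %d bytes" % results.file_size,
             "uri: %s" % results.uri]
    for name, elapsed in sorted((results.timings or {}).items()):
        lines.append("  %s: %.3fs" % (name, elapsed))
    return "\n".join(lines)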

class IDownloadResults(Interface):
    """I am created internally by download() methods. I contain a number of
    public attributes which contain details about the download process.::

     .file_size : the size of the file, in bytes
     .servers_used : set of server peerids that were used during download
     .server_problems : dict mapping server peerid to a problem string. Only
                        servers that had problems (bad hashes, disconnects)
                        are listed here.
     .servermap : dict mapping server peerid to a set of share numbers. Only
                  servers that had any shares are listed here.
     .timings : dict of timing information, mapping name to seconds (float)
       peer_selection : time to ask servers about shares
       servers_peer_selection : dict of peerid to DYHB-query time
       uri_extension : time to fetch a copy of the URI extension block
       hashtrees : time to fetch the hash trees
       segments : time to fetch, decode, and deliver segments
       cumulative_fetch : time spent waiting for storage servers
       cumulative_decode : just time spent in zfec
       cumulative_decrypt : just time spent in decryption
       total : total download time, start to finish
       fetch_per_server : dict of peerid to list of per-segment fetch times
    """

class IUploader(Interface):
    def upload(uploadable):
        """Upload the file. 'uploadable' must implement IUploadable. This
        returns a Deferred which fires with an UploadResults instance, from
        which the URI of the file can be obtained as results.uri ."""

    def upload_ssk(write_capability, new_version, uploadable):
        """TODO: how should this work?"""

class ICheckable(Interface):
    def check(monitor, verify=False):
        """Check upon my health, optionally repairing any problems.

        This returns a Deferred that fires with an instance that provides
        ICheckerResults, or None if the object is non-distributed (i.e. LIT
        files).

        The monitor will be checked periodically to see if the operation has
        been cancelled. If so, no new queries will be sent, and the Deferred
        will fire (with an OperationCancelledError) immediately.

        Filenodes and dirnodes (which provide IFilesystemNode) are also
        checkable. Instances that represent verifier-caps will be checkable
        but not downloadable. Some objects (like LIT files) do not actually
        live in the grid, and their checkers return None (non-distributed
        files are always healthy).

        If verify=False, a relatively lightweight check will be performed: I
        will ask all servers if they have a share for me, and I will believe
        whatever they say. If there are at least N distinct shares on the
        grid, my results will indicate r.is_healthy()==True. This requires a
        roundtrip to each server, but does not transfer very much data, so
        the network bandwidth is fairly low.

        If verify=True, a more resource-intensive check will be performed:
        every share will be downloaded, and the hashes will be validated on
        every bit. I will ignore any shares that failed their hash checks. If
        there are at least N distinct valid shares on the grid, my results
        will indicate r.is_healthy()==True. This requires N/k times as much
        download bandwidth (and server disk IO) as a regular download. If a
        storage server is holding a corrupt share, or is experiencing memory
        failures during retrieval, or is malicious or buggy, then
        verification will detect the problem, but checking will not.

        TODO: any problems seen during checking will be reported to the
        health-manager.furl, a centralized object which is responsible for
        figuring out why files are unhealthy so corrective action can be
        taken.
        """

    def check_and_repair(monitor, verify=False):
        """Like check(), but if the file/directory is not healthy, attempt to
        repair the damage.

        Any non-healthy result will cause an immediate repair operation, to
        generate and upload new shares. After repair, the file will be as
        healthy as we can make it. Details about what sort of repair is done
        will be put in the check-and-repair results. The Deferred will not
        fire until the repair is complete.

        This returns a Deferred which fires with an instance of
        ICheckAndRepairResults."""
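
# Illustrative sketch: invoking check() and branching on its result, as
# described above. 'monitor' is whatever monitor object the caller already
# holds; LIT objects yield None instead of an ICheckerResults provider.
def _example_is_healthy(node, monitor):
    d = node.check(monitor, verify=False)
    def _got(results):
        if results is None:
            return True  # non-distributed (LIT) objects are always healthy
        return results.is_healthy()
    d.addCallback(_got)
    return d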

class IDeepCheckable(Interface):
    def start_deep_check(verify=False):
        """Check upon the health of me and everything I can reach.

        This is a recursive form of check(), usable only on dirnodes.

        I return a Monitor, with results that are an IDeepCheckResults
        object.
        """

    def start_deep_check_and_repair(verify=False):
        """Check upon the health of me and everything I can reach. Repair
        anything that isn't healthy.

        This is a recursive form of check_and_repair(), usable only on
        dirnodes.

        I return a Monitor, with results that are an
        IDeepCheckAndRepairResults object.
        """

class ICheckerResults(Interface):
    """I contain the detailed results of a check/verify operation.
    """

    def get_storage_index():
        """Return a string with the (binary) storage index."""
    def get_storage_index_string():
        """Return a string with the (printable) abbreviated storage index."""

    def is_healthy():
        """Return a boolean, True if the file/dir is fully healthy, False if
        it is damaged in any way. Non-distributed LIT files always return
        True."""

    def needs_rebalancing():
        """Return a boolean, True if the file/dir's reliability could be
        improved by moving shares to new servers. Non-distributed LIT files
        always return False."""

    def get_data():
        """Return a dictionary that describes the state of the file/dir.
        Non-distributed LIT files always return an empty dictionary. Normal
        files and directories return a dictionary with the following keys
        (note that these use base32-encoded strings rather than binary ones)
        (also note that for mutable files, these counts are for the 'best'
        version)::

         count-shares-good: the number of distinct good shares that were found
         count-shares-needed: 'k', the number of shares required for recovery
         count-shares-expected: 'N', the number of total shares generated
         count-good-share-hosts: the number of distinct storage servers with
                                 good shares. If this number is less than
                                 count-shares-good, then some shares are
                                 doubled up, increasing the correlation of
                                 failures. This indicates that one or more
                                 shares should be moved to an otherwise unused
                                 server, if one is available.
         count-corrupt-shares: the number of shares with integrity failures
         list-corrupt-shares: a list of 'share locators', one for each share
                              that was found to be corrupt. Each share locator
                              is a list of (serverid, storage_index, sharenum).
         servers-responding: list of (binary) storage server identifiers,
                             one for each server which responded to the share
                             query.
         sharemap: dict mapping share identifier to list of serverids
                   (binary strings). This indicates which servers are holding
                   which shares. For immutable files, the shareid is an
                   integer (the share number, from 0 to N-1). For mutable
                   files, it is a string of the form 'seq%d-%s-sh%d',
                   containing the sequence number, the roothash, and the
                   share number.

        The following keys are most relevant for mutable files, but immutable
        files will provide sensible values too::

         count-wrong-shares: the number of shares for versions other than the
                             'best' one (which is defined as being the
                             recoverable version with the highest sequence
                             number, then the highest roothash). These are
                             either leftover shares from an older version
                             (perhaps on a server that was offline when an
                             update occurred), shares from an unrecoverable
                             newer version, or shares from an alternate
                             current version that results from an
                             uncoordinated write collision. For a healthy
                             file, this will equal 0.

         count-recoverable-versions: the number of recoverable versions of
                                     the file. For a healthy file, this will
                                     equal 1.

         count-unrecoverable-versions: the number of unrecoverable versions
                                       of the file. For a healthy file, this
                                       will equal 0.
        """

    def get_summary():
        """Return a string with a brief (one-line) summary of the results."""

    def get_report():
        """Return a list of strings with more detailed results."""

class ICheckAndRepairResults(Interface):
    """I contain the detailed results of a check/verify/repair operation.

    The ICheckable.check_and_repair() method returns instances that provide
    ICheckAndRepairResults.
    """

    def get_storage_index():
        """Return a string with the (binary) storage index."""
    def get_storage_index_string():
        """Return a string with the (printable) abbreviated storage index."""
    def get_repair_attempted():
        """Return a boolean, True if a repair was attempted."""
    def get_repair_successful():
        """Return a boolean, True if repair was attempted and the file/dir
        was fully healthy afterwards. False if no repair was attempted or if
        a repair attempt failed."""
    def get_pre_repair_results():
        """Return an ICheckerResults instance that describes the state of the
        file/dir before any repair was attempted."""
    def get_post_repair_results():
        """Return an ICheckerResults instance that describes the state of the
        file/dir after any repair was attempted. If no repair was attempted,
        the pre-repair and post-repair results will be identical."""

class IDeepCheckResults(Interface):
    """I contain the results of a deep-check operation.

    This is made available via the Monitor returned by a call to
    IDeepCheckable.start_deep_check().
    """

    def get_root_storage_index_string():
        """Return the storage index (abbreviated human-readable string) of
        the first object checked."""

    def get_counters():
        """Return a dictionary with the following keys::

             count-objects-checked: count of how many objects were checked
             count-objects-healthy: how many of those objects were completely
                                    healthy
             count-objects-unhealthy: how many were damaged in some way
             count-corrupt-shares: how many shares were found to have
                                   corruption, summed over all objects
                                   examined
        """

    def get_corrupt_shares():
        """Return a set of (serverid, storage_index, sharenum) for all shares
        that were found to be corrupt. Both serverid and storage_index are
        binary.
        """

    def get_all_results():
        """Return a dictionary mapping pathname (a tuple of strings, ready to
        be slash-joined) to an ICheckerResults instance, one for each object
        that was checked."""

    def get_results_for_storage_index(storage_index):
        """Retrieve the ICheckerResults instance for the given (binary)
        storage index. Raises KeyError if there are no results for that
        storage index."""

    def get_stats():
        """Return a dictionary with the same keys as
        IDirectoryNode.deep_stats()."""
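
# Illustrative sketch: walking get_all_results(), slash-joining the path
# tuples as suggested above, and collecting the unhealthy objects.
def _example_unhealthy_paths(deep_results):
    bad = []
    for path, r in deep_results.get_all_results().items():
        if not r.is_healthy():
            bad.append("/".join(path))
    return sorted(bad)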

class IDeepCheckAndRepairResults(Interface):
    """I contain the results of a deep-check-and-repair operation.

    This is made available via the Monitor returned by a call to
    IDeepCheckable.start_deep_check_and_repair().
    """

    def get_root_storage_index_string():
        """Return the storage index (abbreviated human-readable string) of
        the first object checked."""

    def get_counters():
        """Return a dictionary with the following keys::

             count-objects-checked: count of how many objects were checked
             count-objects-healthy-pre-repair: how many of those objects were
                                               completely healthy (before any
                                               repair)
             count-objects-unhealthy-pre-repair: how many were damaged in
                                                 some way
             count-objects-healthy-post-repair: how many of those objects were
                                                completely healthy (after any
                                                repair)
             count-objects-unhealthy-post-repair: how many were damaged in
                                                  some way
             count-repairs-attempted: repairs were attempted on this many
                                      objects. The count-repairs- keys will
                                      always be provided, however unless
                                      repair=true is present, they will all
                                      be zero.
             count-repairs-successful: how many repairs resulted in healthy
                                       objects
             count-repairs-unsuccessful: how many repairs did not result in
                                         completely healthy objects
             count-corrupt-shares-pre-repair: how many shares were found to
                                              have corruption, summed over all
                                              objects examined (before any
                                              repair)
             count-corrupt-shares-post-repair: how many shares were found to
                                               have corruption, summed over all
                                               objects examined (after any
                                               repair)
        """

    def get_stats():
        """Return a dictionary with the same keys as
        IDirectoryNode.deep_stats()."""

    def get_corrupt_shares():
        """Return a set of (serverid, storage_index, sharenum) for all shares
        that were found to be corrupt before any repair was attempted. Both
        serverid and storage_index are binary.
        """

    def get_remaining_corrupt_shares():
        """Return a set of (serverid, storage_index, sharenum) for all shares
        that were found to be corrupt after any repair was completed. Both
        serverid and storage_index are binary. These are shares that need
        manual inspection and probably deletion.
        """

    def get_all_results():
        """Return a dictionary mapping pathname (a tuple of strings, ready to
        be slash-joined) to an ICheckAndRepairResults instance, one for each
        object that was checked."""

class IRepairable(Interface):
    def repair(checker_results):
        """Attempt to repair the given object. Returns a Deferred that fires
        with an IRepairResults object.

        I must be called with an object that implements ICheckerResults, as
        proof that you have actually discovered a problem with this file. I
        will use the data in the checker results to guide the repair process,
        such as which servers provided bad data and should therefore be
        avoided. The ICheckerResults object is inside the
        ICheckAndRepairResults object, which is returned by the
        ICheckable.check_and_repair() method::

         d = filenode.check_and_repair(monitor, verify=False)
         def _got_results(check_and_repair_results):
             check_results = check_and_repair_results.get_pre_repair_results()
             return filenode.repair(check_results)
         d.addCallback(_got_results)
        """

class IRepairResults(Interface):
    """I contain the results of a repair operation."""

class IClient(Interface):
    def upload(uploadable):
        """Upload some data into a CHK, get back the UploadResults for it.
        @param uploadable: something that implements IUploadable
        @return: a Deferred that fires with the UploadResults instance.
                 To get the URI for this file, use results.uri .
        """

    def create_mutable_file(contents=""):
        """Create a new mutable file with contents, get back the URI string.
        @param contents: the initial contents to place in the file.
        @return: a Deferred that fires with the (string) SSK URI for the new
                 file.
        """

    def create_empty_dirnode():
        """Create a new dirnode, empty and unattached.
        @return: a Deferred that fires with the new IDirectoryNode instance.
        """

    def create_node_from_uri(uri):
        """Create a new IFilesystemNode instance from the uri, synchronously.
        @param uri: a string or IURI-providing instance. This could be for a
                    LiteralFileNode, a CHK file node, a mutable file node, or
                    a directory node.
        @return: an instance that provides IFilesystemNode (or more usefully
                 one of its subclasses). File-specifying URIs will result in
                 IFileNode or IMutableFileNode -providing instances, like
                 FileNode, LiteralFileNode, or MutableFileNode.
                 Directory-specifying URIs will result in
                 IDirectoryNode-providing instances, like NewDirectoryNode.
        """
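
# Illustrative sketch: chaining IClient.upload() into
# create_node_from_uri(), per the docstrings above. _ExampleData is the
# toy uploadable sketched earlier in this file.
def _example_upload_and_get_node(client, data):
    d = client.upload(_ExampleData(data))
    def _got(results):
        return client.create_node_from_uri(results.uri)
    d.addCallback(_got)
    return d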

class IClientStatus(Interface):
    def list_all_uploads():
        """Return a list of uploader objects, one for each upload which
        currently has an object available (tracked with weakrefs). This is
        intended for debugging purposes."""
    def list_active_uploads():
        """Return a list of active IUploadStatus objects."""
    def list_recent_uploads():
        """Return a list of IUploadStatus objects for the most recently
        started uploads."""

    def list_all_downloads():
        """Return a list of downloader objects, one for each download which
        currently has an object available (tracked with weakrefs). This is
        intended for debugging purposes."""
    def list_active_downloads():
        """Return a list of active IDownloadStatus objects."""
    def list_recent_downloads():
        """Return a list of IDownloadStatus objects for the most recently
        started downloads."""

class IUploadStatus(Interface):
    def get_started():
        """Return a timestamp (float with seconds since epoch) indicating
        when the operation was started."""
    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        upload. Returns None if the storage index has not yet been
        calculated."""
    def get_size():
        """Return an integer with the number of bytes that will eventually
        be uploaded for this file. Returns None if the size is not yet known.
        """
    def using_helper():
        """Return True if this upload is using a Helper, False if not."""
    def get_status():
        """Return a string describing the current state of the upload
        process."""
    def get_progress():
        """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
        each from 0.0 to 1.0 . 'chk' describes how much progress has been
        made towards hashing the file to determine a CHK encryption key: if
        non-convergent encryption is in use, this will be trivial, otherwise
        the whole file must be hashed. 'ciphertext' describes how much of the
        ciphertext has been pushed to the helper, and is '1.0' for non-helper
        uploads. 'encode_and_push' describes how much of the encode-and-push
        process has finished: for helper uploads this is dependent upon the
        helper providing progress reports. It might be reasonable to add all
        three numbers and report the sum to the user."""
    def get_active():
        """Return True if the upload is currently active, False if not."""
    def get_results():
        """Return an instance of UploadResults (which contains timing and
        sharemap information). Might return None if the upload is not yet
        finished."""
    def get_counter():
        """Each upload status gets a unique number: this method returns that
        number. This provides a handle to this particular upload, so a web
        page can generate a suitable hyperlink."""
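
# Illustrative sketch: rendering get_progress() for display. As the
# docstring notes, the three phase values can reasonably be combined into
# a single figure; the formatting here is an assumption.
def _example_render_progress(upload_status):
    chk, ciphertext, push = upload_status.get_progress()
    overall = (chk + ciphertext + push) / 3.0
    return "%.0f%% (chk %.2f, ciphertext %.2f, push %.2f)" % (
        overall * 100, chk, ciphertext, push)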

class IDownloadStatus(Interface):
    def get_started():
        """Return a timestamp (float with seconds since epoch) indicating
        when the operation was started."""
    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files)."""
    def get_size():
        """Return an integer with the number of bytes that will eventually be
        retrieved for this file. Returns None if the size is not yet known.
        """
    def using_helper():
        """Return True if this download is using a Helper, False if not."""
    def get_status():
        """Return a string describing the current state of the download
        process."""
    def get_progress():
        """Returns a float (from 0.0 to 1.0) describing the amount of the
        download that has completed. This value will remain at 0.0 until the
        first byte of plaintext is pushed to the download target."""
    def get_active():
        """Return True if the download is currently active, False if not."""
    def get_counter():
        """Each download status gets a unique number: this method returns
        that number. This provides a handle to this particular download, so a
        web page can generate a suitable hyperlink."""

class IServermapUpdaterStatus(Interface):
    pass
class IPublishStatus(Interface):
    pass
class IRetrieveStatus(Interface):
    pass

class NotCapableError(Exception):
    """You have tried to write to a read-only node."""

class BadWriteEnablerError(Exception):
    pass

class RIControlClient(RemoteInterface):

    def wait_for_client_connections(num_clients=int):
        """Do not return until we have connections to at least NUM_CLIENTS
        storage servers.
        """

    def upload_from_file_to_uri(filename=str, convergence=ChoiceOf(None, StringConstraint(2**20))):
        """Upload a file to the grid. This accepts a filename (which must be
        absolute) that points to a file on the node's local disk. The node will
        read the contents of this file, upload it to the grid, then return the
        URI at which it was uploaded. If convergence is None then a random
        encryption key will be used, else the plaintext will be hashed, then
        that hash will be mixed together with the "convergence" string to form
        the encryption key.
        """
        return URI

    def download_from_uri_to_file(uri=URI, filename=str):
        """Download a file from the grid, placing it on the node's local disk
        at the given filename (which must be absolute[?]). Returns the
        absolute filename where the file was written."""
        return str

    def get_memory_usage():
        """Return a dict that describes the amount of memory currently in
        use. The keys are 'VmPeak', 'VmSize', and 'VmData'. The values are
        integers, measuring memory consumption in bytes."""
        return DictOf(str, int)

    def speed_test(count=int, size=int, mutable=Any()):
        """Write 'count' tempfiles to disk, all of the given size. Measure
        how long (in seconds) it takes to upload them all to the servers.
        Then measure how long it takes to download all of them. If 'mutable'
        is 'create', time creation of mutable files. If 'mutable' is
        'upload', then time access to the same mutable file instead of
        creating one per file.

        Returns a tuple of (upload_time, download_time).
        """
        return (float, float)

    def measure_peer_response_time():
        """Send a short message to each connected peer, and measure the time
        it takes for them to respond to it. This is a rough measure of the
        application-level round trip time.

        @return: a dictionary mapping peerid to a float (RTT time in seconds)
        """
        return DictOf(Nodeid, float)
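
# Illustrative sketch of the convergence scheme described in
# upload_from_file_to_uri(): the plaintext is hashed, and that hash is
# mixed with the "convergence" string to form the encryption key. The real
# derivation uses tagged hashes from allmydata.util.hashutil; plain
# SHA-256 here is an assumption for illustration only.
import hashlib, os

def _example_convergent_key(convergence, plaintext):
    if convergence is None:
        return os.urandom(16)  # no convergence: use a random key
    plaintext_hash = hashlib.sha256(plaintext).digest()
    return hashlib.sha256(convergence + plaintext_hash).digest()[:16]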

UploadResults = Any() #DictOf(str, str)

class RIEncryptedUploadable(RemoteInterface):
    __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"

    def get_size():
        return Offset

    def get_all_encoding_parameters():
        return (int, int, int, long)

    def read_encrypted(offset=Offset, length=ReadSize):
        return ListOf(str)

    def get_plaintext_hashtree_leaves(first=int, last=int, num_segments=int):
        return ListOf(Hash)

    def get_plaintext_hash():
        return Hash

    def close():
        return None

class RICHKUploadHelper(RemoteInterface):
    __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"

    def upload(reader=RIEncryptedUploadable):
        return UploadResults

class RIHelper(RemoteInterface):
    __remote_name__ = "RIHelper.tahoe.allmydata.com"

    def upload_chk(si=StorageIndex):
        """See if a file with a given storage index needs uploading. The
        helper will ask the appropriate storage servers to see if the file
        has already been uploaded. If so, the helper will return a set of
        'upload results' that includes whatever hashes are needed to build
        the read-cap, and perhaps a truncated sharemap.

        If the file has not yet been uploaded (or if it was only partially
        uploaded), the helper will return an empty upload-results dictionary
        and also an RICHKUploadHelper object that will take care of the
        upload process. The client should call upload() on this object and
        pass it a reference to an RIEncryptedUploadable object that will
        provide ciphertext. When the upload is finished, the upload() method
        will finish and return the upload results.
        """
        return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
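
# Illustrative sketch of the helper protocol described above: ask the
# helper about a storage index, and if it hands back an upload helper,
# feed that helper an RIEncryptedUploadable. 'helper' is assumed to be a
# foolscap RemoteReference to an RIHelper.
def _example_helper_upload(helper, si, encrypted_uploadable):
    d = helper.callRemote("upload_chk", si)
    def _got(res):
        upload_results, upload_helper = res
        if upload_helper is None:
            return upload_results  # file is already present in the grid
        return upload_helper.callRemote("upload", encrypted_uploadable)
    d.addCallback(_got)
    return d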

class RIStatsProvider(RemoteInterface):
    __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
    """
    Provides access to statistics and monitoring information.
    """

    def get_stats():
        """
        returns a dictionary containing 'counters' and 'stats', each a
        dictionary with string counter/stat name keys, and numeric values.
        counters are monotonically increasing measures of work done, and
        stats are instantaneous measures (potentially time averaged
        internally)
        """
        return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))
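
# Illustrative sketch of a provider-side return value matching the schema
# above: 'counters' grow monotonically, 'stats' are instantaneous
# snapshots. The specific counter/stat names are assumptions.
def _example_stats(uploads_done, bytes_sent, peers_connected):
    return {"counters": {"uploads": uploads_done,
                         "bytes_sent": bytes_sent},
            "stats": {"peers.connected": peers_connected}}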

class RIStatsGatherer(RemoteInterface):
    __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
    """
    Provides a monitoring service for centralised collection of stats
    """

    def provide(provider=RIStatsProvider, nickname=str):
        """
        @param provider: a stats collector instance which should be polled
                         periodically by the gatherer to collect stats.
        @param nickname: a name useful to identify the provided client
        """
        return None

class IStatsProducer(Interface):
    def get_stats():
        """
        returns a dictionary, with str keys representing the names of stats
        to be monitored, and numeric values.
        """

class RIKeyGenerator(RemoteInterface):
    __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
    """
    Provides a service offering to make RSA key pairs.
    """

    def get_rsa_key_pair(key_size=int):
        """
        @param key_size: the size of the signature key.
        @return: tuple(verifying_key, signing_key)
        """
        return TupleOf(str, str)

class FileTooLargeError(Exception):
    pass