from zope.interface import Interface
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable

HASH_SIZE = 32
Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2 GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable bucket modifications
LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests

class RIStubClient(RemoteInterface):
    """Each client publishes a service announcement for a dummy object called
    the StubClient. This object doesn't actually offer any services, but the
    announcement helps the Introducer keep track of which clients are
    subscribed (so the grid admin can keep track of things like the size of
    the grid and the client versions in use). This is the (empty)
    RemoteInterface for the StubClient."""

class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """
        return None

    def abort():
        """Abandon all the data that has been written.
        """
        return None

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share(),
        which is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """

TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt, nop"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
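
# A minimal sketch of how these schema types compose (illustrative values
# only; the variable names are hypothetical):
#
#   testv = [(0, 1, 'eq', '')]        # TestVector: passes only if the share
#                                     # is empty or does not exist yet
#   datav = [(0, 'x' * 100)]          # DataVector: write 100 bytes at offset 0
#   tw_vectors = {0: (testv, datav, None)}  # share 0; new_length=None leaves
#                                           # the container size alone
#   readv = [(0, 10)]                 # ReadVector: fetch the first 10 bytes
#                                     # of every known share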

class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())
    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket refresh
                             requests. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
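
    # Illustrative client-side call (not part of the interface itself);
    # 'rref' is a foolscap RemoteReference to an RIStorageServer, and the
    # other names are hypothetical values supplied by the caller:
    #
    #   d = rref.callRemote("allocate_buckets", storage_index,
    #                       renew_secret, cancel_secret,
    #                       sharenums=set([0, 1]), allocated_size=2**20,
    #                       canary=canary)
    #   d.addCallback(lambda (alreadygot, bucket_writers): ...)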

    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (Note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        such bucket.)
        """
        return Any() # returns None now, but future versions might change

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket, resetting the timer to 31 days.
        Some networks will use this, some will not. If there is no bucket for
        the given storage_index, IndexError will be raised.

        For mutable shares, if the given renew_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def cancel_lease(storage_index=StorageIndex,
                     cancel_secret=LeaseCancelSecret):
        """
        Cancel the lease on a given bucket. If this was the last lease on the
        bucket, the bucket will be deleted. If there is no bucket for the
        given storage_index, IndexError will be raised.

        For mutable shares, if the given cancel_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results
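
    # Illustrative usage (hypothetical names): read the first 100 bytes of
    # every known share for a slot:
    #
    #   d = rref.callRemote("slot_readv", storage_index, [], [(0, 100)])
    #   # fires with e.g. {0: ['...'], 1: ['...']}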

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
179 """General-purpose test-and-set operation for mutable slots. Perform
180 a bunch of comparisons against the existing shares. If they all pass,
181 then apply a bunch of write vectors to those shares. Then use the
182 read vectors to extract data from all the shares and return the data.
184 This method is, um, large. The goal is to allow clients to update all
185 the shares associated with a mutable file in a single round trip.
187 @param storage_index: the index of the bucket to be created or
189 @param write_enabler: a secret that is stored along with the slot.
190 Writes are accepted from any caller who can
191 present the matching secret. A different secret
192 should be used for each slot*server pair.
193 @param renew_secret: This is the secret used to protect bucket refresh
194 This secret is generated by the client and
195 stored for later comparison by the server. Each
196 server is given a different secret.
197 @param cancel_secret: Like renew_secret, but protects bucket decref.
199 The 'secrets' argument is a tuple of (write_enabler, renew_secret,
200 cancel_secret). The first is required to perform any write. The
201 latter two are used when allocating new shares. To simply acquire a
202 new lease on existing shares, use an empty testv and an empty writev.
204 Each share can have a separate test vector (i.e. a list of
205 comparisons to perform). If all vectors for all shares pass, then all
206 writes for all shares are recorded. Each comparison is a 4-tuple of
207 (offset, length, operator, specimen), which effectively does a bool(
208 (read(offset, length)) OPERATOR specimen ) and only performs the
209 write if all these evaluate to True. Basic test-and-set uses 'eq'.
210 Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
211 Write-if-same-or-newer uses 'le'.
213 Reads from the end of the container are truncated, and missing shares
214 behave like empty ones, so to assert that a share doesn't exist (for
215 use when creating a new share), use (0, 1, 'eq', '').
217 The write vector will be applied to the given share, expanding it if
218 necessary. A write vector applied to a share number that did not
219 exist previously will cause that share to be created.
221 Each write vector is accompanied by a 'new_length' argument. If
222 new_length is not None, use it to set the size of the container. This
223 can be used to pre-allocate space for a series of upcoming writes, or
224 truncate existing data. If the container is growing, new_length will
225 be applied before datav. If the container is shrinking, it will be
226 applied afterwards. If new_length==0, the share will be deleted.
228 The read vector is used to extract data from all known shares,
229 *before* any writes have been applied. The same vector is used for
230 all shares. This captures the state that was tested by the test
233 This method returns two values: a boolean and a dict. The boolean is
234 True if the write vectors were applied, False if not. The dict is
235 keyed by share number, and each value contains a list of strings, one
236 for each element of the read vector.
238 If the write_enabler is wrong, this will raise BadWriteEnablerError.
239 To enable share migration (using update_write_enabler), the exception
240 will have the nodeid used for the old write enabler embedded in it,
241 in the following string::
243 The write enabler was recorded by nodeid '%s'.
245 Note that the nodeid here is encoded using the same base32 encoding
246 used by Foolscap and allmydata.util.idlib.nodeid_b2a().
249 return TupleOf(bool, DictOf(int, ReadData))
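
    # Illustrative test-and-set call (hypothetical names): create share 0
    # only if it does not already exist, then read back the first 10 bytes
    # of every share:
    #
    #   secrets = (write_enabler, renew_secret, cancel_secret)
    #   tw_vectors = {0: ([(0, 1, 'eq', '')], [(0, initial_data)], None)}
    #   d = rref.callRemote("slot_testv_and_readv_and_writev",
    #                       storage_index, secrets, tw_vectors, [(0, 10)])
    #   def _done((wrote, read_data)):
    #       if not wrote:
    #           pass # a test vector failed; read_data shows the current state
    #   d.addCallback(_done)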

    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        which did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """

class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode::

         for k in sorted(dict.keys()):
             assert re.match(r'^[a-zA-Z_\-]+$', k)
             write(k + ':' + netstring(dict[k]))

        @return: a Deferred that fires (with None) when the operation completes
        """
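
    # A runnable rendering of the pseudocode above (a sketch, not the
    # canonical implementation; 'serialize_uri_extension' is a hypothetical
    # helper name, and netstring(s) is the usual "<len>:<bytes>," encoding):
    #
    #   import re
    #   def netstring(s):
    #       return '%d:%s,' % (len(s), s)
    #   def serialize_uri_extension(d):
    #       out = []
    #       for k in sorted(d.keys()):
    #           assert re.match(r'^[a-zA-Z_\-]+$', k)
    #           out.append(k + ':' + netstring(d[k]))
    #       return ''.join(out)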
315 """Finish writing and close the bucket. The share is not finalized
316 until this method is called: if the uploading client disconnects
317 before calling close(), the partially-written share will be
320 @return: a Deferred that fires (with None) when the operation completes

class IStorageBucketReader(Interface):

    def get_block_data(blocknum=int, blocksize=int, size=int):
        """Most blocks will be the same size. The last block might be shorter
        than the others.

        @return: ShareData
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_block_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(Hash)
        """

    def get_share_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(TupleOf(int, Hash))
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """

class IStorageBroker(Interface):
    def get_servers_for_index(peer_selection_index):
        """
        @return: list of (peerid, versioned-rref) tuples
        """

    def get_all_servers():
        """
        @return: frozenset of (peerid, versioned-rref) tuples
        """

    def get_all_serverids():
        """
        @return: frozenset of serverid strings
        """

    def get_nickname_for_serverid(serverid):
        """
        @return: unicode nickname, or None
        """

    # methods moved from IntroducerClient, need review
    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
        each active connection we've established to a remote service. This is
        mostly useful for unit tests that need to wait until a certain number
        of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are actively
        trying to connect to. Each RemoteServiceConnector has the following
        attributes::

          service_name: the type of service provided, like 'storage'
          announcement_time: when we first heard about this service
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection

          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, same
                            source as 'version'

          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web page
        which describes connecting and connected peers.
        """

    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have a connection (to
        one or more services) established. Mostly useful for unit tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """
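
    # One way such a permutation can be computed (a sketch under the
    # assumption that peerids are binary strings; not necessarily the exact
    # scheme used by any given implementation):
    #
    #   from hashlib import sha1
    #   def permute(peers, key):
    #       # peers is a list of (peerid, rref) tuples
    #       return sorted(peers, key=lambda peer: sha1(key + peer[0]).digest())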

class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    def get_readonly():
        """Return another IURI instance, which represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IVerifierURI(Interface, IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    """I am a URI which represents a mutable filenode."""

class IDirectoryURI(Interface):
    pass

class IReadonlyDirectoryURI(Interface):
    pass

class CannotPackUnknownNodeError(Exception):
    """UnknownNodes (using filecaps from the future that we don't understand)
    cannot yet be copied safely, so I refuse to copy them."""

class UnhandledCapTypeError(Exception):
    """I recognize the cap/URI, but I cannot create an IFilesystemNode for
    it."""

class IFilesystemNode(Interface):
    def get_uri():
        """
        Return the URI string that can be used by others to get access to
        this node. If this node is read-only, the URI will only offer
        read-only access. If this node is read-write, the URI will offer
        read-write access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_readonly_uri():
        """Return the URI string that can be used by others to get read-only
        access to this node. The result is a read-only URI, regardless of
        whether this node is read-only or read-write.

        If you have merely read-only access to this node, get_readonly_uri()
        will return the same thing as get_uri().
        """

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files).
        """

    def is_readonly():
        """Return True if this reference provides only read-only access to
        the given file or directory (i.e. you cannot modify it), or False if
        it provides read-write access. Note that even if this reference is
        read-only, someone else may hold a read-write reference to it."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file."""

class IMutableFilesystemNode(IFilesystemNode):
    pass

class IFileNode(IFilesystemNode):
    def download(target):
        """Download the file's contents to a given IDownloadTarget"""

    def download_to_data():
        """Download the file's contents. Return a Deferred that fires
        with those contents."""

    def get_size():
        """Return the length (in bytes) of the data this node represents."""

    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None).

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
         (if streaming == False)::
          consumer does p.resumeProducing()
          consumer.write(data)
          consumer does p.resumeProducing()
          consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        A simple download-to-memory consumer example would look like this::

         class MemoryConsumer:
             implements(IConsumer)
             def __init__(self):
                 self.chunks = []
                 self.done = False
             def registerProducer(self, p, streaming):
                 assert streaming == False
                 while not self.done:
                     p.resumeProducing()
             def write(self, data):
                 self.chunks.append(data)
             def unregisterProducer(self):
                 self.done = True
         d = filenode.read(MemoryConsumer())
         d.addCallback(lambda mc: "".join(mc.chunks))
        """

class IMutableFileNode(IFileNode, IMutableFilesystemNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node which has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.

    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """

    def download_best_version():
        """Download the 'best' available version of the file, meaning one of
        the recoverable versions with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has been
        uploaded.

        I update an internal servermap with MODE_READ, determine which
        version of the file is indicated by
        servermap.best_recoverable_version(), and return a Deferred that
        fires with its contents. If no version is recoverable, the Deferred
        will errback with UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be downloaded with
        download_best_version(), without actually downloading the whole file.

        I return a Deferred that fires with an integer.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """
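
    # An illustrative modifier callable (hypothetical; argument order as
    # described in the docstring above, and 'new_line' is assumed to be
    # defined in the enclosing scope). It appends a line, but only if that
    # line is not already present, making it safe to call multiple times:
    #
    #   def _append_line(old_contents, first_time, servermap):
    #       if new_line in old_contents:
    #           return None # nothing to do
    #       return old_contents + new_line
    #   d = filenode.modify(_append_line)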

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap which was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """

    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """
768 """Return this filenode's writekey, or None if the node does not have
769 write-capability. This may be used to assist with data structures
770 that need to make certain data available only to writers, such as the
771 read-write child caps in dirnodes. The recommended process is to have
772 reader-visible data be submitted to the filenode in the clear (where
773 it will be encrypted by the filenode using the readkey), but encrypt
774 writer-visible data using this writekey.

class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares, or upload was unable to
    place 'shares_of_happiness' shares."""

class NoSharesError(Exception):
    """Upload or Download was unable to get any shares at all."""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data which is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child which does not exist."""

class IDirectoryNode(IMutableFilesystemNode):
    """I represent a name-to-child mapping, holding the tahoe equivalent of a
    directory. All child names are unicode strings, and all children are some
    sort of IFilesystemNode (either files or subdirectories).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is either an IFileNode or IDirectoryNode, and 'metadata_dict'
        is a dictionary of metadata."""
830 """I return a Deferred that fires with a boolean, True if there
831 exists a child of the given name, False if not. The child name must
832 be a unicode string."""
835 """I return a Deferred that fires with a specific named child node,
836 either an IFileNode or an IDirectoryNode. The child name must be a
837 unicode string. I raise NoSuchChildError if I do not have a child by
840 def get_metadata_for(name):
841 """I return a Deferred that fires with the metadata dictionary for a
842 specific named child node. This metadata is stored in the *edge*, not
843 in the child, so it is attached to the parent dirnode rather than the
844 child dir-or-file-node. The child name must be a unicode string. I
845 raise NoSuchChildError if I do not have a child by that name."""
847 def set_metadata_for(name, metadata):
848 """I replace any existing metadata for the named child with the new
849 metadata. The child name must be a unicode string. This metadata is
850 stored in the *edge*, not in the child, so it is attached to the
851 parent dirnode rather than the child dir-or-file-node. I return a
852 Deferred (that fires with this dirnode) when the operation is
853 complete. I raise NoSuchChildError if I do not have a child by that

    def get_child_at_path(path):
        """Transform a child path into an IDirectoryNode or IFileNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """
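
    # Illustrative usage: both of these refer to the same grandchild:
    #
    #   d = dirnode.get_child_at_path(u"subdir/file.txt")
    #   d = dirnode.get_child_at_path([u"subdir", u"file.txt"])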

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IDirectoryNode/IFileNode and
        metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If the new
        child is read/write, you will provide both writecap and readcap. If
        the child is read-only, you will provide the readcap twice (i.e. the
        writecap= and readcap= arguments will both be the child's readcap).
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""

    def set_children(entries, overwrite=True):
        """Add multiple (name, writecap, readcap) triples (or (name,
        writecap, readcap, metadata) 4-tuples) to a directory node. Returns a
        Deferred that fires (with None) when the operation finishes. This is
        equivalent to calling set_uri() multiple times, but is much more
        efficient. All child names must be unicode strings."""
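
    # An illustrative 'entries' value (hypothetical caps): a mix of the
    # 3-tuple and 4-tuple forms accepted by set_children():
    #
    #   entries = [ (u"file1", writecap1, readcap1),
    #               (u"file2", writecap2, readcap2, {"mtime": 1234567890}) ]
    #   d = dirnode.set_children(entries)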

    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IDirectoryNode or IFileNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple (name, child_node) pairs (or (name, child_node,
        metadata) triples) to a directory node. Returns a Deferred that fires
        (with None) when the operation finishes. This is equivalent to
        calling set_node() multiple times, but is much more efficient. All
        child names must be unicode strings."""

    def add_file(name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I set metadata
        the same way as set_uri and set_node. The child name must be a
        unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""
943 """I remove the child at the specific name. I return a Deferred that
944 fires when the operation finishes. The child name must be a unicode
945 string. I raise NoSuchChildError if I do not have a child by that
948 def create_empty_directory(name, overwrite=True):
949 """I create and attach an empty directory at the given name. The
950 child name must be a unicode string. I return a Deferred that fires
951 when the operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""

    def build_manifest():
        """I generate a table of everything reachable from this directory.
        I also compute deep-stats as described below.

        I return a Monitor. The Monitor's results will be a dictionary with
        four elements::

         res['manifest']: a list of (path, cap) tuples for all nodes
                          (directories and files) reachable from this one.
                          'path' will be a tuple of unicode strings. The
                          origin dirnode will be represented by an empty path
                          tuple.
         res['verifycaps']: a list of (printable) verifycap strings, one for
                            each reachable non-LIT node. This is a set:
                            it will contain no duplicates.
         res['storage-index']: a list of (base32) storage index strings,
                               one for each reachable non-LIT node. This is
                               a set: it will contain no duplicates.
         res['stats']: a dictionary, the same that is generated by
                       start_deep_stats() below.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.
        """

    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

         count-immutable-files: count of how many CHK files are in the set
         count-mutable-files: same, for mutable files (does not include
                              directories)
         count-literal-files: same, for LIT files
         count-files: sum of the above three

         count-directories: count of directories

         size-immutable-files: total bytes for all CHK files in the set
         size-mutable-files (TODO): same, for current version of all mutable
                                    files, does not include directories
         size-literal-files: same, for LIT files
         size-directories: size of mutable files used by directories

         largest-directory: number of bytes in the largest directory
         largest-directory-children: number of children in the largest
                                     directory
         largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """

class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' argument to use less CPU). The encoding
        math will be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).

        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_params():
        """Return the 3-tuple of data_size, required_shares, max_shares"""

    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This method
        returns an index into this table; the value at this index is an
        encoder class, and this encoder is an instance of that class.
        """

    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """

    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data) must
        be equal to the 'data_size' value passed earlier to set_params().

        This will return a Deferred that will fire with two lists. The first
        is a list of shares, each of which is a string (or other buffer
        object) such that len(share) is the same as what get_share_size()
        returned earlier. The second is a list of shareids, in which each is
        an integer. The lengths of the two lists will always be equal to each
        other. The user should take care to keep each share closely
        associated with its shareid, as one is useless without the other.

        The length of this output list will normally be the same as the value
        provided to the 'max_shares' parameter of set_params(). This may be
        different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        """

    def encode(inshares, desired_share_ids=None):
        """Encode some data. This may be called multiple times. Each call is
        independent.

        inshares is a sequence of length required_shares, containing buffers
        (i.e. strings), where each buffer contains the next contiguous
        non-overlapping segment of the input data. Each buffer is required to
        be the same length, and the sum of the lengths of the buffers is
        required to be exactly the data_size promised by set_params(). (This
        implies that the data has to be padded before being passed to
        encode(), unless of course it already happens to be an even multiple
        of required_shares in length.)

        ALSO: the requirement to break up your data into 'required_shares'
        chunks before calling encode() feels a bit surprising, at least from
        the point of view of a user who doesn't know how FEC works. It feels
        like an implementation detail that has leaked outside the
        abstraction barrier. Can you imagine a use case in which the data to
        be encoded might already be available in pre-segmented chunks, such
        that it is faster or less work to make encode() take a list rather
        than splitting a single string?

        ALSO ALSO: I think 'inshares' is a misleading term, since encode()
        is supposed to *produce* shares, so what it *accepts* should be
        something other than shares. Other places in this interface use the
        word 'data' for that-which-is-not-shares.. maybe we should use that
        term here too.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.

        For each call, encode() will return a Deferred that fires with two
        lists, one containing shares and the other containing the shareids.
        The get_share_size() method can be used to determine the length of
        the share strings returned by encode(). Each shareid is a small
        integer, exactly as passed into 'desired_share_ids' (or
        range(max_shares), if desired_share_ids was not provided).

        The shares and their corresponding shareids are required to be kept
        together during storage and retrieval. Specifically, the share data is
        useless by itself: the decoder needs to be told which share is which
        by providing it with both the shareid and the actual share data.

        This function will allocate an amount of memory roughly equal to::

         (max_shares - required_shares) * get_share_size()

        When combined with the memory that the caller must allocate to
        provide the input data, this leads to a memory footprint roughly
        equal to the size of the resulting encoded shares (i.e. the expansion
        factor times the size of the input segment).
        """
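
    # A worked instance of the memory estimate above (illustrative numbers
    # only): with set_params(1000, 5, 10), each share is 1000/5 == 200
    # bytes, so encode() allocates roughly (10 - 5) * 200 == 1000 bytes
    # beyond the input buffers supplied by the caller.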

    # Design notes -- alternatives that were considered and rejected:
    #
    # returning a list of (shareidN,shareN) tuples instead of a pair of
    # lists (shareids..,shares..). Brian thought the tuples would
    # encourage users to keep the share and shareid together throughout
    # later processing, Zooko pointed out that the code to iterate
    # through two lists is not really more complicated than using a list
    # of tuples and there's also a performance improvement
    #
    # having 'data_size' not required to be an integral multiple of
    # 'required_shares'. Doing this would require encode() to perform
    # padding internally, and we'd prefer to have any padding be done
    # explicitly by the caller. Yes, it is an abstraction leak, but
    # hopefully not an onerous one.

class ICodecDecoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set the params. They have to be exactly the same ones that were
        used for encoding."""

    def get_needed_shares():
        """Return the number of shares needed to reconstruct the data.
        set_params() is required to be called before this."""

    def decode(some_shares, their_shareids):
        """Decode a partial list of shares into data.

        'some_shares' is required to be a sequence of buffers of sharedata, a
        subset of the shares returned by ICodecEncode.encode(). Each share is
        required to be of the same length. The i'th element of their_shareids
        is required to be the shareid of the i'th buffer in some_shares.

        This returns a Deferred which fires with a sequence of buffers. This
        sequence will contain all of the segments of the original data, in
        order. The sum of the lengths of all of the buffers will be the
        'data_size' value passed into the original ICodecEncode.set_params()
        call. To get back the single original input block of data, use
        ''.join(output_buffers), or you may wish to simply write them in
        order to an output file.

        Note that some of the elements in the result sequence may be
        references to the elements of the some_shares input sequence. In
        particular, this means that if those share objects are mutable (e.g.
        arrays) and if they are changed, then both the input (the
        'some_shares' parameter) and the output (the value given when the
        deferred is triggered) will change.

        The length of 'some_shares' is required to be exactly the value of
        'required_shares' passed into the original ICodecEncode.set_params()
        call.
        """
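
# An illustrative encode/decode round trip for some concrete
# ICodecEncoder/ICodecDecoder pair ('make_encoder'/'make_decoder' are
# hypothetical factories; Deferred handling abbreviated):
#
#   enc = make_encoder()
#   enc.set_params(900, 3, 10)  # data_size must be a multiple of k==3
#   d = enc.encode(["a" * 300, "b" * 300, "c" * 300])
#   # d fires with (shares, shareids); any 3 of them recover the data:
#   dec = make_decoder()
#   dec.set_params(900, 3, 10)
#   d2 = dec.decode(shares[:3], shareids[:3])
#   d2.addCallback(lambda buffers: ''.join(buffers)) # == the original 900 bytes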

class IEncoder(Interface):
    """I take an object that provides IEncryptedUploadable, which provides
    encrypted data, and a list of shareholders. I then encode, hash, and
    deliver shares to those shareholders. I will compute all the necessary
    Merkle hash trees that are necessary to validate the crypttext that
    eventually comes back from the shareholders. I provide the URI Extension
    Block Hash, and the encoding parameters, both of which must be included
    in the URI.

    I do not choose shareholders, that is left to the IUploader. I must be
    given a dict of RemoteReferences to storage buckets that are ready and
    willing to receive data.
    """
1234 """Specify the number of bytes that will be encoded. This must be
1235 peformed before get_serialized_params() can be called.
1237 def set_params(params):
1238 """Override the default encoding parameters. 'params' is a tuple of
1239 (k,d,n), where 'k' is the number of required shares, 'd' is the
1240 shares_of_happiness, and 'n' is the total number of shares that will
1243 Encoding parameters can be set in three ways. 1: The Encoder class
1244 provides defaults (3/7/10). 2: the Encoder can be constructed with
1245 an 'options' dictionary, in which the
1246 needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
1247 set_params((k,d,n)) can be called.
1249 If you intend to use set_params(), you must call it before
1250 get_share_size or get_param are called.

    def set_encrypted_uploadable(u):
        """Provide a source of encrypted upload data. 'u' must implement
        IEncryptedUploadable.

        When this is called, the IEncryptedUploadable will be queried for its
        length and the storage_index that should be used.

        This returns a Deferred that fires with this Encoder instance.

        This must be performed before start() can be called.
        """

    def get_param(name):
        """Return an encoding parameter, by name.

        'storage_index': return a string with the (16-byte truncated SHA-256
                         hash) storage index to which these shares should be
                         pushed.

        'share_counts': return a tuple describing how many shares are used:
                        (needed_shares, shares_of_happiness, total_shares)

        'num_segments': return an int with the number of segments that
                        will be encoded.

        'segment_size': return an int with the size of each segment.

        'block_size': return the size of the individual blocks that will
                      be delivered to a shareholder's put_block() method. By
                      knowing this, the shareholder will be able to keep all
                      blocks in a single file and still provide random access
                      when reading them. # TODO: can we avoid exposing this?

        'share_size': an int with the size of the data that will be stored
                      on each shareholder. This is the aggregate amount of
                      data that will be sent to the shareholder, summed over
                      all the put_block() calls I will ever make. It is useful
                      to determine this size before asking potential
                      shareholders whether they will grant a lease or not,
                      since their answers will depend upon how much space we
                      need. TODO: this might also include some amount of
                      overhead, like the size of all the hashes. We need to
                      decide whether this is useful or not.

        'serialized_params': a string with a concise description of the
                             codec name and its parameters. This may be passed
                             into the IUploadable to let it make sure that
                             the same file encoded with different parameters
                             will result in different storage indexes.

        Once this is called, set_size() and set_params() may not be called.
        """

    def set_shareholders(shareholders):
        """Tell the encoder where to put the encoded shares. 'shareholders'
        must be a dictionary that maps share number (an integer ranging from
        0 to n-1) to an instance that provides IStorageBucketWriter. This
        must be performed before start() can be called."""
1313 """Begin the encode/upload process. This involves reading encrypted
1314 data from the IEncryptedUploadable, encoding it, uploading the shares
1315 to the shareholders, then sending the hash trees.
1317 set_encrypted_uploadable() and set_shareholders() must be called
1318 before this can be invoked.
1320 This returns a Deferred that fires with a verify cap when the upload
1321 process is complete. The verifycap, plus the encryption key, is
1322 sufficient to construct the read cap.

class IDecoder(Interface):
    """I take a list of shareholders and some setup information, then
    download, validate, decode, and decrypt data from them, writing the
    results to an output file.

    I do not locate the shareholders, that is left to the IDownloader. I must
    be given a dict of RemoteReferences to storage buckets that are ready to
    send data.
    """

    def setup(outfile):
        """I take a file-like object (providing write and close) to which all
        the plaintext data will be written.

        TODO: producer/consumer. Maybe write() should return a Deferred that
        indicates when it will accept more data? But probably having the
        IDecoder be a producer is easier to glue to IConsumer pieces.
        """

    def set_shareholders(shareholders):
        """I take a dictionary that maps share identifiers (small integers)
        to RemoteReferences that provide RIBucketReader. This must be called
        before start()."""

    def start():
        """I start the download. This process involves retrieving data and
        hash chains from the shareholders, using the hashes to validate the
        data, decoding the shares into segments, decrypting the segments,
        then writing the resulting plaintext to the output file.

        I return a Deferred that will fire (with self) when the download is
        complete.
        """
1359 class IDownloadTarget(Interface):
1360 # Note that if the IDownloadTarget is also an IConsumer, the downloader
1361 # will register itself as a producer. This allows the target to invoke
1362 # downloader.pauseProducing, resumeProducing, and stopProducing.
1364 """Called before any calls to write() or close(). If an error
1365 occurs before any data is available, fail() may be called without
1366 a previous call to open().
1368 'size' is the length of the file being downloaded, in bytes."""
1370 def write(data):
1371 """Output some data to the target."""
1372 def close():
1373 """Inform the target that there is no more data to be written."""
1374 def fail(why):
1375 """fail() is called to indicate that the download has failed. 'why'
1376 is a Failure object indicating what went wrong. No further methods
1377 will be invoked on the IDownloadTarget after fail()."""
1378 def register_canceller(cb):
1379 """The CiphertextDownloader uses this to register a no-argument function
1380 that the target can call to cancel the download. Once this canceller
1381 is invoked, no further calls to write() or close() will be made."""
1382 def finish():
1383 """When the CiphertextDownloader is done, this finish() function will be
1384 called. Whatever it returns will be returned to the invoker of
1385 Downloader.download.
1387 # The following methods are present because the target might be a
1388 # repairer.DownUpConnector, and because the current CHKUpload object
1389 # expects to find the storage index and encoding parameters in its
1390 # target.
1391 def set_storageindex(storageindex):
1392 """ Set the storage index. """
1393 def set_encodingparams(encodingparams):
1394 """ Set the encoding parameters. """
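# A minimal sketch (for exposition only) of an IDownloadTarget that writes
# plaintext to a local file. The class name is hypothetical and error
# handling is reduced to the bare minimum.
class _ExampleFileTarget(object):
    def __init__(self, filename):
        self._filename = filename
        self._f = None
    def open(self, size):
        # 'size' (the expected file length) is not needed by this target
        self._f = open(self._filename, "wb")
    def write(self, data):
        self._f.write(data)
    def close(self):
        self._f.close()
    def fail(self, why):
        # 'why' is a Failure; just make sure the file gets closed
        if self._f is not None:
            self._f.close()
    def register_canceller(self, cb):
        pass  # this simple target never needs to cancel the download
    def finish(self):
        # this return value becomes the result of Downloader.download
        return self._filename
    # no-op implementations of the repairer-related hooks described above
    def set_storageindex(self, storageindex):
        pass
    def set_encodingparams(self, encodingparams):
        pass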
1396 class IDownloader(Interface):
1397 def download(uri, target):
1398 """Perform a CHK download, sending the data to the given target.
1399 'target' must provide IDownloadTarget.
1401 Returns a Deferred that fires (with the results of target.finish)
1402 when the download is finished, or errbacks if something went wrong."""
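# For example (hypothetical 'downloader' assumed to provide IDownloader,
# reusing the _ExampleFileTarget sketch above):
#
#  d = downloader.download(uri, _ExampleFileTarget("out.dat"))
#  d.addCallback(lambda filename: ...) # fires with target.finish()'s result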
1404 class IEncryptedUploadable(Interface):
1405 def set_upload_status(upload_status):
1406 """Provide an IUploadStatus object that should be filled with status
1407 information. The IEncryptedUploadable is responsible for setting
1408 key-determination progress ('chk'), size, storage_index, and
1409 ciphertext-fetch progress. It may delegate some of this
1410 responsibility to others, in particular to the IUploadable."""
1412 def get_size():
1413 """This behaves just like IUploadable.get_size()."""
1415 def get_all_encoding_parameters():
1416 """Return a Deferred that fires with a tuple of
1417 (k,happy,n,segment_size). The segment_size will be used as-is, and
1418 must match the following constraints: it must be a multiple of k, and
1419 it shouldn't be unreasonably larger than the file size (if
1420 segment_size is larger than filesize, the difference must be stored
1421 as padding).
1423 This usually passes through to the IUploadable method of the same
1424 name.
1426 The encoder strictly obeys the values returned by this method. To
1427 make an upload use non-default encoding parameters, you must arrange
1428 to control the values that this method returns.
1431 def get_storage_index():
1432 """Return a Deferred that fires with a 16-byte storage index.
1435 def read_encrypted(length, hash_only):
1436 """This behaves just like IUploadable.read(), but returns crypttext
1437 instead of plaintext. If hash_only is True, then this discards the
1438 data (and returns an empty list); this improves efficiency when
1439 resuming an interrupted upload (where we need to compute the
1440 plaintext hashes, but don't need the redundant encrypted data)."""
1442 def get_plaintext_hashtree_leaves(first, last, num_segments):
1443 """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
1444 plaintext segments, i.e. get the tagged hashes of the given segments.
1445 The segment size is expected to be generated by the
1446 IEncryptedUploadable before any plaintext is read or ciphertext
1447 produced, so that the segment hashes can be generated with only a
1448 single pass over the plaintext.
1450 This returns a Deferred which fires with a sequence of hashes, using:
1452 tuple(segment_hashes[first:last])
1454 'num_segments' is used to assert that the number of segments that the
1455 IEncryptedUploadable handled matches the number of segments that the
1456 encoder was expecting.
1458 This method must not be called until the final byte has been read
1459 from read_encrypted(). Once this method is called, read_encrypted()
1460 can never be called again.
1463 def get_plaintext_hash():
1464 """OBSOLETE; Get the hash of the whole plaintext.
1466 This returns a Deferred which fires with a tagged SHA-256 hash of the
1467 whole plaintext, obtained from hashutil.plaintext_hash(data).
1470 def close():
1471 """Just like IUploadable.close()."""
1473 class IUploadable(Interface):
1474 def set_upload_status(upload_status):
1475 """Provide an IUploadStatus object that should be filled with status
1476 information. The IUploadable is responsible for setting
1477 key-determination progress ('chk')."""
1479 def set_default_encoding_parameters(params):
1480 """Set the default encoding parameters, which must be a dict mapping
1481 strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1482 'max_segment_size'. These might have an influence on the final
1483 encoding parameters returned by get_all_encoding_parameters(), if the
1484 Uploadable doesn't have more specific preferences.
1486 This call is optional: if it is not used, the Uploadable will use
1487 some built-in defaults. If used, this method must be called before
1488 any other IUploadable methods to have any effect.
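# For example, a client might install the well-known 3-of-10 defaults
# (illustrative values; the actual defaults come from the node's
# configuration):
#
#  uploadable.set_default_encoding_parameters({"k": 3, "happy": 7, "n": 10,
#                                              "max_segment_size": 128*1024})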
1491 def get_size():
1492 """Return a Deferred that will fire with the length of the data to be
1493 uploaded, in bytes. This will be called before the data is actually
1494 used, to compute encoding parameters.
1497 def get_all_encoding_parameters():
1498 """Return a Deferred that fires with a tuple of
1499 (k,happy,n,segment_size). The segment_size will be used as-is, and
1500 must match the following constraints: it must be a multiple of k, and
1501 it shouldn't be unreasonably larger than the file size (if
1502 segment_size is larger than filesize, the difference must be stored
1503 as padding).
1505 The relative values of k and n allow some IUploadables to request
1506 better redundancy than others (in exchange for consuming more space
1507 in the grid).
1509 Larger values of segment_size reduce hash overhead, while smaller
1510 values reduce memory footprint and cause data to be delivered in
1511 smaller pieces (which may provide a smoother and more predictable
1512 download experience).
1514 The encoder strictly obeys the values returned by this method. To
1515 make an upload use non-default encoding parameters, you must arrange
1516 to control the values that this method returns. One way to influence
1517 them may be to call set_default_encoding_parameters() before calling
1518 get_all_encoding_parameters().
1521 def get_encryption_key():
1522 """Return a Deferred that fires with a 16-byte AES key. This key will
1523 be used to encrypt the data. The key will also be hashed to derive
1524 the storage index.
1526 Uploadables which want to achieve convergence should hash their file
1527 contents and the serialized_encoding_parameters to form the key
1528 (which of course requires a full pass over the data). Uploadables can
1529 use the upload.ConvergentUploadMixin class to achieve this
1530 automatically.
1532 Uploadables which do not care about convergence (or do not wish to
1533 make multiple passes over the data) can simply return a
1534 strongly-random 16 byte string.
1536 get_encryption_key() may be called multiple times: the IUploadable is
1537 required to return the same value each time.
1540 def read(length):
1541 """Return a Deferred that fires with a list of strings (perhaps with
1542 only a single element) which, when concatenated together, contain the
1543 next 'length' bytes of data. If EOF is near, this may provide fewer
1544 than 'length' bytes. The total number of bytes provided by read()
1545 before it signals EOF must equal the size provided by get_size().
1547 If the data must be acquired through multiple internal read
1548 operations, returning a list instead of a single string may help to
1549 reduce string copies.
1551 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1552 so a 10kB file means length=3kB, 100kB file means length=30kB,
1553 and >=1MB file means length=300kB.
1555 This method provides for a single full pass through the data. Later
1556 use cases may desire multiple passes or access to only parts of the
1557 data (such as a mutable file making small edits-in-place). This API
1558 will be expanded once those use cases are better understood.
1561 def close():
1562 """The upload is finished, and whatever filehandle was in use may be
1563 closed."""
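# Two sketches of the duties described above, under stated assumptions;
# neither is this module's actual implementation.
#
# First, one way to choose a segment_size that satisfies the constraints of
# get_all_encoding_parameters(): take the smaller of a maximum segment size
# and the file size, then round up to a multiple of k so every segment
# splits evenly into k blocks (the shortfall is stored as padding).
def _example_choose_segment_size(filesize, k=3, max_segment_size=128*1024):
    segment_size = min(filesize, max_segment_size)
    if segment_size % k != 0:
        segment_size += k - (segment_size % k)
    return segment_size

# Second, the general shape of a convergent get_encryption_key(): hash the
# encoding parameters together with the plaintext, then truncate to the 16
# bytes AES requires. The real code uses the tagged-hash helpers in
# allmydata.hashutil; the tag string here is purely illustrative.
import hashlib
def _example_convergent_key(file_contents, serialized_params):
    h = hashlib.sha256()
    h.update("example-convergence:" + serialized_params + ":")
    h.update(file_contents)
    return h.digest()[:16]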
1565 class IUploadResults(Interface):
1566 """I am returned by upload() methods. I contain a number of public
1567 attributes which can be read to determine the results of the upload. Some
1568 of these are functional, some are timing information. All of these may be
1569 None.
1571 .file_size : the size of the file, in bytes
1572 .uri : the CHK read-cap for the file
1573 .ciphertext_fetched : how many bytes were fetched by the helper
1574 .sharemap: dict mapping share identifier to set of serverids
1575 (binary strings). This indicates which servers were given
1576 which shares. For immutable files, the shareid is an
1577 integer (the share number, from 0 to N-1). For mutable
1578 files, it is a string of the form 'seq%d-%s-sh%d',
1579 containing the sequence number, the roothash, and the
1580 share number.
1581 .servermap : dict mapping server peerid to a set of share numbers
1582 .timings : dict of timing information, mapping name to seconds (float)
1583 total : total upload time, start to finish
1584 storage_index : time to compute the storage index
1585 peer_selection : time to decide which peers will be used
1586 contacting_helper : initial helper query to upload/no-upload decision
1587 existence_check : helper pre-upload existence check
1588 helper_total : initial helper query to helper finished pushing
1589 cumulative_fetch : helper waiting for ciphertext requests
1590 total_fetch : helper start to last ciphertext response
1591 cumulative_encoding : just time spent in zfec
1592 cumulative_sending : just time spent waiting for storage servers
1593 hashes_and_close : last segment push to shareholder close
1594 total_encode_and_push : first encode to shareholder close
1598 class IDownloadResults(Interface):
1599 """I am created internally by download() methods. I contain a number of
1600 public attributes which contain details about the download process.::
1602 .file_size : the size of the file, in bytes
1603 .servers_used : set of server peerids that were used during download
1604 .server_problems : dict mapping server peerid to a problem string. Only
1605 servers that had problems (bad hashes, disconnects)
1606 are listed here.
1607 .servermap : dict mapping server peerid to a set of share numbers. Only
1608 servers that had any shares are listed here.
1609 .timings : dict of timing information, mapping name to seconds (float)
1610 peer_selection : time to ask servers about shares
1611 servers_peer_selection : dict of peerid to DYHB-query time
1612 uri_extension : time to fetch a copy of the URI extension block
1613 hashtrees : time to fetch the hash trees
1614 segments : time to fetch, decode, and deliver segments
1615 cumulative_fetch : time spent waiting for storage servers
1616 cumulative_decode : just time spent in zfec
1617 cumulative_decrypt : just time spent in decryption
1618 total : total download time, start to finish
1619 fetch_per_server : dict of peerid to list of per-segment fetch times
1623 class IUploader(Interface):
1624 def upload(uploadable):
1625 """Upload the file. 'uploadable' must implement IUploadable. This
1626 returns a Deferred which fires with an UploadResults instance, from
1627 which the URI of the file can be obtained as results.uri ."""
1629 def upload_ssk(write_capability, new_version, uploadable):
1630 """TODO: how should this work?"""
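# Illustrative usage (hypothetical 'uploader' assumed to provide IUploader,
# 'data' an IUploadable):
#
#  d = uploader.upload(data)
#  d.addCallback(lambda results: results.uri)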
1632 class ICheckable(Interface):
1633 def check(monitor, verify=False, add_lease=False):
1634 """Check upon my health, optionally repairing any problems.
1636 This returns a Deferred that fires with an instance that provides
1637 ICheckResults, or None if the object is non-distributed (i.e. LIT
1638 files).
1640 The monitor will be checked periodically to see if the operation has
1641 been cancelled. If so, no new queries will be sent, and the Deferred
1642 will fire (with an OperationCancelledError) immediately.
1644 Filenodes and dirnodes (which provide IFilesystemNode) are also
1645 checkable. Instances that represent verifier-caps will be checkable
1646 but not downloadable. Some objects (like LIT files) do not actually
1647 live in the grid, and their checkers return None (non-distributed
1648 files are always healthy).
1650 If verify=False, a relatively lightweight check will be performed: I
1651 will ask all servers if they have a share for me, and I will believe
1652 whatever they say. If there are at least N distinct shares on the
1653 grid, my results will indicate r.is_healthy()==True. This requires a
1654 roundtrip to each server, but does not transfer very much data, so
1655 the network bandwidth is fairly low.
1657 If verify=True, a more resource-intensive check will be performed:
1658 every share will be downloaded, and the hashes will be validated on
1659 every bit. I will ignore any shares that failed their hash checks. If
1660 there are at least N distinct valid shares on the grid, my results
1661 will indicate r.is_healthy()==True. This requires N/k times as much
1662 download bandwidth (and server disk IO) as a regular download. If a
1663 storage server is holding a corrupt share, or is experiencing memory
1664 failures during retrieval, or is malicious or buggy, then
1665 verification will detect the problem, but checking will not.
1667 If add_lease=True, I will ensure that an up-to-date lease is present
1668 on each share. The lease secrets will be derived from my node secret
1669 (in BASEDIR/private/secret), so either I will add a new lease to the
1670 share, or I will merely renew the lease that I already had. In a
1671 future version of the storage-server protocol (once Accounting has
1672 been implemented), there may be additional options here to define the
1673 kind of lease that is obtained (which account number to claim, etc).
1675 TODO: any problems seen during checking will be reported to the
1676 health-manager.furl, a centralized object which is responsible for
1677 figuring out why files are unhealthy so corrective action can be
1678 taken.
1681 def check_and_repair(monitor, verify=False, add_lease=False):
1682 """Like check(), but if the file/directory is not healthy, attempt to
1683 repair it.
1685 Any non-healthy result will cause an immediate repair operation, to
1686 generate and upload new shares. After repair, the file will be as
1687 healthy as we can make it. Details about what sort of repair is done
1688 will be put in the check-and-repair results. The Deferred will not
1689 fire until the repair is complete.
1691 This returns a Deferred which fires with an instance of
1692 ICheckAndRepairResults."""
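# A sketch of driving ICheckable (illustrative only): check a node and
# report on its health. 'node' is assumed to provide ICheckable, and
# 'monitor' is the usual Monitor instance.
def _example_check(node, monitor):
    d = node.check(monitor, verify=False, add_lease=False)
    def _report(r):
        if r is None:
            return "non-distributed (LIT): always healthy"
        return r.get_summary()
    d.addCallback(_report)
    return d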
1694 class IDeepCheckable(Interface):
1695 def start_deep_check(verify=False, add_lease=False):
1696 """Check upon the health of me and everything I can reach.
1698 This is a recursive form of check(), usable only on dirnodes.
1700 I return a Monitor, with results that are an IDeepCheckResults
1701 object.
1703 TODO: If any of the directories I traverse are unrecoverable, the
1704 Monitor will report failure. If any of the files I check upon are
1705 unrecoverable, those problems will be reported in the
1706 IDeepCheckResults as usual, and the Monitor will not report a
1707 failure.
1710 def start_deep_check_and_repair(verify=False, add_lease=False):
1711 """Check upon the health of me and everything I can reach. Repair
1712 anything that isn't healthy.
1714 This is a recursive form of check_and_repair(), usable only on
1715 dirnodes.
1717 I return a Monitor, with results that are an
1718 IDeepCheckAndRepairResults object.
1720 TODO: If any of the directories I traverse are unrecoverable, the
1721 Monitor will report failure. If any of the files I check upon are
1722 unrecoverable, those problems will be reported in the
1723 IDeepCheckResults as usual, and the Monitor will not report a
1724 failure.
1727 class ICheckResults(Interface):
1728 """I contain the detailed results of a check/verify operation.
1731 def get_storage_index():
1732 """Return a string with the (binary) storage index."""
1733 def get_storage_index_string():
1734 """Return a string with the (printable) abbreviated storage index."""
1735 def get_uri():
1736 """Return the (string) URI of the object that was checked."""
1738 def is_healthy():
1739 """Return a boolean, True if the file/dir is fully healthy, False if
1740 it is damaged in any way. Non-distributed LIT files always return
1741 True."""
1743 def is_recoverable():
1744 """Return a boolean, True if the file/dir can be recovered, False if
1745 not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
1746 files always return True."""
1748 def needs_rebalancing():
1749 """Return a boolean, True if the file/dir's reliability could be
1750 improved by moving shares to new servers. Non-distributed LIT files
1751 always return False."""
1754 def get_data():
1755 """Return a dictionary that describes the state of the file/dir. LIT
1756 files always return an empty dictionary. Normal files and directories
1757 return a dictionary with the following keys (note that these use
1758 binary strings rather than base32-encoded ones) (also note that for
1759 mutable files, these counts are for the 'best' version):
1761 count-shares-good: the number of distinct good shares that were found
1762 count-shares-needed: 'k', the number of shares required for recovery
1763 count-shares-expected: 'N', the number of total shares generated
1764 count-good-share-hosts: the number of distinct storage servers with
1765 good shares. If this number is less than
1766 count-shares-good, then some shares are
1767 doubled up, increasing the correlation of
1768 failures. This indicates that one or more
1769 shares should be moved to an otherwise unused
1770 server, if one is available.
1771 count-corrupt-shares: the number of shares with integrity failures
1772 list-corrupt-shares: a list of 'share locators', one for each share
1773 that was found to be corrupt. Each share
1774 locator is a list of (serverid, storage_index,
1775 sharenum).
1776 count-incompatible-shares: the number of shares which are of a share
1777 format unknown to this checker
1778 list-incompatible-shares: a list of 'share locators', one for each
1779 share that was found to be of an unknown
1780 format. Each share locator is a list of
1781 (serverid, storage_index, sharenum).
1782 servers-responding: list of (binary) storage server identifiers,
1783 one for each server which responded to the share
1784 query (even if they said they didn't have
1785 shares, and even if they said they did have
1786 shares but then didn't send them when asked, or
1787 dropped the connection, or returned a Failure,
1788 and even if they said they did have shares and
1789 sent incorrect ones when asked)
1790 sharemap: dict mapping share identifier to list of serverids
1791 (binary strings). This indicates which servers are holding
1792 which shares. For immutable files, the shareid is an
1793 integer (the share number, from 0 to N-1). For mutable
1794 files, it is a string of the form 'seq%d-%s-sh%d',
1795 containing the sequence number, the roothash, and the
1796 share number.
1798 The following keys are most relevant for mutable files, but immutable
1799 files will provide sensible values too::
1801 count-wrong-shares: the number of shares for versions other than the
1802 'best' one (which is defined as being the
1803 recoverable version with the highest sequence
1804 number, then the highest roothash). These are
1805 either leftover shares from an older version
1806 (perhaps on a server that was offline when an
1807 update occurred), shares from an unrecoverable
1808 newer version, or shares from an alternate
1809 current version that results from an
1810 uncoordinated write collision. For a healthy
1811 file, this will equal 0.
1813 count-recoverable-versions: the number of recoverable versions of
1814 the file. For a healthy file, this will
1815 equal 1.
1817 count-unrecoverable-versions: the number of unrecoverable versions
1818 of the file. For a healthy file, this
1819 will equal 0.
1823 def get_summary():
1824 """Return a string with a brief (one-line) summary of the results."""
1826 def get_report():
1827 """Return a list of strings with more detailed results."""
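# A sketch of interpreting the share counts described above (a hypothetical
# helper, using only keys defined by this interface). A file becomes
# unrecoverable with fewer than k good shares, and is healthy only when all
# N distinct shares exist.
def _example_interpret_counts(data):
    k = data["count-shares-needed"]
    n = data["count-shares-expected"]
    good = data["count-shares-good"]
    if good < k:
        return "unrecoverable: only %d of the %d needed shares" % (good, k)
    if good < n or data["count-good-share-hosts"] < good:
        return "recoverable, but repair or rebalancing is advisable"
    return "healthy"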
1829 class ICheckAndRepairResults(Interface):
1830 """I contain the detailed results of a check/verify/repair operation.
1832 The IFilesystemNode.check()/verify()/repair() methods all return
1833 instances that provide ICheckAndRepairResults.
1836 def get_storage_index():
1837 """Return a string with the (binary) storage index."""
1838 def get_storage_index_string():
1839 """Return a string with the (printable) abbreviated storage index."""
1840 def get_repair_attempted():
1841 """Return a boolean, True if a repair was attempted. We might not
1842 attempt to repair the file because it was healthy, or healthy enough
1843 (i.e. some shares were missing but not enough to exceed some
1844 threshold), or because we don't know how to repair this object."""
1845 def get_repair_successful():
1846 """Return a boolean, True if repair was attempted and the file/dir
1847 was fully healthy afterwards. False if no repair was attempted or if
1848 a repair attempt failed."""
1849 def get_pre_repair_results():
1850 """Return an ICheckResults instance that describes the state of the
1851 file/dir before any repair was attempted."""
1852 def get_post_repair_results():
1853 """Return an ICheckResults instance that describes the state of the
1854 file/dir after any repair was attempted. If no repair was attempted,
1855 the pre-repair and post-repair results will be identical."""
1858 class IDeepCheckResults(Interface):
1859 """I contain the results of a deep-check operation.
1861 This is returned by a call to ICheckable.deep_check().
1864 def get_root_storage_index_string():
1865 """Return the storage index (abbreviated human-readable string) of
1866 the first object checked."""
1867 def get_counters():
1868 """Return a dictionary with the following keys::
1870 count-objects-checked: count of how many objects were checked
1871 count-objects-healthy: how many of those objects were completely
1872 healthy
1873 count-objects-unhealthy: how many were damaged in some way
1874 count-objects-unrecoverable: how many were unrecoverable
1875 count-corrupt-shares: how many shares were found to have
1876 corruption, summed over all objects
1880 def get_corrupt_shares():
1881 """Return a set of (serverid, storage_index, sharenum) for all shares
1882 that were found to be corrupt. Both serverid and storage_index are
1883 binary.
1885 def get_all_results():
1886 """Return a dictionary mapping pathname (a tuple of strings, ready to
1887 be slash-joined) to an ICheckResults instance, one for each object
1888 that was checked."""
1890 def get_results_for_storage_index(storage_index):
1891 """Retrieve the ICheckResults instance for the given (binary)
1892 storage index. Raises KeyError if there are no results for that
1893 storage index."""
1895 def get_stats():
1896 """Return a dictionary with the same keys as
1897 IDirectoryNode.deep_stats()."""
1899 class IDeepCheckAndRepairResults(Interface):
1900 """I contain the results of a deep-check-and-repair operation.
1902 This is returned by a call to ICheckable.deep_check_and_repair().
1905 def get_root_storage_index_string():
1906 """Return the storage index (abbreviated human-readable string) of
1907 the first object checked."""
1908 def get_counters():
1909 """Return a dictionary with the following keys::
1911 count-objects-checked: count of how many objects were checked
1912 count-objects-healthy-pre-repair: how many of those objects were
1913 completely healthy (before any
1914 repair)
1915 count-objects-unhealthy-pre-repair: how many were damaged in
1916 some way (before any repair)
1917 count-objects-unrecoverable-pre-repair: how many were unrecoverable
1918 count-objects-healthy-post-repair: how many of those objects were
1919 completely healthy (after any
1920 repair)
1921 count-objects-unhealthy-post-repair: how many were damaged in
1922 some way (after any repair)
1923 count-objects-unrecoverable-post-repair: how many were
1924 unrecoverable (after any repair)
1925 count-repairs-attempted: repairs were attempted on this many
1926 objects. The count-repairs- keys will
1927 always be provided, however unless
1928 repair=true is present, they will all
1929 be zero
1930 count-repairs-successful: how many repairs resulted in healthy
1931 objects
1932 count-repairs-unsuccessful: how many repairs did not result in
1933 completely healthy objects
1934 count-corrupt-shares-pre-repair: how many shares were found to
1935 have corruption, summed over all
1936 objects examined (before any
1937 repair)
1938 count-corrupt-shares-post-repair: how many shares were found to
1939 have corruption, summed over all
1940 objects examined (after any
1941 repair)
1945 """Return a dictionary with the same keys as
1946 IDirectoryNode.deep_stats()."""
1948 def get_corrupt_shares():
1949 """Return a set of (serverid, storage_index, sharenum) for all shares
1950 that were found to be corrupt before any repair was attempted. Both
1951 serverid and storage_index are binary.
1953 def get_remaining_corrupt_shares():
1954 """Return a set of (serverid, storage_index, sharenum) for all shares
1955 that were found to be corrupt after any repair was completed. Both
1956 serverid and storage_index are binary. These are shares that need
1957 manual inspection and probably deletion.
1959 def get_all_results():
1960 """Return a dictionary mapping pathname (a tuple of strings, ready to
1961 be slash-joined) to an ICheckAndRepairResults instance, one for each
1962 object that was checked."""
1964 def get_results_for_storage_index(storage_index):
1965 """Retrieve the ICheckAndRepairResults instance for the given (binary)
1966 storage index. Raises KeyError if there are no results for that
1967 storage index."""
1970 class IRepairable(Interface):
1971 def repair(check_results):
1972 """Attempt to repair the given object. Returns a Deferred that fires
1973 with an IRepairResults object.
1975 I must be called with an object that implements ICheckResults, as
1976 proof that you have actually discovered a problem with this file. I
1977 will use the data in the checker results to guide the repair process,
1978 such as which servers provided bad data and should therefore be
1979 avoided. The ICheckResults object is inside the
1980 ICheckAndRepairResults object, which is returned by the
1981 ICheckable.check_and_repair() method::
1983 d = filenode.check_and_repair(monitor, verify=False)
1984 def _got_results(check_and_repair_results):
1985 check_results = check_and_repair_results.get_pre_repair_results()
1986 return filenode.repair(check_results)
1987 d.addCallback(_got_results)
1991 class IRepairResults(Interface):
1992 """I contain the results of a repair operation."""
1995 class IClient(Interface):
1996 def upload(uploadable):
1997 """Upload some data into a CHK, get back the UploadResults for it.
1998 @param uploadable: something that implements IUploadable
1999 @return: a Deferred that fires with the UploadResults instance.
2000 To get the URI for this file, use results.uri.
2003 def create_mutable_file(contents=""):
2004 """Create a new mutable file with contents, get back the URI string.
2005 @param contents: the initial contents to place in the file.
2006 @return: a Deferred that fires with the (string) SSK URI for the new
2007 file.
2010 def create_dirnode(initial_children={}):
2011 """Create a new unattached dirnode, possibly with initial children.
2013 @param initial_children: dict with keys that are unicode child names,
2014 and values that are (child_writecap, child_readcap, metadata) tuples.
2016 @return: a Deferred that fires with the new IDirectoryNode instance.
2019 def create_node_from_uri(uri, rouri):
2020 """Create a new IFilesystemNode instance from the uri, synchronously.
2021 @param uri: a string or IURI-providing instance, or None. This could
2022 be for a LiteralFileNode, a CHK file node, a mutable file
2023 node, or a directory node
2024 @param rouri: a string or IURI-providing instance, or None. If the
2025 main uri is None, I will use the rouri instead. If I
2026 recognize the format of the main uri, I will ignore the
2027 rouri (because it can be derived from the writecap).
2029 @return: an instance that provides IFilesystemNode (or more usefully
2030 one of its subclasses). File-specifying URIs will result in
2031 IFileNode or IMutableFileNode -providing instances, like
2032 FileNode, LiteralFileNode, or MutableFileNode.
2033 Directory-specifying URIs will result in
2034 IDirectoryNode-providing instances, like DirectoryNode.
2037 class IClientStatus(Interface):
2038 def list_all_uploads():
2039 """Return a list of uploader objects, one for each upload which
2040 currently has an object available (tracked with weakrefs). This is
2041 intended for debugging purposes."""
2042 def list_active_uploads():
2043 """Return a list of active IUploadStatus objects."""
2044 def list_recent_uploads():
2045 """Return a list of IUploadStatus objects for the most recently
2046 started uploads."""
2048 def list_all_downloads():
2049 """Return a list of downloader objects, one for each download which
2050 currently has an object available (tracked with weakrefs). This is
2051 intended for debugging purposes."""
2052 def list_active_downloads():
2053 """Return a list of active IDownloadStatus objects."""
2054 def list_recent_downloads():
2055 """Return a list of IDownloadStatus objects for the most recently
2056 started downloads."""
2058 class IUploadStatus(Interface):
2059 def get_started():
2060 """Return a timestamp (float with seconds since epoch) indicating
2061 when the operation was started."""
2062 def get_storage_index():
2063 """Return a string with the (binary) storage index in use on this
2064 upload. Returns None if the storage index has not yet been
2065 calculated."""
2066 def get_size():
2067 """Return an integer with the number of bytes that will eventually
2068 be uploaded for this file. Returns None if the size is not yet known.
2070 def using_helper():
2071 """Return True if this upload is using a Helper, False if not."""
2072 def get_status():
2073 """Return a string describing the current state of the upload
2074 process."""
2075 def get_progress():
2076 """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
2077 each from 0.0 to 1.0 . 'chk' describes how much progress has been
2078 made towards hashing the file to determine a CHK encryption key: if
2079 non-convergent encryption is in use, this will be trivial, otherwise
2080 the whole file must be hashed. 'ciphertext' describes how much of the
2081 ciphertext has been pushed to the helper, and is '1.0' for non-helper
2082 uploads. 'encode_and_push' describes how much of the encode-and-push
2083 process has finished: for helper uploads this is dependent upon the
2084 helper providing progress reports. It might be reasonable to add all
2085 three numbers and report the sum to the user."""
2086 def get_active():
2087 """Return True if the upload is currently active, False if not."""
2088 def get_results():
2089 """Return an instance of UploadResults (which contains timing and
2090 sharemap information). Might return None if the upload is not yet
2091 complete."""
2092 def get_counter():
2093 """Each upload status gets a unique number: this method returns that
2094 number. This provides a handle to this particular upload, so a web
2095 page can generate a suitable hyperlink."""
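# E.g. a status display might combine the three get_progress() floats, as
# the docstring above suggests (a presentation choice, not an interface
# requirement):
#
#  chk, ciphertext, encode_and_push = status.get_progress()
#  overall = chk + ciphertext + encode_and_push # 0.0 to 3.0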
2097 class IDownloadStatus(Interface):
2098 def get_started():
2099 """Return a timestamp (float with seconds since epoch) indicating
2100 when the operation was started."""
2101 def get_storage_index():
2102 """Return a string with the (binary) storage index in use on this
2103 download. This may be None if there is no storage index (i.e. LIT
2104 files)."""
2105 def get_size():
2106 """Return an integer with the number of bytes that will eventually be
2107 retrieved for this file. Returns None if the size is not yet known.
2109 def using_helper():
2110 """Return True if this download is using a Helper, False if not."""
2111 def get_status():
2112 """Return a string describing the current state of the download
2113 process."""
2114 def get_progress():
2115 """Returns a float (from 0.0 to 1.0) describing the amount of the
2116 download that has completed. This value will remain at 0.0 until the
2117 first byte of plaintext is pushed to the download target."""
2118 def get_active():
2119 """Return True if the download is currently active, False if not."""
2120 def get_counter():
2121 """Each download status gets a unique number: this method returns
2122 that number. This provides a handle to this particular download, so a
2123 web page can generate a suitable hyperlink."""
2125 class IServermapUpdaterStatus(Interface):
2127 class IPublishStatus(Interface):
2129 class IRetrieveStatus(Interface):
2132 class NotCapableError(Exception):
2133 """You have tried to write to a read-only node."""
2135 class BadWriteEnablerError(Exception):
2138 class RIControlClient(RemoteInterface):
2140 def wait_for_client_connections(num_clients=int):
2141 """Do not return until we have connections to at least NUM_CLIENTS
2142 storage servers."""
2145 def upload_from_file_to_uri(filename=str,
2146 convergence=ChoiceOf(None,
2147 StringConstraint(2**20))):
2148 """Upload a file to the grid. This accepts a filename (which must be
2149 absolute) that points to a file on the node's local disk. The node will
2150 read the contents of this file, upload it to the grid, then return the
2151 URI at which it was uploaded. If convergence is None then a random
2152 encryption key will be used, else the plaintext will be hashed, then
2153 that hash will be mixed together with the "convergence" string to form
2154 the encryption key."""
2158 def download_from_uri_to_file(uri=URI, filename=str):
2159 """Download a file from the grid, placing it on the node's local disk
2160 at the given filename (which must be absolute[?]). Returns the
2161 absolute filename where the file was written."""
2166 def get_memory_usage():
2167 """Return a dict describing the amount of memory currently in use. The
2168 keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
2169 measuring memory consumption in bytes."""
2170 return DictOf(str, int)
2172 def speed_test(count=int, size=int, mutable=Any()):
2173 """Write 'count' tempfiles to disk, all of the given size. Measure
2174 how long (in seconds) it takes to upload them all to the servers.
2175 Then measure how long it takes to download all of them. If 'mutable'
2176 is 'create', time creation of mutable files. If 'mutable' is
2177 'upload', then time access to the same mutable file instead of
2178 creating a new file each time.
2180 Returns a tuple of (upload_time, download_time).
2182 return (float, float)
2184 def measure_peer_response_time():
2185 """Send a short message to each connected peer, and measure the time
2186 it takes for them to respond to it. This is a rough measure of the
2187 application-level round trip time.
2189 @return: a dictionary mapping peerid to a float (RTT time in seconds)
2192 return DictOf(Nodeid, float)
2194 UploadResults = Any() #DictOf(str, str)
2196 class RIEncryptedUploadable(RemoteInterface):
2197 __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
2202 def get_all_encoding_parameters():
2203 return (int, int, int, long)
2205 def read_encrypted(offset=Offset, length=ReadSize):
2206 """Returns a list of ciphertext strings."""
2207 return ListOf(str)
2212 class RICHKUploadHelper(RemoteInterface):
2213 __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
2215 def get_version():
2216 """Return a dictionary of version information."""
2219 return DictOf(str, Any())
2221 def upload(reader=RIEncryptedUploadable):
2222 return UploadResults
2225 class RIHelper(RemoteInterface):
2226 __remote_name__ = "RIHelper.tahoe.allmydata.com"
2229 def get_version():
2230 """Return a dictionary of version information."""
2232 return DictOf(str, Any())
2234 def upload_chk(si=StorageIndex):
2235 """See if a file with a given storage index needs uploading. The
2236 helper will ask the appropriate storage servers to see if the file
2237 has already been uploaded. If so, the helper will return a set of
2238 'upload results' that includes whatever hashes are needed to build
2239 the read-cap, and perhaps a truncated sharemap.
2241 If the file has not yet been uploaded (or if it was only partially
2242 uploaded), the helper will return an empty upload-results dictionary
2243 and also an RICHKUploadHelper object that will take care of the
2244 upload process. The client should call upload() on this object and
2245 pass it a reference to an RIEncryptedUploadable object that will
2246 provide ciphertext. When the upload is finished, the upload() method
2247 will finish and return the upload results.
2249 return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
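# An illustrative client-side flow for the helper protocol described above
# (hypothetical names; error handling omitted):
#
#  d = helper.callRemote("upload_chk", storage_index)
#  def _got((upload_results, upload_helper)):
#      if upload_helper is None:
#          return upload_results # the file is already in the grid
#      # otherwise push our ciphertext through the RICHKUploadHelper
#      return upload_helper.callRemote("upload", my_encrypted_uploadable)
#  d.addCallback(_got)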
2252 class RIStatsProvider(RemoteInterface):
2253 __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
2255 """Provides access to statistics and monitoring information."""
2259 def get_stats():
2260 """returns a dictionary containing 'counters' and 'stats', each a
2261 dictionary with string counter/stat name keys, and numeric values.
2262 counters are monotonically increasing measures of work done, and
2263 stats are instantaneous measures (potentially time averaged
2264 internally)."""
2266 return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))
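# For example, a provider's get_stats() might return something shaped like
# this (names illustrative; the actual set depends on the node's services):
#
#  {'counters': {'uploader.files_uploaded': 10,
#                'uploader.bytes_uploaded': 1234567},
#   'stats': {'node.uptime': 386.42,
#             'storage_server.disk_avail': 1024**3}}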
2268 class RIStatsGatherer(RemoteInterface):
2269 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
2271 """Provides a monitoring service for centralised collection of stats
2272 for a grid."""
2274 def provide(provider=RIStatsProvider, nickname=str):
2276 @param provider: a stats collector instance which should be polled
2277 periodically by the gatherer to collect stats.
2278 @param nickname: a name useful to identify the provided client
2283 class IStatsProducer(Interface):
2285 def get_stats():
2286 """returns a dictionary, with str keys representing the names of stats
2287 to be monitored, and numeric values.
2290 class RIKeyGenerator(RemoteInterface):
2291 __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
2293 """Provides a service offering to make RSA key pairs."""
2296 def get_rsa_key_pair(key_size=int):
2298 @param key_size: the size of the signature key.
2299 @return: tuple(verifying_key, signing_key)
2301 return TupleOf(str, str)
2304 class FileTooLargeError(Exception):
2307 class IValidatedThingProxy(Interface):
2308 def start():
2309 """ Acquire a thing and validate it. Return a deferred which is
2310 eventually fired with self if the thing is valid or errbacked if it
2311 can't be acquired or validated."""
2313 class InsufficientVersionError(Exception):
2314 def __init__(self, needed, got):
2315 self.needed = needed
2316 self.got = got
2317 def __repr__(self):
2318 return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
2319 self.got)