from zope.interface import Interface
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable

HASH_SIZE = 32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2 GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable bucket modifications
LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests
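# Illustrative sketch (not part of this module): one plausible way a client
# could derive the per-server lease secrets described above. The tag strings
# and inputs are assumptions for the example, not a prescribed scheme.
def _example_derive_lease_secrets(client_master_secret, server_nodeid):
    import hashlib
    renew_secret = hashlib.sha256("renew:" + client_master_secret
                                  + server_nodeid).digest()
    cancel_secret = hashlib.sha256("cancel:" + client_master_secret
                                   + server_nodeid).digest()
    return (renew_secret, cancel_secret) # each is a 32-byte Hash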
class RIStubClient(RemoteInterface):
    """Each client publishes a service announcement for a dummy object called
    the StubClient. This object doesn't actually offer any services, but the
    announcement helps the Introducer keep track of which clients are
    subscribed (so the grid admin can keep track of things like the size of
    the grid and the client versions in use). This is the (empty)
    RemoteInterface for the StubClient."""
class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        readers.
        """
        return None

    def abort():
        """Abandon all the data that has been written."""
        return None

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share(),
        which is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """
TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt, nop"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
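# Illustrative sketch (not part of the definitions above): one plausible
# construction of a TestAndWriteVectorsForShares value for a basic
# test-and-set write on share 0. The specimen and offsets are assumptions.
def _example_tw_vectors():
    testv = [(0, 1, 'eq', 'X')] # pass only if byte 0 currently equals 'X'
    datav = [(0, 'Y')]          # if so, write 'Y' at offset 0
    new_length = None           # leave the container size alone
    return {0: (testv, datav, new_length)}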
class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket refresh
                             requests. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
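    # Illustrative sketch (not part of the interface): a plausible
    # client-side call to allocate_buckets over an already-obtained
    # RemoteReference. The rref, secrets, and sizes are assumptions.
    #
    #   def _example_allocate(rref, storage_index, renew_secret,
    #                         cancel_secret, canary):
    #       d = rref.callRemote("allocate_buckets", storage_index,
    #                           renew_secret, cancel_secret,
    #                           sharenums=set([0, 1, 2]),
    #                           allocated_size=2**20, canary=canary)
    #       def _got(res):
    #           (alreadygot, allocated) = res
    #           # 'allocated' maps sharenum -> RIBucketWriter
    #           return allocated
    #       d.addCallback(_got)
    #       return d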
    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (Note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        bucket.)
        """
        return Any() # returns None now, but future versions might change

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket, resetting the timer to 31 days.
        Some networks will use this, some will not. If there is no bucket for
        the given storage_index, IndexError will be raised.

        For mutable shares, if the given renew_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def cancel_lease(storage_index=StorageIndex,
                     cancel_secret=LeaseCancelSecret):
        """
        Cancel the lease on a given bucket. If this was the last lease on the
        bucket, the bucket will be deleted. If there is no bucket for the
        given storage_index, IndexError will be raised.

        For mutable shares, if the given cancel_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()
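    # Illustrative sketch (not part of the interface): a plausible lease
    # renewal that falls back to add_lease when no matching lease exists;
    # assumes the remote IndexError surfaces as IndexError on this side.
    #
    #   def _example_renew(rref, storage_index, renew_secret, cancel_secret):
    #       d = rref.callRemote("renew_lease", storage_index, renew_secret)
    #       def _no_lease(f):
    #           f.trap(IndexError)
    #           return rref.callRemote("add_lease", storage_index,
    #                                  renew_secret, cancel_secret)
    #       d.addErrback(_no_lease)
    #       return d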
    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results
    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """General-purpose test-and-set operation for mutable slots. Perform
        a bunch of comparisons against the existing shares. If they all pass,
        then apply a bunch of write vectors to those shares. Then use the
        read vectors to extract data from all the shares and return the data.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket refresh
                             requests. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a bool(
        (read(offset, length)) OPERATOR specimen ) and only performs the
        write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created.

        Each write vector is accompanied by a 'new_length' argument. If
        new_length is not None, use it to set the size of the container. This
        can be used to pre-allocate space for a series of upcoming writes, or
        truncate existing data. If the container is growing, new_length will
        be applied before datav. If the container is shrinking, it will be
        applied afterwards. If new_length==0, the share will be deleted.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same vector is used for
        all shares. This captures the state that was tested by the test
        vector.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))
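    # Illustrative sketch (not part of the interface): using the operation
    # above to create share 0 only if it does not already exist. The rref
    # and secrets tuple are assumptions for the example.
    #
    #   def _example_create_share(rref, storage_index, secrets, contents):
    #       tw_vectors = {0: ([(0, 1, 'eq', '')], # passes only if share 0 is absent
    #                         [(0, contents)],    # write new contents at offset 0
    #                         None)}              # no explicit resize
    #       d = rref.callRemote("slot_testv_and_readv_and_writev",
    #                           storage_index, secrets, tw_vectors, [])
    #       # fires with (wrote, read_data): (bool, {shnum: [strings]})
    #       return d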
    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        which did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on the server.
        """
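# Illustrative sketch (not part of the interfaces): a plausible corruption
# advisory built from a failed hash check; the reason format is an
# assumption for the example.
def _example_advise_corruption(rref, storage_index, shnum,
                               expected_hash, got_hash):
    reason = ("block hash mismatch: expected %s, got %s"
              % (expected_hash, got_hash))
    return rref.callRemote("advise_corrupt_share", "mutable",
                           storage_index, shnum, reason)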
class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode::

         for k in sorted(dict.keys()):
             assert re.match(r'^[a-zA-Z_\-]+$', k)
             write(k + ':' + netstring(dict[k]))

        @return: a Deferred that fires (with None) when the operation completes
        """
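    # Illustrative sketch (not part of the interface): a runnable version of
    # the serialization pseudocode above; the helper name is an assumption.
    #
    #   def _example_serialize_uri_extension(d):
    #       import re
    #       def netstring(s):
    #           return "%d:%s," % (len(s), s)
    #       pieces = []
    #       for k in sorted(d.keys()):
    #           assert re.match(r'^[a-zA-Z_\-]+$', k)
    #           pieces.append(k + ':' + netstring(d[k]))
    #       return ''.join(pieces)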
315 """Finish writing and close the bucket. The share is not finalized
316 until this method is called: if the uploading client disconnects
317 before calling close(), the partially-written share will be
320 @return: a Deferred that fires (with None) when the operation completes
323 class IStorageBucketReader(Interface):
325 def get_block_data(blocknum=int, blocksize=int, size=int):
326 """Most blocks will be the same size. The last block might be shorter
332 def get_crypttext_hashes():
334 @return: ListOf(Hash)
337 def get_block_hashes(at_least_these=SetOf(int)):
339 @return: ListOf(Hash)
342 def get_share_hashes(at_least_these=SetOf(int)):
344 @return: ListOf(TupleOf(int, Hash))
347 def get_uri_extension():
349 @return: URIExtensionData
class IStorageBroker(Interface):
    def get_servers_for_index(peer_selection_index):
        """
        @return: list of (peerid, versioned-rref) tuples
        """
    def get_all_servers():
        """
        @return: frozenset of (peerid, versioned-rref) tuples
        """
    def get_all_serverids():
        """
        @return: frozenset of serverid strings
        """
    def get_nickname_for_serverid(serverid):
        """
        @return: unicode nickname, or None
        """
    # methods moved from IntroducerClient, need review
    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
        each active connection we've established to a remote service. This is
        mostly useful for unit tests that need to wait until a certain number
        of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are actively
        trying to connect to. Each RemoteServiceConnector has the following
        public attributes::

          service_name: the type of service provided, like 'storage'
          announcement_time: when we first heard about this service
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection

          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, from the
                            same source

          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web page
        which describes connecting and connected peers.
        """
    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have a connection (to
        one or more services) established. Mostly useful for unit tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """
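# Illustrative sketch (not part of the interfaces): one plausible hash-based
# permutation of the kind get_permuted_peers() describes, ordering (peerid,
# rref) tuples by sha256(key + peerid). The exact scheme is an assumption.
def _example_permute(peers, key):
    import hashlib
    return sorted(peers,
                  key=lambda peer: hashlib.sha256(key + peer[0]).digest())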
class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    def get_readonly():
        """Return another IURI instance, which represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""
class IVerifierURI(Interface, IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""
class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    """I am a URI which represents a mutable filenode."""

class IDirectoryURI(Interface):
    pass

class IReadonlyDirectoryURI(Interface):
    pass

class CannotPackUnknownNodeError(Exception):
    """UnknownNodes (using filecaps from the future that we don't understand)
    cannot yet be copied safely, so I refuse to copy them."""

class UnhandledCapTypeError(Exception):
    """I recognize the cap/URI, but I cannot create an IFilesystemNode for
    it."""

class NotDeepImmutableError(Exception):
    """Deep-immutable directories can only contain deep-immutable children"""
class IFilesystemNode(Interface):
    def get_cap():
        """Return the strongest 'cap instance' associated with this node.
        (writecap for writeable-mutable files/directories, readcap for
        immutable or readonly-mutable files/directories). To convert this
        into a string, call .to_string() on the result."""

    def get_readcap():
        """Return a readonly cap instance for this node. For immutable or
        readonly nodes, get_cap() and get_readcap() return the same thing."""

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_uri():
        """
        Return the URI string that can be used by others to get access to
        this node. If this node is read-only, the URI will only offer
        read-only access. If this node is read-write, the URI will offer
        read-write access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_readonly_uri():
        """Return the URI string that can be used by others to get read-only
        access to this node. The result is a read-only URI, regardless of
        whether this node is read-only or read-write.

        If you have merely read-only access to this node, get_readonly_uri()
        will return the same thing as get_uri().
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files)."""

    def is_readonly():
        """Return False if this reference provides mutable access to the
        given file or directory (i.e. if you can modify it), or True if not.
        Note that even if this reference is read-only, someone else may hold
        a read-write reference to it."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file.
        """

class IMutableFilesystemNode(IFilesystemNode):
    pass
class IFileNode(IFilesystemNode):
    def download(target):
        """Download the file's contents to a given IDownloadTarget"""

    def download_to_data():
        """Download the file's contents. Return a Deferred that fires
        with those contents."""

    def get_size():
        """Return the length (in bytes) of the data this node represents."""
    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None).

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
         (if streaming == False)::
          consumer does p.resumeProducing()
           consumer.write(data)
          consumer does p.resumeProducing()
           consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        A simple download-to-memory consumer example would look like this::

         class MemoryConsumer:
             implements(IConsumer)
             def __init__(self):
                 self.chunks = []
                 self.done = False
             def registerProducer(self, p, streaming):
                 assert streaming == False
                 while not self.done:
                     p.resumeProducing()
             def write(self, data):
                 self.chunks.append(data)
             def unregisterProducer(self):
                 self.done = True
         d = filenode.read(MemoryConsumer())
         d.addCallback(lambda mc: "".join(mc.chunks))
        """
class IMutableFileNode(IFileNode, IMutableFilesystemNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node which has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.

    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """
    def download_best_version():
        """Download the 'best' available version of the file, meaning one of
        the recoverable versions with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has been
        uploaded.

        I update an internal servermap with MODE_READ, determine which
        version of the file is indicated by
        servermap.best_recoverable_version(), and return a Deferred that
        fires with its contents. If no version is recoverable, the Deferred
        will errback with UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be downloaded with
        download_best_version(), without actually downloading the whole file.

        I return a Deferred that fires with an integer.
        """
    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """
    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """
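    # Illustrative sketch (not part of the interface): a plausible modifier
    # callable for modify(), appending a line only when it is not already
    # present. The payload is an assumption for the example.
    #
    #   def _example_append_line(old_contents, first_time, servermap):
    #       line = "new entry\n"
    #       if line in old_contents:
    #           return None # change already present: no write needed
    #       return old_contents + line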
    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap which was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """
    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """
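    # Illustrative sketch (not part of the interface): the
    # get_servermap(MODE_WRITE) -> upload() round trip described above.
    # MODE_WRITE comes from allmydata.mutable.common, as noted earlier.
    #
    #   def _example_replace(node, new_contents):
    #       from allmydata.mutable.common import MODE_WRITE
    #       d = node.get_servermap(MODE_WRITE)
    #       d.addCallback(lambda smap: node.upload(new_contents, smap))
    #       return d # fires with a PublishStatus object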
781 """Return this filenode's writekey, or None if the node does not have
782 write-capability. This may be used to assist with data structures
783 that need to make certain data available only to writers, such as the
784 read-write child caps in dirnodes. The recommended process is to have
785 reader-visible data be submitted to the filenode in the clear (where
786 it will be encrypted by the filenode using the readkey), but encrypt
787 writer-visible data using this writekey.
class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares, or upload was unable to
    place 'shares_of_happiness' shares."""

class NoSharesError(Exception):
    """Upload or Download was unable to get any shares at all."""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data which is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child which does not exist."""
class IDirectoryNode(IMutableFilesystemNode):
    """I represent a name-to-child mapping, holding the tahoe equivalent of a
    directory. All child names are unicode strings, and all children are some
    sort of IFilesystemNode (either files or subdirectories).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        mechanism.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        mechanism.
        """
837 """I return a Deferred that fires with a dictionary mapping child
838 name (a unicode string) to (node, metadata_dict) tuples, in which
839 'node' is either an IFileNode or IDirectoryNode, and 'metadata_dict'
840 is a dictionary of metadata."""
843 """I return a Deferred that fires with a boolean, True if there
844 exists a child of the given name, False if not. The child name must
845 be a unicode string."""
848 """I return a Deferred that fires with a specific named child node,
849 either an IFileNode or an IDirectoryNode. The child name must be a
850 unicode string. I raise NoSuchChildError if I do not have a child by
    def get_metadata_for(name):
        """I return a Deferred that fires with the metadata dictionary for a
        specific named child node. This metadata is stored in the *edge*, not
        in the child, so it is attached to the parent dirnode rather than the
        child dir-or-file-node. The child name must be a unicode string. I
        raise NoSuchChildError if I do not have a child by that name."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the new
        metadata. The child name must be a unicode string. This metadata is
        stored in the *edge*, not in the child, so it is attached to the
        parent dirnode rather than the child dir-or-file-node. I return a
        Deferred (that fires with this dirnode) when the operation is
        complete. I raise NoSuchChildError if I do not have a child by that
        name."""
    def get_child_at_path(path):
        """Transform a child path into an IDirectoryNode or IFileNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IDirectoryNode/IFileNode and
        metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """
    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If the new
        child is read/write, you will provide both writecap and readcap. If
        the child is read-only, you will provide the readcap twice (i.e. the
        writecap= and readcap= arguments will both be the child's readcap).
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""
    def set_children(entries, overwrite=True):
        """Add multiple children (by writecap+readcap) to a directory node.
        Takes a dictionary, with childname as keys and (writecap, readcap)
        tuples (or (writecap, readcap, metadata) triples) as values. Returns
        a Deferred that fires (with this dirnode) when the operation
        finishes. This is equivalent to calling set_uri() multiple times, but
        is much more efficient. All child names must be unicode strings."""
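    # Illustrative sketch (not part of the interface): the shape of the
    # 'entries' argument to set_children(); the abbreviated caps are
    # placeholders, not real values.
    #
    #   entries = {
    #       u"file1": ("URI:CHK:...", "URI:CHK:..."), # read-only: readcap twice
    #       u"subdir": ("URI:DIR2:...", "URI:DIR2-RO:...",
    #                   {"ctime": 1234567890.0}),     # optional metadata triple
    #   }
    #   d = dirnode.set_children(entries)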
    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IDirectoryNode or IFileNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple children to a directory node. Takes a dict mapping
        unicode childname to (child_node, metadata) tuples. If metadata=None,
        the original metadata is left unmodified. Returns a Deferred that
        fires (with this dirnode) when the operation finishes. This is
        equivalent to calling set_node() multiple times, but is much more
        efficient."""
    def add_file(name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I set metadata
        the same way as set_uri and set_node. The child name must be a
        unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""

    def delete(name):
        """I remove the child at the specific name. I return a Deferred that
        fires when the operation finishes. The child name must be a unicode
        string. I raise NoSuchChildError if I do not have a child by that
        name."""
    def create_subdirectory(name, initial_children={}, overwrite=True):
        """I create and attach a directory at the given name. The new
        directory can be empty, or it can be populated with children
        according to 'initial_children', which takes a dictionary in the same
        format as set_nodes (i.e. mapping unicode child name to (childnode,
        metadata) tuples). The child name must be a unicode string. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""
    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""
    def build_manifest():
        """I generate a table of everything reachable from this directory.
        I also compute deep-stats as described below.

        I return a Monitor. The Monitor's results will be a dictionary with
        the following keys::

         res['manifest']: a list of (path, cap) tuples for all nodes
                          (directories and files) reachable from this one.
                          'path' will be a tuple of unicode strings. The
                          origin dirnode will be represented by an empty path
                          tuple.
         res['verifycaps']: a list of (printable) verifycap strings, one for
                            each reachable non-LIT node. This is a set:
                            it will contain no duplicates.
         res['storage-index']: a list of (base32) storage index strings,
                               one for each reachable non-LIT node. This is
                               a set: it will contain no duplicates.
         res['stats']: a dictionary, the same that is generated by
                       start_deep_stats() below.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.
        """
    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

           count-immutable-files: count of how many CHK files are in the set
           count-mutable-files: same, for mutable files (does not include
                                directories)
           count-literal-files: same, for LIT files
           count-files: sum of the above three

           count-directories: count of directories

           size-immutable-files: total bytes for all CHK files in the set
           size-mutable-files (TODO): same, for current version of all mutable
                                      files, does not include directories
           size-literal-files: same, for LIT files
           size-directories: size of mutable files used by directories

           largest-directory: number of bytes in the largest directory
           largest-directory-children: number of children in the largest
                                       directory
           largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """
class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' argument to use less CPU). The encoding
        math will be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).

        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_params():
        """Return the 3-tuple of data_size, required_shares, max_shares"""
    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This method
        returns an index into this table; the value at this index is an
        encoder class, and this encoder is an instance of that class.
        """

    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """
    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data) must
        be equal to the 'data_size' value passed earlier to set_params().

        This will return a Deferred that will fire with two lists. The first
        is a list of shares, each of which is a string (or other buffer
        object) such that len(share) is the same as what get_share_size()
        returned earlier. The second is a list of shareids, in which each is
        an integer. The lengths of the two lists will always be equal to each
        other. The user should take care to keep each share closely
        associated with its shareid, as one is useless without the other.

        The length of this output list will normally be the same as the value
        provided to the 'max_shares' parameter of set_params(). This may be
        different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        """
    def encode(inshares, desired_share_ids=None):
        """Encode some data. This may be called multiple times. Each call is
        independent.

        inshares is a sequence of length required_shares, containing buffers
        (i.e. strings), where each buffer contains the next contiguous
        non-overlapping segment of the input data. Each buffer is required to
        be the same length, and the sum of the lengths of the buffers is
        required to be exactly the data_size promised by set_params(). (This
        implies that the data has to be padded before being passed to
        encode(), unless of course it already happens to be an even multiple
        of required_shares in length.)

        ALSO: the requirement to break up your data into 'required_shares'
        chunks before calling encode() feels a bit surprising, at least from
        the point of view of a user who doesn't know how FEC works. It feels
        like an implementation detail that has leaked outside the
        abstraction barrier. Can you imagine a use case in which the data to
        be encoded might already be available in pre-segmented chunks, such
        that it is faster or less work to make encode() take a list rather
        than splitting a single string?

        ALSO ALSO: I think 'inshares' is a misleading term, since encode()
        is supposed to *produce* shares, so what it *accepts* should be
        something other than shares. Other places in this interface use the
        word 'data' for that-which-is-not-shares.. maybe we should use that
        word here too.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.

        For each call, encode() will return a Deferred that fires with two
        lists, one containing shares and the other containing the shareids.
        The get_share_size() method can be used to determine the length of
        the share strings returned by encode(). Each shareid is a small
        integer, exactly as passed into 'desired_share_ids' (or
        range(max_shares), if desired_share_ids was not provided).

        The shares and their corresponding shareids are required to be kept
        together during storage and retrieval. Specifically, the share data is
        useless by itself: the decoder needs to be told which share is which
        by providing it with both the shareid and the actual share data.

        This function will allocate an amount of memory roughly equal to::

         (max_shares - required_shares) * get_share_size()

        When combined with the memory that the caller must allocate to
        provide the input data, this leads to a memory footprint roughly
        equal to the size of the resulting encoded shares (i.e. the expansion
        factor times the size of the input segment).
        """
    # rejected ideas:
    #
    # returning a list of (shareidN,shareN) tuples instead of a pair of
    # lists (shareids..,shares..). Brian thought the tuples would
    # encourage users to keep the share and shareid together throughout
    # later processing, Zooko pointed out that the code to iterate
    # through two lists is not really more complicated than using a list
    # of tuples and there's also a performance improvement
    #
    # having 'data_size' not required to be an integral multiple of
    # 'required_shares'. Doing this would require encode() to perform
    # padding internally, and we'd prefer to have any padding be done
    # explicitly by the caller. Yes, it is an abstraction leak, but
    # hopefully not an onerous one.
class ICodecDecoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set the params. They have to be exactly the same ones that were
        used for encoding."""

    def get_needed_shares():
        """Return the number of shares needed to reconstruct the data.
        set_params() is required to be called before this."""

    def decode(some_shares, their_shareids):
        """Decode a partial list of shares into data.

        'some_shares' is required to be a sequence of buffers of sharedata, a
        subset of the shares returned by ICodecEncode.encode(). Each share is
        required to be of the same length. The i'th element of their_shareids
        is required to be the shareid of the i'th buffer in some_shares.

        This returns a Deferred which fires with a sequence of buffers. This
        sequence will contain all of the segments of the original data, in
        order. The sum of the lengths of all of the buffers will be the
        'data_size' value passed into the original ICodecEncode.set_params()
        call. To get back the single original input block of data, use
        ''.join(output_buffers), or you may wish to simply write them in
        order to an output file.

        Note that some of the elements in the result sequence may be
        references to the elements of the some_shares input sequence. In
        particular, this means that if those share objects are mutable (e.g.
        arrays) and if they are changed, then both the input (the
        'some_shares' parameter) and the output (the value given when the
        deferred is triggered) will change.

        The length of 'some_shares' is required to be exactly the value of
        'required_shares' passed into the original ICodecEncode.set_params()
        call.
        """
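# Illustrative sketch (not part of the interfaces): the encode/decode round
# trip implied above, for a hypothetical encoder/decoder pair satisfying
# ICodecEncoder and ICodecDecoder. Padding the input up to a multiple of
# required_shares is the caller's job, as set_params() requires.
def _example_codec_roundtrip(encoder, decoder, data,
                             required_shares, max_shares):
    if len(data) % required_shares:
        data += '\x00' * (required_shares - len(data) % required_shares)
    encoder.set_params(len(data), required_shares, max_shares)
    chunk = len(data) // required_shares
    inshares = [data[i*chunk:(i+1)*chunk] for i in range(required_shares)]
    d = encoder.encode(inshares)
    def _decode(res):
        (shares, shareids) = res
        decoder.set_params(len(data), required_shares, max_shares)
        # any subset of exactly required_shares shares will do:
        d2 = decoder.decode(shares[:required_shares],
                            shareids[:required_shares])
        d2.addCallback(lambda buffers: ''.join(buffers))
        return d2
    d.addCallback(_decode)
    return d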
class IEncoder(Interface):
    """I take an object that provides IEncryptedUploadable, which provides
    encrypted data, and a list of shareholders. I then encode, hash, and
    deliver shares to those shareholders. I will compute all the necessary
    Merkle hash trees that are necessary to validate the crypttext that
    eventually comes back from the shareholders. I provide the URI Extension
    Block Hash, and the encoding parameters, both of which must be included
    in the URI.

    I do not choose shareholders, that is left to the IUploader. I must be
    given a dict of RemoteReferences to storage buckets that are ready and
    willing to receive data.
    """
1252 """Specify the number of bytes that will be encoded. This must be
1253 peformed before get_serialized_params() can be called.
1255 def set_params(params):
1256 """Override the default encoding parameters. 'params' is a tuple of
1257 (k,d,n), where 'k' is the number of required shares, 'd' is the
1258 shares_of_happiness, and 'n' is the total number of shares that will
1261 Encoding parameters can be set in three ways. 1: The Encoder class
1262 provides defaults (3/7/10). 2: the Encoder can be constructed with
1263 an 'options' dictionary, in which the
1264 needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
1265 set_params((k,d,n)) can be called.
1267 If you intend to use set_params(), you must call it before
1268 get_share_size or get_param are called.
    def set_encrypted_uploadable(u):
        """Provide a source of encrypted upload data. 'u' must implement
        IEncryptedUploadable.

        When this is called, the IEncryptedUploadable will be queried for its
        length and the storage_index that should be used.

        This returns a Deferred that fires with this Encoder instance.

        This must be performed before start() can be called.
        """
    def get_param(name):
        """Return an encoding parameter, by name.

        'storage_index': return a string with the (16-byte truncated SHA-256
                         hash) storage index to which these shares should be
                         pushed.

        'share_counts': return a tuple describing how many shares are used:
                        (needed_shares, shares_of_happiness, total_shares)

        'num_segments': return an int with the number of segments that
                        will be encoded.

        'segment_size': return an int with the size of each segment.

        'block_size': return the size of the individual blocks that will
                      be delivered to a shareholder's put_block() method. By
                      knowing this, the shareholder will be able to keep all
                      blocks in a single file and still provide random access
                      when reading them. # TODO: can we avoid exposing this?

        'share_size': an int with the size of the data that will be stored
                      on each shareholder. This is aggregate amount of data
                      that will be sent to the shareholder, summed over all
                      the put_block() calls I will ever make. It is useful to
                      determine this size before asking potential
                      shareholders whether they will grant a lease or not,
                      since their answers will depend upon how much space we
                      need. TODO: this might also include some amount of
                      overhead, like the size of all the hashes. We need to
                      decide whether this is useful or not.

        'serialized_params': a string with a concise description of the
                             codec name and its parameters. This may be passed
                             into the IUploadable to let it make sure that
                             the same file encoded with different parameters
                             will result in different storage indexes.

        Once this is called, set_size() and set_params() may not be called.
        """
    def set_shareholders(shareholders):
        """Tell the encoder where to put the encoded shares. 'shareholders'
        must be a dictionary that maps share number (an integer ranging from
        0 to n-1) to an instance that provides IStorageBucketWriter. This
        must be performed before start() can be called."""
1331 """Begin the encode/upload process. This involves reading encrypted
1332 data from the IEncryptedUploadable, encoding it, uploading the shares
1333 to the shareholders, then sending the hash trees.
1335 set_encrypted_uploadable() and set_shareholders() must be called
1336 before this can be invoked.
1338 This returns a Deferred that fires with a verify cap when the upload
1339 process is complete. The verifycap, plus the encryption key, is
1340 sufficient to construct the read cap.
1343 class IDecoder(Interface):
1344 """I take a list of shareholders and some setup information, then
1345 download, validate, decode, and decrypt data from them, writing the
1346 results to an output file.
1348 I do not locate the shareholders, that is left to the IDownloader. I must
1349 be given a dict of RemoteReferences to storage buckets that are ready to
1354 """I take a file-like object (providing write and close) to which all
1355 the plaintext data will be written.
1357 TODO: producer/consumer . Maybe write() should return a Deferred that
1358 indicates when it will accept more data? But probably having the
1359 IDecoder be a producer is easier to glue to IConsumer pieces.
1362 def set_shareholders(shareholders):
1363 """I take a dictionary that maps share identifiers (small integers)
1364 to RemoteReferences that provide RIBucketReader. This must be called before start() can be invoked.
1368 """I start the download. This process involves retrieving data and
1369 hash chains from the shareholders, using the hashes to validate the
1370 data, decoding the shares into segments, decrypting the segments,
1371 then writing the resulting plaintext to the output file.
1373 I return a Deferred that will fire (with self) when the download is finished.
1377 class IDownloadTarget(Interface):
1378 # Note that if the IDownloadTarget is also an IConsumer, the downloader
1379 # will register itself as a producer. This allows the target to invoke
1380 # downloader.pauseProducing, resumeProducing, and stopProducing.
1382 """Called before any calls to write() or close(). If an error
1383 occurs before any data is available, fail() may be called without
1384 a previous call to open().
1386 'size' is the length of the file being downloaded, in bytes."""
1389 """Output some data to the target."""
1391 """Inform the target that there is no more data to be written."""
1393 """fail() is called to indicate that the download has failed. 'why'
1394 is a Failure object indicating what went wrong. No further methods
1395 will be invoked on the IDownloadTarget after fail()."""
1396 def register_canceller(cb):
1397 """The CiphertextDownloader uses this to register a no-argument function
1398 that the target can call to cancel the download. Once this canceller
1399 is invoked, no further calls to write() or close() will be made."""
1401 """When the CiphertextDownloader is done, this finish() function will be
1402 called. Whatever it returns will be returned to the invoker of
1403 Downloader.download.
1405 # The following methods are just because that target might be a
1406 # repairer.DownUpConnector, and just because the current CHKUpload object
1407 expects to find the storage index and encoding parameters in its target.
1409 def set_storageindex(storageindex):
1410 """ Set the storage index. """
1411 def set_encodingparams(encodingparams):
1412 """ Set the encoding parameters. """
1414 class IDownloader(Interface):
1415 def download(uri, target):
1416 """Perform a CHK download, sending the data to the given target.
1417 'target' must provide IDownloadTarget.
1419 Returns a Deferred that fires (with the results of target.finish)
1420 when the download is finished, or errbacks if something went wrong."""
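
# Hedged usage sketch for IDownloader.download(), reusing the
# _ExampleFileTarget above; 'downloader' and 'uri' come from the caller.
def _example_download(downloader, uri, filename):
    d = downloader.download(uri, _ExampleFileTarget(filename))
    return d # fires with target.finish()'s value: here, the filename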
1422 class IEncryptedUploadable(Interface):
1423 def set_upload_status(upload_status):
1424 """Provide an IUploadStatus object that should be filled with status
1425 information. The IEncryptedUploadable is responsible for setting
1426 key-determination progress ('chk'), size, storage_index, and
1427 ciphertext-fetch progress. It may delegate some of this
1428 responsibility to others, in particular to the IUploadable."""
1431 """This behaves just like IUploadable.get_size()."""
1433 def get_all_encoding_parameters():
1434 """Return a Deferred that fires with a tuple of
1435 (k,happy,n,segment_size). The segment_size will be used as-is, and
1436 must match the following constraints: it must be a multiple of k, and
1437 it shouldn't be unreasonably larger than the file size (if
1438 segment_size is larger than filesize, the difference must be stored as padding).
1441 This usually passes through to the IUploadable method of the same name.
1444 The encoder strictly obeys the values returned by this method. To
1445 make an upload use non-default encoding parameters, you must arrange
1446 to control the values that this method returns.
1449 def get_storage_index():
1450 """Return a Deferred that fires with a 16-byte storage index.
1453 def read_encrypted(length, hash_only):
1454 """This behaves just like IUploadable.read(), but returns crypttext
1455 instead of plaintext. If hash_only is True, then this discards the
1456 data (and returns an empty list); this improves efficiency when
1457 resuming an interrupted upload (where we need to compute the
1458 plaintext hashes, but don't need the redundant encrypted data)."""
1460 def get_plaintext_hashtree_leaves(first, last, num_segments):
1461 """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
1462 plaintext segments, i.e. get the tagged hashes of the given segments.
1463 The segment size is expected to be generated by the
1464 IEncryptedUploadable before any plaintext is read or ciphertext
1465 produced, so that the segment hashes can be generated with only a single pass over the plaintext.
1468 This returns a Deferred which fires with a sequence of hashes, using:
1470 tuple(segment_hashes[first:last])
1472 'num_segments' is used to assert that the number of segments that the
1473 IEncryptedUploadable handled matches the number of segments that the
1474 encoder was expecting.
1476 This method must not be called until the final byte has been read
1477 from read_encrypted(). Once this method is called, read_encrypted()
1478 can never be called again.
1481 def get_plaintext_hash():
1482 """OBSOLETE; Get the hash of the whole plaintext.
1484 This returns a Deferred which fires with a tagged SHA-256 hash of the
1485 whole plaintext, obtained from hashutil.plaintext_hash(data).
1489 """Just like IUploadable.close()."""
1491 class IUploadable(Interface):
1492 def set_upload_status(upload_status):
1493 """Provide an IUploadStatus object that should be filled with status
1494 information. The IUploadable is responsible for setting
1495 key-determination progress ('chk')."""
1497 def set_default_encoding_parameters(params):
1498 """Set the default encoding parameters, which must be a dict mapping
1499 strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1500 'max_segment_size'. These might have an influence on the final
1501 encoding parameters returned by get_all_encoding_parameters(), if the
1502 Uploadable doesn't have more specific preferences.
1504 This call is optional: if it is not used, the Uploadable will use
1505 some built-in defaults. If used, this method must be called before
1506 any other IUploadable methods to have any effect.
1510 """Return a Deferred that will fire with the length of the data to be
1511 uploaded, in bytes. This will be called before the data is actually
1512 used, to compute encoding parameters.
1515 def get_all_encoding_parameters():
1516 """Return a Deferred that fires with a tuple of
1517 (k,happy,n,segment_size). The segment_size will be used as-is, and
1518 must match the following constraints: it must be a multiple of k, and
1519 it shouldn't be unreasonably larger than the file size (if
1520 segment_size is larger than filesize, the difference must be stored as padding).
1523 The relative values of k and n allow some IUploadables to request
1524 better redundancy than others (in exchange for consuming more space
1527 Larger values of segment_size reduce hash overhead, while smaller
1528 values reduce memory footprint and cause data to be delivered in
1529 smaller pieces (which may provide a smoother and more predictable
1530 download experience).
1532 The encoder strictly obeys the values returned by this method. To
1533 make an upload use non-default encoding parameters, you must arrange
1534 to control the values that this method returns. One way to influence
1535 them may be to call set_default_encoding_parameters() before calling
1536 get_all_encoding_parameters().
1539 def get_encryption_key():
1540 """Return a Deferred that fires with a 16-byte AES key. This key will
1541 be used to encrypt the data. The key will also be hashed to derive the storage index.
1544 Uploadables which want to achieve convergence should hash their file
1545 contents and the serialized_encoding_parameters to form the key
1546 (which of course requires a full pass over the data). Uploadables can
1547 use the upload.ConvergentUploadMixin class to achieve this automatically.
1550 Uploadables which do not care about convergence (or do not wish to
1551 make multiple passes over the data) can simply return a
1552 strongly-random 16-byte string.
1554 get_encryption_key() may be called multiple times: the IUploadable is
1555 required to return the same value each time.
1559 """Return a Deferred that fires with a list of strings (perhaps with
1560 only a single element) which, when concatenated together, contain the
1561 next 'length' bytes of data. If EOF is near, this may provide fewer
1562 than 'length' bytes. The total number of bytes provided by read()
1563 before it signals EOF must equal the size provided by get_size().
1565 If the data must be acquired through multiple internal read
1566 operations, returning a list instead of a single string may help to
1567 reduce string copies.
1569 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1570 so a 10kB file means length=3kB, 100kB file means length=30kB,
1571 and >=1MB file means length=300kB.
1573 This method provides for a single full pass through the data. Later
1574 use cases may desire multiple passes or access to only parts of the
1575 data (such as a mutable file making small edits-in-place). This API
1576 will be expanded once those use cases are better understood.
1580 """The upload is finished, and whatever filehandle was in use may be
1583 class IUploadResults(Interface):
1584 """I am returned by upload() methods. I contain a number of public
1585 attributes which can be read to determine the results of the upload. Some
1586 of these are functional, some are timing information. All of these may be None.
1589 .file_size : the size of the file, in bytes
1590 .uri : the CHK read-cap for the file
1591 .ciphertext_fetched : how many bytes were fetched by the helper
1592 .sharemap: dict mapping share identifier to set of serverids
1593 (binary strings). This indicates which servers were given
1594 which shares. For immutable files, the shareid is an
1595 integer (the share number, from 0 to N-1). For mutable
1596 files, it is a string of the form 'seq%d-%s-sh%d',
1597 containing the sequence number, the roothash, and the share number.
1599 .servermap : dict mapping server peerid to a set of share numbers
1600 .timings : dict of timing information, mapping name to seconds (float)
1601 total : total upload time, start to finish
1602 storage_index : time to compute the storage index
1603 peer_selection : time to decide which peers will be used
1604 contacting_helper : initial helper query to upload/no-upload decision
1605 existence_check : helper pre-upload existence check
1606 helper_total : initial helper query to helper finished pushing
1607 cumulative_fetch : helper waiting for ciphertext requests
1608 total_fetch : helper start to last ciphertext response
1609 cumulative_encoding : just time spent in zfec
1610 cumulative_sending : just time spent waiting for storage servers
1611 hashes_and_close : last segment push to shareholder close
1612 total_encode_and_push : first encode to shareholder close
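
# Hedged sketch of consuming IUploadResults; 'uploader' provides IUploader
# and 'uploadable' provides IUploadable, both supplied by the caller.
def _example_upload_and_report(uploader, uploadable):
    d = uploader.upload(uploadable)
    def _done(results):
        print "uri:", results.uri
        for shareid, serverids in results.sharemap.items():
            print " share %s placed on %d server(s)" % (shareid,
                                                        len(serverids))
        return results.uri
    d.addCallback(_done)
    return d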
1616 class IDownloadResults(Interface):
1617 """I am created internally by download() methods. I contain a number of
1618 public attributes which contain details about the download process.::
1620 .file_size : the size of the file, in bytes
1621 .servers_used : set of server peerids that were used during download
1622 .server_problems : dict mapping server peerid to a problem string. Only
1623 servers that had problems (bad hashes, disconnects) are listed here.
1625 .servermap : dict mapping server peerid to a set of share numbers. Only
1626 servers that had any shares are listed here.
1627 .timings : dict of timing information, mapping name to seconds (float)
1628 peer_selection : time to ask servers about shares
1629 servers_peer_selection : dict of peerid to DYHB-query time
1630 uri_extension : time to fetch a copy of the URI extension block
1631 hashtrees : time to fetch the hash trees
1632 segments : time to fetch, decode, and deliver segments
1633 cumulative_fetch : time spent waiting for storage servers
1634 cumulative_decode : just time spent in zfec
1635 cumulative_decrypt : just time spent in decryption
1636 total : total download time, start to finish
1637 fetch_per_server : dict of peerid to list of per-segment fetch times
1641 class IUploader(Interface):
1642 def upload(uploadable):
1643 """Upload the file. 'uploadable' must impement IUploadable. This
1644 returns a Deferred which fires with an IUploadResults instance, from
1645 which the URI of the file can be obtained as results.uri ."""
1647 def upload_ssk(write_capability, new_version, uploadable):
1648 """TODO: how should this work?"""
1650 class ICheckable(Interface):
1651 def check(monitor, verify=False, add_lease=False):
1652 """Check upon my health, optionally repairing any problems.
1654 This returns a Deferred that fires with an instance that provides
1655 ICheckResults, or None if the object is non-distributed (i.e. LIT files).
1658 The monitor will be checked periodically to see if the operation has
1659 been cancelled. If so, no new queries will be sent, and the Deferred
1660 will fire (with an OperationCancelledError) immediately.
1662 Filenodes and dirnodes (which provide IFilesystemNode) are also
1663 checkable. Instances that represent verifier-caps will be checkable
1664 but not downloadable. Some objects (like LIT files) do not actually
1665 live in the grid, and their checkers return None (non-distributed
1666 files are always healthy).
1668 If verify=False, a relatively lightweight check will be performed: I
1669 will ask all servers if they have a share for me, and I will believe
1670 whatever they say. If there are at least N distinct shares on the
1671 grid, my results will indicate r.is_healthy()==True. This requires a
1672 roundtrip to each server, but does not transfer very much data, so
1673 the network bandwidth is fairly low.
1675 If verify=True, a more resource-intensive check will be performed:
1676 every share will be downloaded, and the hashes will be validated on
1677 every bit. I will ignore any shares that failed their hash checks. If
1678 there are at least N distinct valid shares on the grid, my results
1679 will indicate r.is_healthy()==True. This requires N/k times as much
1680 download bandwidth (and server disk IO) as a regular download. If a
1681 storage server is holding a corrupt share, or is experiencing memory
1682 failures during retrieval, or is malicious or buggy, then
1683 verification will detect the problem, but checking will not.
1685 If add_lease=True, I will ensure that an up-to-date lease is present
1686 on each share. The lease secrets will be derived from my node secret
1687 (in BASEDIR/private/secret), so either I will add a new lease to the
1688 share, or I will merely renew the lease that I already had. In a
1689 future version of the storage-server protocol (once Accounting has
1690 been implemented), there may be additional options here to define the
1691 kind of lease that is obtained (which account number to claim, etc).
1693 TODO: any problems seen during checking will be reported to the
1694 health-manager.furl, a centralized object which is responsible for
1695 figuring out why files are unhealthy so corrective action can be taken.
1699 def check_and_repair(monitor, verify=False, add_lease=False):
1700 """Like check(), but if the file/directory is not healthy, attempt to
1703 Any non-healthy result will cause an immediate repair operation, to
1704 generate and upload new shares. After repair, the file will be as
1705 healthy as we can make it. Details about what sort of repair is done
1706 will be put in the check-and-repair results. The Deferred will not
1707 fire until the repair is complete.
1709 This returns a Deferred which fires with an instance of
1710 ICheckAndRepairResults."""
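
# Sketch of a lightweight health probe built on ICheckable.check();
# 'node' and 'monitor' are caller-supplied assumptions. verify=False
# keeps the cost to one roundtrip per server, as described above.
def _example_health_probe(node, monitor):
    d = node.check(monitor, verify=False, add_lease=False)
    def _got(cr):
        if cr is None:
            return True # non-distributed (LIT) objects are always healthy
        return cr.is_healthy()
    d.addCallback(_got)
    return d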
1712 class IDeepCheckable(Interface):
1713 def start_deep_check(verify=False, add_lease=False):
1714 """Check upon the health of me and everything I can reach.
1716 This is a recursive form of check(), usable only on dirnodes.
1718 I return a Monitor, with results that are an IDeepCheckResults object.
1721 TODO: If any of the directories I traverse are unrecoverable, the
1722 Monitor will report failure. If any of the files I check upon are
1723 unrecoverable, those problems will be reported in the
1724 IDeepCheckResults as usual, and the Monitor will not report a failure.
1728 def start_deep_check_and_repair(verify=False, add_lease=False):
1729 """Check upon the health of me and everything I can reach. Repair
1730 anything that isn't healthy.
1732 This is a recursive form of check_and_repair(), usable only on dirnodes.
1735 I return a Monitor, with results that are an
1736 IDeepCheckAndRepairResults object.
1738 TODO: If any of the directories I traverse are unrecoverable, the
1739 Monitor will report failure. If any of the files I check upon are
1740 unrecoverable, those problems will be reported in the
1741 IDeepCheckResults as usual, and the Monitor will not report a failure.
1745 class ICheckResults(Interface):
1746 """I contain the detailed results of a check/verify operation.
1749 def get_storage_index():
1750 """Return a string with the (binary) storage index."""
1751 def get_storage_index_string():
1752 """Return a string with the (printable) abbreviated storage index."""
1754 """Return the (string) URI of the object that was checked."""
1757 """Return a boolean, True if the file/dir is fully healthy, False if
1758 it is damaged in any way. Non-distributed LIT files always return True.
1761 def is_recoverable():
1762 """Return a boolean, True if the file/dir can be recovered, False if
1763 not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
1764 files always return True."""
1766 def needs_rebalancing():
1767 """Return a boolean, True if the file/dir's reliability could be
1768 improved by moving shares to new servers. Non-distributed LIT files
1769 always return False."""
1773 """Return a dictionary that describes the state of the file/dir. LIT
1774 files always return an empty dictionary. Normal files and directories
1775 return a dictionary with the following keys (note that these use
1776 binary strings rather than base32-encoded ones) (also note that for
1777 mutable files, these counts are for the 'best' version):
1779 count-shares-good: the number of distinct good shares that were found
1780 count-shares-needed: 'k', the number of shares required for recovery
1781 count-shares-expected: 'N', the number of total shares generated
1782 count-good-share-hosts: the number of distinct storage servers with
1783 good shares. If this number is less than
1784 count-shares-good, then some shares are
1785 doubled up, increasing the correlation of
1786 failures. This indicates that one or more
1787 shares should be moved to an otherwise unused
1788 server, if one is available.
1789 count-corrupt-shares: the number of shares with integrity failures
1790 list-corrupt-shares: a list of 'share locators', one for each share
1791 that was found to be corrupt. Each share
1792 locator is a list of (serverid, storage_index, sharenum).
1794 count-incompatible-shares: the number of shares which are of a share
1795 format unknown to this checker
1796 list-incompatible-shares: a list of 'share locators', one for each
1797 share that was found to be of an unknown
1798 format. Each share locator is a list of
1799 (serverid, storage_index, sharenum).
1800 servers-responding: list of (binary) storage server identifiers,
1801 one for each server which responded to the share
1802 query (even if they said they didn't have
1803 shares, and even if they said they did have
1804 shares but then didn't send them when asked, or
1805 dropped the connection, or returned a Failure,
1806 and even if they said they did have shares and
1807 sent incorrect ones when asked)
1808 sharemap: dict mapping share identifier to list of serverids
1809 (binary strings). This indicates which servers are holding
1810 which shares. For immutable files, the shareid is an
1811 integer (the share number, from 0 to N-1). For mutable
1812 files, it is a string of the form 'seq%d-%s-sh%d',
1813 containing the sequence number, the roothash, and the share number.
1816 The following keys are most relevant for mutable files, but immutable
1817 files will provide sensible values too::
1819 count-wrong-shares: the number of shares for versions other than the
1820 'best' one (which is defined as being the
1821 recoverable version with the highest sequence
1822 number, then the highest roothash). These are
1823 either leftover shares from an older version
1824 (perhaps on a server that was offline when an
1825 update occurred), shares from an unrecoverable
1826 newer version, or shares from an alternate
1827 current version that results from an
1828 uncoordinated write collision. For a healthy
1829 file, this will equal 0.
1831 count-recoverable-versions: the number of recoverable versions of
1832 the file. For a healthy file, this will equal 1.
1835 count-unrecoverable-versions: the number of unrecoverable versions
1836 of the file. For a healthy file, this will equal 0.
1842 """Return a string with a brief (one-line) summary of the results."""
1845 """Return a list of strings with more detailed results."""
1847 class ICheckAndRepairResults(Interface):
1848 """I contain the detailed results of a check/verify/repair operation.
1850 The IFilesystemNode.check()/verify()/repair() methods all return
1851 instances that provide ICheckAndRepairResults.
1854 def get_storage_index():
1855 """Return a string with the (binary) storage index."""
1856 def get_storage_index_string():
1857 """Return a string with the (printable) abbreviated storage index."""
1858 def get_repair_attempted():
1859 """Return a boolean, True if a repair was attempted. We might not
1860 attempt to repair the file because it was healthy, or healthy enough
1861 (i.e. some shares were missing but not enough to exceed some
1862 threshold), or because we don't know how to repair this object."""
1863 def get_repair_successful():
1864 """Return a boolean, True if repair was attempted and the file/dir
1865 was fully healthy afterwards. False if no repair was attempted or if
1866 a repair attempt failed."""
1867 def get_pre_repair_results():
1868 """Return an ICheckResults instance that describes the state of the
1869 file/dir before any repair was attempted."""
1870 def get_post_repair_results():
1871 """Return an ICheckResults instance that describes the state of the
1872 file/dir after any repair was attempted. If no repair was attempted,
1873 the pre-repair and post-repair results will be identical."""
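
# Hedged sketch of interpreting an ICheckAndRepairResults instance by
# comparing the pre- and post-repair checker results.
def _example_repair_report(car):
    pre = car.get_pre_repair_results()
    post = car.get_post_repair_results()
    if not car.get_repair_attempted():
        if pre.is_healthy():
            return "already healthy, no repair needed"
        return "unhealthy, but no repair was attempted"
    if car.get_repair_successful():
        return "repaired to full health"
    return "repair attempted, still unhealthy: %s" % post.get_summary()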
1876 class IDeepCheckResults(Interface):
1877 """I contain the results of a deep-check operation.
1879 This is returned by a call to ICheckable.deep_check().
1882 def get_root_storage_index_string():
1883 """Return the storage index (abbreviated human-readable string) of
1884 the first object checked."""
1886 """Return a dictionary with the following keys::
1888 count-objects-checked: count of how many objects were checked
1889 count-objects-healthy: how many of those objects were completely healthy
1891 count-objects-unhealthy: how many were damaged in some way
1892 count-objects-unrecoverable: how many were unrecoverable
1893 count-corrupt-shares: how many shares were found to have
1894 corruption, summed over all objects
1898 def get_corrupt_shares():
1899 """Return a set of (serverid, storage_index, sharenum) for all shares
1900 that were found to be corrupt. Both serverid and storage_index are binary.
1903 def get_all_results():
1904 """Return a dictionary mapping pathname (a tuple of strings, ready to
1905 be slash-joined) to an ICheckResults instance, one for each object
1906 that was checked."""
1908 def get_results_for_storage_index(storage_index):
1909 """Retrive the ICheckResults instance for the given (binary)
1910 storage index. Raises KeyError if there are no results for that
1914 """Return a dictionary with the same keys as
1915 IDirectoryNode.deep_stats()."""
1917 class IDeepCheckAndRepairResults(Interface):
1918 """I contain the results of a deep-check-and-repair operation.
1920 This is returned by a call to ICheckable.deep_check_and_repair().
1923 def get_root_storage_index_string():
1924 """Return the storage index (abbreviated human-readable string) of
1925 the first object checked."""
1927 """Return a dictionary with the following keys::
1929 count-objects-checked: count of how many objects were checked
1930 count-objects-healthy-pre-repair: how many of those objects were
1931 completely healthy (before any repair)
1933 count-objects-unhealthy-pre-repair: how many were damaged in some way (before any repair)
1935 count-objects-unrecoverable-pre-repair: how many were unrecoverable
1936 count-objects-healthy-post-repair: how many of those objects were
1937 completely healthy (after any repair)
1939 count-objects-unhealthy-post-repair: how many were damaged in some way (after any repair)
1941 count-objects-unrecoverable-post-repair: how many were unrecoverable
1943 count-repairs-attempted: repairs were attempted on this many
1944 objects. The count-repairs- keys will
1945 always be provided, however unless
1946 repair=true is present, they will all be zero
1948 count-repairs-successful: how many repairs resulted in healthy objects
1950 count-repairs-unsuccessful: how many repairs did not
1951 result in completely healthy objects
1952 count-corrupt-shares-pre-repair: how many shares were found to
1953 have corruption, summed over all
1954 objects examined (before any repair)
1956 count-corrupt-shares-post-repair: how many shares were found to
1957 have corruption, summed over all
1958 objects examined (after any repair)
1963 """Return a dictionary with the same keys as
1964 IDirectoryNode.deep_stats()."""
1966 def get_corrupt_shares():
1967 """Return a set of (serverid, storage_index, sharenum) for all shares
1968 that were found to be corrupt before any repair was attempted. Both
1969 serverid and storage_index are binary.
1971 def get_remaining_corrupt_shares():
1972 """Return a set of (serverid, storage_index, sharenum) for all shares
1973 that were found to be corrupt after any repair was completed. Both
1974 serverid and storage_index are binary. These are shares that need
1975 manual inspection and probably deletion.
1977 def get_all_results():
1978 """Return a dictionary mapping pathname (a tuple of strings, ready to
1979 be slash-joined) to an ICheckAndRepairResults instance, one for each
1980 object that was checked."""
1982 def get_results_for_storage_index(storage_index):
1983 """Retrive the ICheckAndRepairResults instance for the given (binary)
1984 storage index. Raises KeyError if there are no results for that
1988 class IRepairable(Interface):
1989 def repair(check_results):
1990 """Attempt to repair the given object. Returns a Deferred that fires
1991 with a IRepairResults object.
1993 I must be called with an object that implements ICheckResults, as
1994 proof that you have actually discovered a problem with this file. I
1995 will use the data in the checker results to guide the repair process,
1996 such as which servers provided bad data and should therefore be
1997 avoided. The ICheckResults object is inside the
1998 ICheckAndRepairResults object, which is returned by the
1999 ICheckable.check_and_repair() method::
2001 d = filenode.check_and_repair(monitor)
2002 def _got_results(check_and_repair_results):
2003 check_results = check_and_repair_results.get_pre_repair_results()
2004 return filenode.repair(check_results)
2005 d.addCallback(_got_results)
2009 class IRepairResults(Interface):
2010 """I contain the results of a repair operation."""
2013 class IClient(Interface):
2014 def upload(uploadable):
2015 """Upload some data into a CHK, get back the UploadResults for it.
2016 @param uploadable: something that implements IUploadable
2017 @return: a Deferred that fires with the UploadResults instance.
2018 To get the URI for this file, use results.uri .
2021 def create_mutable_file(contents=""):
2022 """Create a new mutable file (with initial) contents, get back the
2025 @param contents: (bytestring, callable, or None): this provides the
2026 initial contents of the mutable file. If 'contents' is a bytestring,
2027 it will be used as-is. If 'contents' is a callable, it will be
2028 invoked with the new MutableFileNode instance and is expected to
2029 return a bytestring with the initial contents of the file (the
2030 callable can use node.get_writekey() to decide how to encrypt the
2031 initial contents, e.g. for a brand new dirnode with initial
2032 children). contents=None is equivalent to an empty string. Using
2033 content_maker= is more efficient than creating a mutable file and
2034 setting its contents in two separate operations.
2036 @return: a Deferred that fires with the (string) SSK URI for the new file.
2040 def create_dirnode(initial_children={}):
2041 """Create a new unattached dirnode, possibly with initial children.
2043 @param initial_children: dict with keys that are unicode child names,
2044 and values that are (childnode, metadata) tuples.
2046 @return: a Deferred that fires with the new IDirectoryNode instance.
2049 def create_node_from_uri(uri, rouri):
2050 """Create a new IFilesystemNode instance from the uri, synchronously.
2051 @param uri: a string or IURI-providing instance, or None. This could
2052 be for a LiteralFileNode, a CHK file node, a mutable file
2053 node, or a directory node
2054 @param rouri: a string or IURI-providing instance, or None. If the
2055 main uri is None, I will use the rouri instead. If I
2056 recognize the format of the main uri, I will ignore the
2057 rouri (because it can be derived from the writecap).
2059 @return: an instance that provides IFilesystemNode (or more usefully
2060 one of its subclasses). File-specifying URIs will result in
2061 IFileNode or IMutableFileNode -providing instances, like
2062 FileNode, LiteralFileNode, or MutableFileNode.
2063 Directory-specifying URIs will result in
2064 IDirectoryNode-providing instances, like DirectoryNode.
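
# Sketch of the callable-contents form of create_mutable_file() described
# above; 'client' provides IClient. The callable is handed the new node
# before it has any contents, so it can consult the writekey (useful when
# the initial contents must be encrypted to that key).
def _example_create_with_maker(client):
    def _make_contents(node):
        # hypothetical: fold the writekey into the initial contents
        return "created for writekey %s" % node.get_writekey().encode("hex")
    return client.create_mutable_file(_make_contents)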
2067 class INodeMaker(Interface):
2068 """The NodeMaker is used to create IFilesystemNode instances. It can
2069 accept a filecap/dircap string and return the node right away. It can
2070 also create new nodes (i.e. upload a file, or create a mutable file)
2071 asynchronously. Once you have one of these nodes, you can use other
2072 methods to determine whether it is a file or directory, and to download
2073 or modify its contents.
2075 The NodeMaker encapsulates all the authorities that these
2076 IFilesystemNodes require (like references to the StorageFarmBroker). Each
2077 Tahoe process will typically have a single NodeMaker, but unit tests may
2078 create simplified/mocked forms for testing purposes.
2080 def create_from_cap(writecap, readcap=None):
2081 """I create an IFilesystemNode from the given writecap/readcap. I can
2082 only provide nodes for existing file/directory objects: use my other
2083 methods to create new objects. I return synchronously."""
2085 def create_mutable_file(contents=None, keysize=None):
2086 """I create a new mutable file, and return a Deferred which will fire
2087 with the IMutableFileNode instance when it is ready. If contents= is
2088 provided (a bytestring), it will be used as the initial contents of
2089 the new file, otherwise the file will contain zero bytes. keysize= is
2090 for use by unit tests, to create mutable files that are smaller than usual.
2093 def create_new_mutable_directory(initial_children={}):
2094 """I create a new mutable directory, and return a Deferred which will
2095 fire with the IDirectoryNode instance when it is ready. If
2096 initial_children= is provided (a dict mapping unicode child name to
2097 (childnode, metadata_dict) tuples), the directory will be populated
2098 with those children, otherwise it will be empty."""
2100 class IClientStatus(Interface):
2101 def list_all_uploads():
2102 """Return a list of uploader objects, one for each upload which
2103 currently has an object available (tracked with weakrefs). This is
2104 intended for debugging purposes."""
2105 def list_active_uploads():
2106 """Return a list of active IUploadStatus objects."""
2107 def list_recent_uploads():
2108 """Return a list of IUploadStatus objects for the most recently
2111 def list_all_downloads():
2112 """Return a list of downloader objects, one for each download which
2113 currently has an object available (tracked with weakrefs). This is
2114 intended for debugging purposes."""
2115 def list_active_downloads():
2116 """Return a list of active IDownloadStatus objects."""
2117 def list_recent_downloads():
2118 """Return a list of IDownloadStatus objects for the most recently
2119 started downloads."""
2121 class IUploadStatus(Interface):
2123 """Return a timestamp (float with seconds since epoch) indicating
2124 when the operation was started."""
2125 def get_storage_index():
2126 """Return a string with the (binary) storage index in use on this
2127 upload. Returns None if the storage index has not yet been determined.
2130 """Return an integer with the number of bytes that will eventually
2131 be uploaded for this file. Returns None if the size is not yet known.
2134 """Return True if this upload is using a Helper, False if not."""
2136 """Return a string describing the current state of the upload
2139 """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
2140 each from 0.0 to 1.0 . 'chk' describes how much progress has been
2141 made towards hashing the file to determine a CHK encryption key: if
2142 non-convergent encryption is in use, this will be trivial, otherwise
2143 the whole file must be hashed. 'ciphertext' describes how much of the
2144 ciphertext has been pushed to the helper, and is '1.0' for non-helper
2145 uploads. 'encode_and_push' describes how much of the encode-and-push
2146 process has finished: for helper uploads this is dependent upon the
2147 helper providing progress reports. It might be reasonable to add all
2148 three numbers and report the sum to the user."""
2150 """Return True if the upload is currently active, False if not."""
2152 """Return an instance of UploadResults (which contains timing and
2153 sharemap information). Might return None if the upload is not yet finished.
2156 """Each upload status gets a unique number: this method returns that
2157 number. This provides a handle to this particular upload, so a web
2158 page can generate a suitable hyperlink."""
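
# Sketch of collapsing IUploadStatus progress into one number, following
# get_progress()'s suggestion of adding the three phases together;
# dividing by three keeps the result in 0.0..1.0.
def _example_overall_progress(upload_status):
    chk, ciphertext, encode_and_push = upload_status.get_progress()
    return (chk + ciphertext + encode_and_push) / 3.0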
2160 class IDownloadStatus(Interface):
2162 """Return a timestamp (float with seconds since epoch) indicating
2163 when the operation was started."""
2164 def get_storage_index():
2165 """Return a string with the (binary) storage index in use on this
2166 download. This may be None if there is no storage index (i.e. LIT files).
2169 """Return an integer with the number of bytes that will eventually be
2170 retrieved for this file. Returns None if the size is not yet known.
2173 """Return True if this download is using a Helper, False if not."""
2175 """Return a string describing the current state of the download
2178 """Returns a float (from 0.0 to 1.0) describing the amount of the
2179 download that has completed. This value will remain at 0.0 until the
2180 first byte of plaintext is pushed to the download target."""
2182 """Return True if the download is currently active, False if not."""
2184 """Each download status gets a unique number: this method returns
2185 that number. This provides a handle to this particular download, so a
2186 web page can generate a suitable hyperlink."""
2188 class IServermapUpdaterStatus(Interface):
2190 class IPublishStatus(Interface):
2192 class IRetrieveStatus(Interface):
2195 class NotCapableError(Exception):
2196 """You have tried to write to a read-only node."""
2198 class BadWriteEnablerError(Exception):
2201 class RIControlClient(RemoteInterface):
2203 def wait_for_client_connections(num_clients=int):
2204 """Do not return until we have connections to at least NUM_CLIENTS
2208 def upload_from_file_to_uri(filename=str,
2209 convergence=ChoiceOf(None,
2210 StringConstraint(2**20))):
2211 """Upload a file to the grid. This accepts a filename (which must be
2212 absolute) that points to a file on the node's local disk. The node will
2213 read the contents of this file, upload it to the grid, then return the
2214 URI at which it was uploaded. If convergence is None then a random
2215 encryption key will be used, else the plaintext will be hashed, then
2216 that hash will be mixed together with the "convergence" string to form the encryption key.
2221 def download_from_uri_to_file(uri=URI, filename=str):
2222 """Download a file from the grid, placing it on the node's local disk
2223 at the given filename (which must be absolute[?]). Returns the
2224 absolute filename where the file was written."""
2229 def get_memory_usage():
2230 """Return a dict describes the amount of memory currently in use. The
2231 keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
2232 measuring memory consupmtion in bytes."""
2233 return DictOf(str, int)
2235 def speed_test(count=int, size=int, mutable=Any()):
2236 """Write 'count' tempfiles to disk, all of the given size. Measure
2237 how long (in seconds) it takes to upload them all to the servers.
2238 Then measure how long it takes to download all of them. If 'mutable'
2239 is 'create', time creation of mutable files. If 'mutable' is
2240 'upload', then time access to the same mutable file instead of creating new ones.
2243 Returns a tuple of (upload_time, download_time).
2245 return (float, float)
2247 def measure_peer_response_time():
2248 """Send a short message to each connected peer, and measure the time
2249 it takes for them to respond to it. This is a rough measure of the
2250 application-level round trip time.
2252 @return: a dictionary mapping peerid to a float (RTT time in seconds)
2255 return DictOf(Nodeid, float)
2257 UploadResults = Any() #DictOf(str, str)
2259 class RIEncryptedUploadable(RemoteInterface):
2260 __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
2265 def get_all_encoding_parameters():
2266 return (int, int, int, long)
2268 def read_encrypted(offset=Offset, length=ReadSize):
2275 class RICHKUploadHelper(RemoteInterface):
2276 __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
2280 Return a dictionary of version information.
2282 return DictOf(str, Any())
2284 def upload(reader=RIEncryptedUploadable):
2285 return UploadResults
2288 class RIHelper(RemoteInterface):
2289 __remote_name__ = "RIHelper.tahoe.allmydata.com"
2293 Return a dictionary of version information.
2295 return DictOf(str, Any())
2297 def upload_chk(si=StorageIndex):
2298 """See if a file with a given storage index needs uploading. The
2299 helper will ask the appropriate storage servers to see if the file
2300 has already been uploaded. If so, the helper will return a set of
2301 'upload results' that includes whatever hashes are needed to build
2302 the read-cap, and perhaps a truncated sharemap.
2304 If the file has not yet been uploaded (or if it was only partially
2305 uploaded), the helper will return an empty upload-results dictionary
2306 and also an RICHKUploadHelper object that will take care of the
2307 upload process. The client should call upload() on this object and
2308 pass it a reference to an RIEncryptedUploadable object that will
2309 provide ciphertext. When the upload is finished, the upload() method
2310 will finish and return the upload results.
2312 return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
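
# Client-side sketch of the helper protocol described in upload_chk(): the
# helper either returns complete results (the file was already present) or
# an RICHKUploadHelper to push ciphertext through. 'helper' is a remote
# reference to an RIHelper and 'reader' a local RIEncryptedUploadable
# provider; both are assumptions here.
def _example_helper_upload(helper, storage_index, reader):
    d = helper.callRemote("upload_chk", storage_index)
    def _got(res):
        upload_results, upload_helper = res
        if upload_helper is None:
            return upload_results # deduplicated: no new upload needed
        return upload_helper.callRemote("upload", reader)
    d.addCallback(_got)
    return d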
2315 class RIStatsProvider(RemoteInterface):
2316 __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
2318 Provides access to statistics and monitoring information.
2323 returns a dictionary containing 'counters' and 'stats', each a
2324 dictionary with string counter/stat name keys, and numeric values.
2325 counters are monotonically increasing measures of work done, and
2326 stats are instantaneous measures (potentially time averaged internally).
2329 return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))
2331 class RIStatsGatherer(RemoteInterface):
2332 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
2334 Provides a monitoring service for centralised collection of stats
2337 def provide(provider=RIStatsProvider, nickname=str):
2339 @param provider: a stats collector instance which should be polled
2340 periodically by the gatherer to collect stats.
2341 @param nickname: a name useful to identify the provided client
2346 class IStatsProducer(Interface):
2349 returns a dictionary, with str keys representing the names of stats
2350 to be monitored, and numeric values.
2353 class RIKeyGenerator(RemoteInterface):
2354 __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
2356 Provides a service offering to make RSA key pairs.
2359 def get_rsa_key_pair(key_size=int):
2361 @param key_size: the size of the signature key.
2362 @return: tuple(verifying_key, signing_key)
2364 return TupleOf(str, str)
2367 class FileTooLargeError(Exception):
2370 class IValidatedThingProxy(Interface):
2372 """ Acquire a thing and validate it. Return a deferred which is
2373 eventually fired with self if the thing is valid or errbacked if it
2374 can't be acquired or validated."""
2376 class InsufficientVersionError(Exception):
2377 def __init__(self, needed, got):
2378 self.needed = needed
2379 self.got = got
2381 return "InsufficientVersionError(need '%s', got %s)" % (self.needed, self.got)