from zope.interface import Interface
from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable

HASH_SIZE = 32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file

DEFAULT_MAX_SEGMENT_SIZE = 128*1024

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable bucket modifications
LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests

class RIStubClient(RemoteInterface):
    """Each client publishes a service announcement for a dummy object called
    the StubClient. This object doesn't actually offer any services, but the
    announcement helps the Introducer keep track of which clients are
    subscribed (so the grid admin can keep track of things like the size of
    the grid and the client versions in use). This is the (empty)
    RemoteInterface for the StubClient."""

class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """
        return None

    def abort():
        """Abandon all the data that has been written.
        """
        return None

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share(),
        which is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        details.
        """

TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector

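# Example (illustrative only; this helper is not part of the protocol): a
# basic test-and-set request for share number 0. The test vector asserts
# that the first byte of the share still holds the value we observed in an
# earlier read, the data vector overwrites the start of the share,
# new_length=None leaves the container size alone, and the read vector
# fetches the first 100 bytes of every share as they were *before* the
# write was applied.
def _example_tw_vectors():
    expected_prefix = "\x01" # hypothetical previously-observed first byte
    testv = [(0, 1, "eq", expected_prefix)]
    datav = [(0, "new data for the start of the share")]
    new_length = None
    tw_vectors = {0: (testv, datav, new_length)}
    read_vector = [(0, 100)]
    return tw_vectors, read_vector
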
class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(str, Any())

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket
                             refresh. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
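
    # For example (illustrative values only): a result of
    #   (set([0, 1]), {2: writer2, 3: writer3})
    # would mean shares 0 and 1 are already stored on this server, and
    # shares 2 and 3 may now be uploaded through the returned
    # RIBucketWriter references.
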
    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (Note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        bucket.)
        """
        return Any() # returns None now, but future versions might change

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket, resetting the timer to 31 days.
        Some networks will use this, some will not. If there is no bucket for
        the given storage_index, IndexError will be raised.

        For mutable shares, if the given renew_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def cancel_lease(storage_index=StorageIndex,
                     cancel_secret=LeaseCancelSecret):
        """
        Cancel the lease on a given bucket. If this was the last lease on the
        bucket, the bucket will be deleted. If there is no bucket for the
        given storage_index, IndexError will be raised.

        For mutable shares, if the given cancel_secret does not match an
        existing lease, IndexError will be raised with a note listing the
        server-nodeids on the existing leases, so leases on migrated shares
        can be renewed or cancelled. For immutable shares, IndexError
        (without the note) will be raised.
        """
        return Any()

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """General-purpose test-and-set operation for mutable slots. Perform
        a bunch of comparisons against the existing shares. If they all pass,
        then apply a bunch of write vectors to those shares. Then use the
        read vectors to extract data from all the shares and return the data.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket
                             refresh. This secret is generated by the client
                             and stored for later comparison by the server.
                             Each server is given a different secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a bool(
        (read(offset, length)) OPERATOR specimen ) and only performs the
        write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

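        For example (a sketch, using made-up contents), a request that
        creates share 5 only if it does not already exist could use::

         tw_vectors = {5: ([(0, 1, 'eq', '')], # assert that share 5 is absent
                           [(0, share5_data)], # write the whole new share
                           None)}              # new_length: leave size alone
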
        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created.

        Each write vector is accompanied by a 'new_length' argument. If
        new_length is not None, use it to set the size of the container. This
        can be used to pre-allocate space for a series of upcoming writes, or
        truncate existing data. If the container is growing, new_length will
        be applied before datav. If the container is shrinking, it will be
        applied afterwards. If new_length==0, the share will be deleted.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same vector is used for
        all shares. This captures the state that was tested by the test
        vector.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))

    def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
                             shnum=int, reason=str):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        which did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """

class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash)):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
        """
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode::

         for k in sorted(dict.keys()):
             assert re.match(r'^[a-zA-Z_\-]+$', k)
             write(k + ':' + netstring(dict[k]))

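        A minimal runnable sketch of that serialization (assuming string
        keys and values; netstring() here is the usual length-prefixed
        encoding, as in allmydata.util.netstring)::

         import re
         def netstring(s):
             return '%d:%s,' % (len(s), s)
         def serialize_uri_extension(d):
             pieces = []
             for k in sorted(d.keys()):
                 assert re.match(r'^[a-zA-Z_\-]+$', k)
                 pieces.append(k + ':' + netstring(d[k]))
             return ''.join(pieces)
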
        @return: a Deferred that fires (with None) when the operation completes
        """

    def close():
        """Finish writing and close the bucket. The share is not finalized
        until this method is called: if the uploading client disconnects
        before calling close(), the partially-written share will be
        discarded.

        @return: a Deferred that fires (with None) when the operation completes
        """

class IStorageBucketReader(Interface):

    def get_block_data(blocknum=int, blocksize=int, size=int):
        """Most blocks will be the same size. The last block might be shorter
        than the others.

        @return: ShareData
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_block_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(Hash)
        """

    def get_share_hashes(at_least_these=SetOf(int)):
        """
        @return: ListOf(TupleOf(int, Hash))
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """

class IStorageBroker(Interface):
    def get_servers_for_psi(peer_selection_index):
        """
        @return: list of IServer instances
        """

    def get_connected_servers():
        """
        @return: frozenset of connected IServer instances
        """

    def get_known_servers():
        """
        @return: frozenset of IServer instances
        """

    def get_all_serverids():
        """
        @return: frozenset of serverid strings
        """

    def get_nickname_for_serverid(serverid):
        """
        @return: unicode nickname, or None
        """

    # methods moved from IntroducerClient, need review

    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
        each active connection we've established to a remote service. This is
        mostly useful for unit tests that need to wait until a certain number
        of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are actively
        trying to connect to. Each RemoteServiceConnector has the following
        public attributes::

          service_name: the type of service provided, like 'storage'
          announcement_time: when we first heard about this service
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection

          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, same

          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web page
        which describes connecting and connected peers.
        """

    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have a connection (to
        one or more services) established. Mostly useful for unit tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """

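# A hash-based permutation of the kind get_permuted_peers() describes can be
# sketched as follows (illustrative only; the real implementation may differ
# in hash choice and other details):
def _example_permuted_peers(peers, key):
    import hashlib
    # peers is a list of (peerid, rref) tuples; sorting by the hash of
    # key+peerid yields the same order for the same key every time, while
    # different keys distribute load across different orderings
    return sorted(peers, key=lambda peer: hashlib.sha1(key + peer[0]).digest())
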
class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    # TODO: rename to get_read_cap()
    def get_readonly():
        """Return another IURI instance, which represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IVerifierURI(Interface, IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    """I am a URI which represents a mutable filenode."""

class IDirectoryURI(Interface):
    """I am a URI which represents a dirnode."""

class IReadonlyDirectoryURI(Interface):
    """I am a URI which represents a read-only dirnode."""

class CapConstraintError(Exception):
    """A constraint on a cap was violated."""

class MustBeDeepImmutableError(CapConstraintError):
    """Mutable children cannot be added to an immutable directory.
    Also, caps obtained from an immutable directory can trigger this error
    if they are later found to refer to a mutable object and then used."""

class MustBeReadonlyError(CapConstraintError):
    """Known write caps cannot be specified in a ro_uri field. Also,
    caps obtained from a ro_uri field can trigger this error if they
    are later found to be write caps and then used."""

class MustNotBeUnknownRWError(CapConstraintError):
    """Cannot add an unknown child cap specified in a rw_uri field."""

# The hierarchy looks like this:
#  IFilesystemNode
#   IFileNode
#    IMutableFileNode
#    IImmutableFileNode
#   IDirectoryNode

class IFilesystemNode(Interface):
    def get_cap():
        """Return the strongest 'cap instance' associated with this node.
        (writecap for writeable-mutable files/directories, readcap for
        immutable or readonly-mutable files/directories). To convert this
        into a string, call .to_string() on the result."""

    def get_readcap():
        """Return a readonly cap instance for this node. For immutable or
        readonly nodes, get_cap() and get_readcap() return the same thing."""

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_uri():
        """Return the URI string corresponding to the strongest cap associated
        with this node. If this node is read-only, the URI will only offer
        read-only access. If this node is read-write, the URI will offer
        read-write access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_write_uri():
        """Return the URI string that can be used by others to get write
        access to this node, if it is writeable. If this is a read-only node,
        return None."""

    def get_readonly_uri():
        """Return the URI string that can be used by others to get read-only
        access to this node. The result is a read-only URI, regardless of
        whether this node is read-only or read-write.

        If you have merely read-only access to this node, get_readonly_uri()
        will return the same thing as get_uri().
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files and directories)."""

    def is_readonly():
        """Return True if this reference provides read-only access to the
        given file or directory (i.e. if you cannot modify it), or False if
        not. Note that even if this reference is read-only, someone else may
        hold a read-write reference to it."""

569 """Return True if this file or directory is mutable (by *somebody*,
570 not necessarily you), False if it is is immutable. Note that a file
571 might be mutable overall, but your reference to it might be
572 read-only. On the other hand, all references to an immutable file
573 will be read-only; there are no read-write references to an immutable
578 """Return True if this is an unknown node."""
580 def is_allowed_in_immutable_directory():
581 """Return True if this node is allowed as a child of a deep-immutable
582 directory. This is true if either the node is of a known-immutable type,
583 or it is unknown and read-only.
587 """Raise any error associated with this node."""
590 """Return the length (in bytes) of the data this node represents. For
591 directory nodes, I return the size of the backing store. I return
592 synchronously and do not consult the network, so for mutable objects,
593 I will return the most recently observed size for the object, or None
594 if I don't remember a size. Use get_current_size, which returns a
595 Deferred, if you want more up-to-date information."""
597 def get_current_size():
598 """I return a Deferred that fires with the length (in bytes) of the
599 data this node represents.
class IFileNode(IFilesystemNode):
    """I am a node which represents a file: a sequence of bytes. I am not a
    container, like IDirectoryNode."""

class IImmutableFileNode(IFileNode):
    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None).

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
         (if streaming == False)::
          consumer does p.resumeProducing()
           consumer.write(data)
          consumer does p.resumeProducing()
           consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        See src/allmydata/util/consumer.py for an example of a simple
        download-to-memory consumer.
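
        Such a consumer might be sketched like this (following the
        MemoryConsumer in that file; details may differ)::

         from zope.interface import implements
         from twisted.internet.interfaces import IConsumer

         class MemoryConsumer:
             implements(IConsumer)
             def __init__(self):
                 self.chunks = []
                 self.done = False
             def registerProducer(self, p, streaming):
                 self.producer = p
                 if streaming:
                     p.resumeProducing() # producer pushes from here on
                 else:
                     while not self.done: # non-streaming: we must pull
                         p.resumeProducing()
             def write(self, data):
                 self.chunks.append(data)
             def unregisterProducer(self):
                 self.done = True
        """
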
class IMutableFileNode(IFileNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node which has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.

    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """

    def download_best_version():
        """Download the 'best' available version of the file, meaning one of
        the recoverable versions with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has been
        uploaded.

        I update an internal servermap with MODE_READ, determine which
        version of the file is indicated by
        servermap.best_recoverable_version(), and return a Deferred that
        fires with its contents. If no version is recoverable, the Deferred
        will errback with UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be downloaded with
        download_best_version(), without actually downloading the whole file.

        I return a Deferred that fires with an integer.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

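        For example, a modifier that appends a line, but only if it is not
        already present, might look like this (a sketch, using the argument
        order described above; 'node' is some IMutableFileNode)::

         def _add_line(old_contents, first_time, servermap):
             line = 'new entry\n'
             if line in old_contents:
                 return None # change already present: no write needed
             return old_contents + line
         d = node.modify(_add_line)
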
        If the modifier raises an exception, it will be returned in the
        errback.
        """

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap which was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """

    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all versions).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """

804 """Return this filenode's writekey, or None if the node does not have
805 write-capability. This may be used to assist with data structures
806 that need to make certain data available only to writers, such as the
807 read-write child caps in dirnodes. The recommended process is to have
808 reader-visible data be submitted to the filenode in the clear (where
809 it will be encrypted by the filenode using the readkey), but encrypt
810 writer-visible data using this writekey.
class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares"""

class NoSharesError(Exception):
    """Download was unable to get any shares at all."""

class UploadUnhappinessError(Exception):
    """Upload was unable to satisfy 'servers_of_happiness'"""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data which is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child which does not exist."""

class ChildOfWrongTypeError(Exception):
    """An operation was attempted on a child of the wrong type (file or
    directory)."""

class IDirectoryNode(IFilesystemNode):
    """I represent a filesystem node that is a container, with a
    name-to-child mapping, holding the tahoe equivalent of a directory. All
    child names are unicode strings, and all children are some sort of
    IFilesystemNode (a file, subdirectory, or unknown node).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of
        metadata."""

    def has_child(name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not. The child name must
        be a unicode string."""

    def get(name):
        """I return a Deferred that fires with a specific named child node,
        which is an IFilesystemNode. The child name must be a unicode string.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_metadata_for(name):
        """I return a Deferred that fires with the metadata dictionary for
        a specific named child node. The child name must be a unicode string.
        This metadata is stored in the *edge*, not in the child, so it is
        attached to the parent dirnode rather than the child node.
        I raise NoSuchChildError if I do not have a child by that name."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the new
        metadata. The child name must be a unicode string. This metadata is
        stored in the *edge*, not in the child, so it is attached to the
        parent dirnode rather than the child node. I return a Deferred
        (that fires with this dirnode) when the operation is complete.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_child_at_path(path):
        """Transform a child path into an IFilesystemNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

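        For example, these two calls (a sketch; the names are made up)
        retrieve the same descendant, using the two path forms described
        below::

         d = root.get_child_at_path(u"backups/2010/summary.txt")
         d = root.get_child_at_path([u"backups", u"2010", u"summary.txt"])
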
        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IFilesystemNode and metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If you have
        both the writecap and readcap, you should provide both arguments.
        If you have only one cap and don't know whether it is read-only,
        provide it as the writecap argument and leave the readcap as None.
        If you have only one cap that is known to be read-only, provide it
        as the readcap argument and leave the writecap as None.
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_children(entries, overwrite=True):
        """Add multiple children (by writecap+readcap) to a directory node.
        Takes a dictionary, with childname as keys and (writecap, readcap)
        tuples (or (writecap, readcap, metadata) triples) as values. Returns
        a Deferred that fires (with this dirnode) when the operation
        finishes. This is equivalent to calling set_uri() multiple times, but
        is much more efficient. All child names must be unicode strings.
        """

    def set_node(name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name. The child name must be a unicode string. The 'child'
        instance must be an instance providing IFilesystemNode.

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
        ctime/mtime semantics of traditional filesystems. See the
        "About the metadata" section of webapi.txt for further information.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

    def set_nodes(entries, overwrite=True):
        """Add multiple children to a directory node. Takes a dict mapping
        unicode childname to (child_node, metadata) tuples. If metadata=None,
        the original metadata is left unmodified. Returns a Deferred that
        fires (with this dirnode) when the operation finishes. This is
        equivalent to calling set_node() multiple times, but is much more
        efficient.
        """

    def add_file(name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting ImmutableFileNode to the directory at the given name. I set
        metadata the same way as set_uri and set_node. The child name must be
        a unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""

    def delete(name, must_exist=True, must_be_directory=False, must_be_file=False):
        """I remove the child at the specific name. I return a Deferred that
        fires when the operation finishes. The child name must be a unicode
        string. If must_exist is True and I do not have a child by that name,
        I raise NoSuchChildError. If must_be_directory is True and the child
        is a file, or if must_be_file is True and the child is a directory,
        I raise ChildOfWrongTypeError."""

    def create_subdirectory(name, initial_children={}, overwrite=True, metadata=None):
        """I create and attach a directory at the given name. The new
        directory can be empty, or it can be populated with children
        according to 'initial_children', which takes a dictionary in the same
        format as set_nodes (i.e. mapping unicode child name to (childnode,
        metadata) tuples). The child name must be a unicode string. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None,
                      overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. TODO: what
        should we do about metadata? I return a Deferred that fires when the
        operation finishes. The child name must be a unicode string. I raise
        NoSuchChildError if I do not have a child by that name."""

    def build_manifest():
        """I generate a table of everything reachable from this directory.
        I also compute deep-stats as described below.

        I return a Monitor. The Monitor's results will be a dictionary with
        the following keys::

         res['manifest']: a list of (path, cap) tuples for all nodes
                          (directories and files) reachable from this one.
                          'path' will be a tuple of unicode strings. The
                          origin dirnode will be represented by an empty path
                          tuple.
         res['verifycaps']: a list of (printable) verifycap strings, one for
                            each reachable non-LIT node. This is a set:
                            it will contain no duplicates.
         res['storage-index']: a list of (base32) storage index strings,
                               one for each reachable non-LIT node. This is
                               a set: it will contain no duplicates.
         res['stats']: a dictionary, the same that is generated by
                       start_deep_stats() below.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.
        """

    def start_deep_stats():
        """Return a Monitor, examining all nodes (directories and files)
        reachable from this one. The Monitor's results will be a dictionary
        with the following keys::

         count-immutable-files: count of how many CHK files are in the set
         count-mutable-files: same, for mutable files (does not include
                              directories)
         count-literal-files: same, for LIT files
         count-files: sum of the above three

         count-directories: count of directories

         size-immutable-files: total bytes for all CHK files in the set
         size-mutable-files (TODO): same, for current version of all mutable
                                    files, does not include directories
         size-literal-files: same, for LIT files
         size-directories: size of mutable files used by directories

         largest-directory: number of bytes in the largest directory
         largest-directory-children: number of children in the largest
                                     directory
         largest-immutable-file: number of bytes in the largest CHK file

        size-mutable-files is not yet implemented, because it would involve
        even more queries than deep_stats does.

        The Monitor will also have an .origin_si attribute with the (binary)
        storage index of the starting point.

        This operation will visit every directory node underneath this one,
        and can take a long time to run. On a typical workstation with good
        bandwidth, this can examine roughly 15 directories per second (and
        takes several minutes of 100% CPU for ~1700 directories).
        """

class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' argument to use less CPU). The encoding
        math will be chosen such that the decoder can get by with as few as
        'required_shares' of these shares and still reproduce the original
        data. For example, set_params(1000, 5, 5) offers no redundancy at
        all, whereas set_params(1000, 1, 10) provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).

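        As a worked example: set_params(1000, 5, 10) produces shares of
        1000/5 == 200 bytes each, and 10 such shares occupy a total of
        10*200 == 2000 bytes, a 2x expansion of the original 1000 bytes.
        Any 5 of the 10 shares suffice to reconstruct the data.
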
        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_params():
        """Return the 3-tuple of data_size, required_shares, max_shares"""

    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This method
        returns an index into this table; the value at this index is an
        encoder class, and this encoder is an instance of that class.
        """

    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """

    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data) must
        be equal to the 'data_size' value passed earlier to set_params().

        This will return a Deferred that will fire with two lists. The first
        is a list of shares, each of which is a string (or other buffer
        object) such that len(share) is the same as what get_share_size()
        returned earlier. The second is a list of shareids, in which each is
        an integer. The lengths of the two lists will always be equal to each
        other. The user should take care to keep each share closely
        associated with its shareid, as one is useless without the other.

        The length of this output list will normally be the same as the value
        provided to the 'max_shares' parameter of set_params(). This may be
        different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        """

    def encode(inshares, desired_share_ids=None):
        """Encode some data. This may be called multiple times. Each call is
        independent.

        inshares is a sequence of length required_shares, containing buffers
        (i.e. strings), where each buffer contains the next contiguous
        non-overlapping segment of the input data. Each buffer is required to
        be the same length, and the sum of the lengths of the buffers is
        required to be exactly the data_size promised by set_params(). (This
        implies that the data has to be padded before being passed to
        encode(), unless of course it already happens to be an even multiple
        of required_shares in length.)

        Note: the requirement to break up your data into
        'required_shares' chunks of exactly the right length before
        calling encode() is surprising from the point of view of a user
        who doesn't know how FEC works. It feels like an
        implementation detail that has leaked outside the abstraction
        barrier. Is there a use case in which the data to be encoded
        might already be available in pre-segmented chunks, such that
        it is faster or less work to make encode() take a list rather
        than splitting a single string?

        Yes, there is: suppose you are uploading a file with K=64,
        N=128, segsize=262,144. Then each in-share will be of size
        4096. If you use this .encode() API then your code could first
        read each successive 4096-byte chunk from the file and store
        each one in a Python string and store each such Python string
        in a Python list. Then you could call .encode(), passing that
        list as "inshares". The encoder would generate the other 64
        "secondary shares" and return to you a new list containing
        references to the same 64 Python strings that you passed in
        (as the primary shares) plus references to the new 64 Python
        strings.

        (You could even imagine that your code could use readv() so
        that the operating system can arrange to get all of those
        bytes copied from the file into the Python list of Python
        strings as efficiently as possible instead of having a loop
        written in C or in Python to copy the next part of the file
        into the next string.)

        On the other hand if you instead use the .encode_proposal()
        API (above), then your code can first read in all of the
        262,144 bytes of the segment from the file into a Python
        string, then call .encode_proposal() passing the segment data
        as the "data" argument. The encoder would basically first
        split the "data" argument into a list of 64 in-shares of 4096
        bytes each, and then do the same thing that .encode() does. So
        this would result in a little bit more copying of data and a
        little bit higher of a "maximum memory usage" during the
        process, although it might or might not make a practical
        difference for our current use cases.

        Note that "inshares" is a strange name for the parameter if
        you think of the parameter as being just for feeding in data
        to the codec. It makes more sense if you think of the result
        of this encoding as being the set of shares from inshares plus
        an extra set of "secondary shares" (or "check shares"). It is
        a surprising name! If the API is going to be surprising then
        the name should be surprising. If we switch to
        encode_proposal() above then we should also switch to an
        unsurprising name for this method.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.

        For each call, encode() will return a Deferred that fires with two
        lists, one containing shares and the other containing the shareids.
        The get_share_size() method can be used to determine the length of
        the share strings returned by encode(). Each shareid is a small
        integer, exactly as passed into 'desired_share_ids' (or
        range(max_shares), if desired_share_ids was not provided).

        The shares and their corresponding shareids are required to be kept
        together during storage and retrieval. Specifically, the share data is
        useless by itself: the decoder needs to be told which share is which
        by providing it with both the shareid and the actual share data.

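        For example, the zfec library (which provides the encoding math used
        here) exposes a similar synchronous API; a sketch, with the usual
        caveat that the exact zfec API may differ by version::

         import zfec
         k, m = 3, 10
         data = 'x' * 3000 # data_size must be a multiple of k
         inshares = [data[i*1000:(i+1)*1000] for i in range(k)]
         shares = zfec.Encoder(k, m).encode(inshares)
         # 'shares' holds m blocks: the k primary shares (the inputs)
         # plus m-k newly-computed secondary shares
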
        This function will allocate an amount of memory roughly equal to::

         (max_shares - required_shares) * get_share_size()

        When combined with the memory that the caller must allocate to
        provide the input data, this leads to a memory footprint roughly
        equal to the size of the resulting encoded shares (i.e. the expansion
        factor times the size of the input segment).
        """

    # rejected ideas:
    #
    #  returning a list of (shareidN,shareN) tuples instead of a pair of
    #  lists (shareids..,shares..). Brian thought the tuples would
    #  encourage users to keep the share and shareid together throughout
    #  later processing, Zooko pointed out that the code to iterate
    #  through two lists is not really more complicated than using a list
    #  of tuples and there's also a performance improvement
    #
    #  having 'data_size' not required to be an integral multiple of
    #  'required_shares'. Doing this would require encode() to perform
    #  padding internally, and we'd prefer to have any padding be done
    #  explicitly by the caller. Yes, it is an abstraction leak, but
    #  hopefully not an onerous one.

class ICodecDecoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set the params. They have to be exactly the same ones that were
        used for encoding."""

    def get_needed_shares():
        """Return the number of shares needed to reconstruct the data.
        set_params() is required to be called before this."""

    def decode(some_shares, their_shareids):
        """Decode a partial list of shares into data.

        'some_shares' is required to be a sequence of buffers of sharedata, a
        subset of the shares returned by ICodecEncode.encode(). Each share is
        required to be of the same length. The i'th element of their_shareids
        is required to be the shareid of the i'th buffer in some_shares.

        This returns a Deferred which fires with a sequence of buffers. This
        sequence will contain all of the segments of the original data, in
        order. The sum of the lengths of all of the buffers will be the
        'data_size' value passed into the original ICodecEncode.set_params()
        call. To get back the single original input block of data, use
        ''.join(output_buffers), or you may wish to simply write them in
        order to an output file.

        Note that some of the elements in the result sequence may be
        references to the elements of the some_shares input sequence. In
        particular, this means that if those share objects are mutable (e.g.
        arrays) and if they are changed, then both the input (the
        'some_shares' parameter) and the output (the value given when the
        deferred is triggered) will change.

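        Continuing the zfec sketch from ICodecEncoder.encode() (again, the
        exact zfec API may differ by version)::

         import zfec
         k, m = 3, 10
         decoder = zfec.Decoder(k, m)
         # any k shares will do; here, shares 1, 4 and 7
         recovered = decoder.decode([shares[1], shares[4], shares[7]],
                                    [1, 4, 7])
         assert ''.join(recovered) == data
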
        The length of 'some_shares' is required to be exactly the value of
        'required_shares' passed into the original ICodecEncode.set_params()
        call.
        """

class IEncoder(Interface):
    """I take an object that provides IEncryptedUploadable, which provides
    encrypted data, and a list of shareholders. I then encode, hash, and
    deliver shares to those shareholders. I will compute all the necessary
    Merkle hash trees that are necessary to validate the crypttext that
    eventually comes back from the shareholders. I provide the URI Extension
    Block Hash, and the encoding parameters, both of which must be included
    in the URI.

    I do not choose shareholders, that is left to the IUploader. I must be
    given a dict of RemoteReferences to storage buckets that are ready and
    willing to receive data.
    """

    def set_size(size):
        """Specify the number of bytes that will be encoded. This must be
        performed before get_serialized_params() can be called.
        """

1323 def set_params(params):
1324 """Override the default encoding parameters. 'params' is a tuple of
1325 (k,d,n), where 'k' is the number of required shares, 'd' is the
1326 servers_of_happiness, and 'n' is the total number of shares that will
1329 Encoding parameters can be set in three ways. 1: The Encoder class
1330 provides defaults (3/7/10). 2: the Encoder can be constructed with
an 'options' dictionary, in which the
'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
1333 set_params((k,d,n)) can be called.
1335 If you intend to use set_params(), you must call it before
get_share_size() or get_param() is called."""
1339 def set_encrypted_uploadable(u):
1340 """Provide a source of encrypted upload data. 'u' must implement
1341 IEncryptedUploadable.
1343 When this is called, the IEncryptedUploadable will be queried for its
1344 length and the storage_index that should be used.
1346 This returns a Deferred that fires with this Encoder instance.
1348 This must be performed before start() can be called.
1351 def get_param(name):
1352 """Return an encoding parameter, by name.
1354 'storage_index': return a string with the (16-byte truncated SHA-256
hash) storage index to which these shares should be pushed.
1358 'share_counts': return a tuple describing how many shares are used:
1359 (needed_shares, servers_of_happiness, total_shares)
'num_segments': return an int with the number of segments that
will be encoded.
1364 'segment_size': return an int with the size of each segment.
1366 'block_size': return the size of the individual blocks that will
1367 be delivered to a shareholder's put_block() method. By
1368 knowing this, the shareholder will be able to keep all
1369 blocks in a single file and still provide random access
1370 when reading them. # TODO: can we avoid exposing this?
1372 'share_size': an int with the size of the data that will be stored
on each shareholder. This is the aggregate amount of data
1374 that will be sent to the shareholder, summed over all
1375 the put_block() calls I will ever make. It is useful to
1376 determine this size before asking potential
1377 shareholders whether they will grant a lease or not,
1378 since their answers will depend upon how much space we
1379 need. TODO: this might also include some amount of
1380 overhead, like the size of all the hashes. We need to
1381 decide whether this is useful or not.
1383 'serialized_params': a string with a concise description of the
1384 codec name and its parameters. This may be passed
1385 into the IUploadable to let it make sure that
1386 the same file encoded with different parameters
1387 will result in different storage indexes.
1389 Once this is called, set_size() and set_params() may not be called.
1392 def set_shareholders(shareholders, servermap):
1393 """Tell the encoder where to put the encoded shares. 'shareholders'
1394 must be a dictionary that maps share number (an integer ranging from
1395 0 to n-1) to an instance that provides IStorageBucketWriter.
1396 'servermap' is a dictionary that maps share number (as defined above)
to a set of peerids. This must be performed before start() can be
invoked."""
1401 """Begin the encode/upload process. This involves reading encrypted
1402 data from the IEncryptedUploadable, encoding it, uploading the shares
1403 to the shareholders, then sending the hash trees.
1405 set_encrypted_uploadable() and set_shareholders() must be called
1406 before this can be invoked.
1408 This returns a Deferred that fires with a verify cap when the upload
1409 process is complete. The verifycap, plus the encryption key, is
1410 sufficient to construct the read cap.
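
# Editor's note: a sketch (hypothetical helper, not part of this module) of
# the calling sequence the IEncoder docstrings above describe: attach the
# IEncryptedUploadable, tell the encoder where shares go, then start.

def _example_encode_and_push(encoder, encrypted_uploadable,
                             shareholders, servermap):
    d = encoder.set_encrypted_uploadable(encrypted_uploadable)
    def _start(encoder_):
        encoder_.set_shareholders(shareholders, servermap)
        return encoder_.start()  # fires with the verify cap
    d.addCallback(_start)
    return d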
1413 class IDecoder(Interface):
1414 """I take a list of shareholders and some setup information, then
1415 download, validate, decode, and decrypt data from them, writing the
1416 results to an output file.
1418 I do not locate the shareholders, that is left to the IDownloader. I must
be given a dict of RemoteReferences to storage buckets that are ready to
provide data."""
1424 """I take a file-like object (providing write and close) to which all
1425 the plaintext data will be written.
TODO: producer/consumer. Maybe write() should return a Deferred that
1428 indicates when it will accept more data? But probably having the
1429 IDecoder be a producer is easier to glue to IConsumer pieces.
1432 def set_shareholders(shareholders):
1433 """I take a dictionary that maps share identifiers (small integers)
to RemoteReferences that provide RIBucketReader. This must be called
before start()."""
1438 """I start the download. This process involves retrieving data and
1439 hash chains from the shareholders, using the hashes to validate the
1440 data, decoding the shares into segments, decrypting the segments,
1441 then writing the resulting plaintext to the output file.
I return a Deferred that will fire (with self) when the download is
complete."""
1447 class IDownloadTarget(Interface):
1448 # Note that if the IDownloadTarget is also an IConsumer, the downloader
1449 # will register itself as a producer. This allows the target to invoke
1450 # downloader.pauseProducing, resumeProducing, and stopProducing.
1452 """Called before any calls to write() or close(). If an error
1453 occurs before any data is available, fail() may be called without
1454 a previous call to open().
1456 'size' is the length of the file being downloaded, in bytes."""
1459 """Output some data to the target."""
1461 """Inform the target that there is no more data to be written."""
1463 """fail() is called to indicate that the download has failed. 'why'
1464 is a Failure object indicating what went wrong. No further methods
1465 will be invoked on the IDownloadTarget after fail()."""
1466 def register_canceller(cb):
1467 """The CiphertextDownloader uses this to register a no-argument function
1468 that the target can call to cancel the download. Once this canceller
1469 is invoked, no further calls to write() or close() will be made."""
1471 """When the CiphertextDownloader is done, this finish() function will be
1472 called. Whatever it returns will be returned to the invoker of
1473 Downloader.download.
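
# Editor's note: a minimal file-writing IDownloadTarget sketch (hypothetical
# class; error handling is elided, and a real implementation would also
# declare the interface with zope.interface).

class _ExampleFileTarget:
    def __init__(self, filename):
        self._filename = filename
        self._f = None
    def open(self, size):
        self._f = open(self._filename, "wb")  # 'size' is advisory here
    def write(self, data):
        self._f.write(data)
    def close(self):
        self._f.close()
    def fail(self, why):
        if self._f:
            self._f.close()  # 'why' is a Failure; no more calls will arrive
    def register_canceller(self, cb):
        pass  # this target never needs to cancel the download
    def finish(self):
        return self._filename  # becomes the result of Downloader.download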
1476 class IDownloader(Interface):
1477 def download(uri, target):
1478 """Perform a CHK download, sending the data to the given target.
1479 'target' must provide IDownloadTarget.
1481 Returns a Deferred that fires (with the results of target.finish)
1482 when the download is finished, or errbacks if something went wrong."""
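
# Editor's note: a usage sketch (hypothetical names) pairing an IDownloader
# with the example target above.

def _example_download_to_file(downloader, uri, filename):
    target = _ExampleFileTarget(filename)
    # fires with target.finish(), i.e. the filename, or errbacks on failure
    return downloader.download(uri, target)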
1484 class IEncryptedUploadable(Interface):
1485 def set_upload_status(upload_status):
1486 """Provide an IUploadStatus object that should be filled with status
1487 information. The IEncryptedUploadable is responsible for setting
1488 key-determination progress ('chk'), size, storage_index, and
1489 ciphertext-fetch progress. It may delegate some of this
1490 responsibility to others, in particular to the IUploadable."""
1493 """This behaves just like IUploadable.get_size()."""
1495 def get_all_encoding_parameters():
1496 """Return a Deferred that fires with a tuple of
1497 (k,happy,n,segment_size). The segment_size will be used as-is, and
1498 must match the following constraints: it must be a multiple of k, and
1499 it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored
as padding).
This usually passes through to the IUploadable method of the same
name.
1506 The encoder strictly obeys the values returned by this method. To
1507 make an upload use non-default encoding parameters, you must arrange
1508 to control the values that this method returns.
1511 def get_storage_index():
1512 """Return a Deferred that fires with a 16-byte storage index.
1515 def read_encrypted(length, hash_only):
1516 """This behaves just like IUploadable.read(), but returns crypttext
1517 instead of plaintext. If hash_only is True, then this discards the
1518 data (and returns an empty list); this improves efficiency when
1519 resuming an interrupted upload (where we need to compute the
1520 plaintext hashes, but don't need the redundant encrypted data)."""
1522 def get_plaintext_hashtree_leaves(first, last, num_segments):
1523 """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
1524 plaintext segments, i.e. get the tagged hashes of the given segments.
1525 The segment size is expected to be generated by the
1526 IEncryptedUploadable before any plaintext is read or ciphertext
produced, so that the segment hashes can be generated with only a
single pass over the plaintext.
1530 This returns a Deferred which fires with a sequence of hashes, using:
1532 tuple(segment_hashes[first:last])
1534 'num_segments' is used to assert that the number of segments that the
1535 IEncryptedUploadable handled matches the number of segments that the
1536 encoder was expecting.
1538 This method must not be called until the final byte has been read
1539 from read_encrypted(). Once this method is called, read_encrypted()
1540 can never be called again.
1543 def get_plaintext_hash():
1544 """OBSOLETE; Get the hash of the whole plaintext.
1546 This returns a Deferred which fires with a tagged SHA-256 hash of the
1547 whole plaintext, obtained from hashutil.plaintext_hash(data).
1551 """Just like IUploadable.close()."""
1553 class IUploadable(Interface):
1554 def set_upload_status(upload_status):
1555 """Provide an IUploadStatus object that should be filled with status
1556 information. The IUploadable is responsible for setting
1557 key-determination progress ('chk')."""
1559 def set_default_encoding_parameters(params):
1560 """Set the default encoding parameters, which must be a dict mapping
1561 strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1562 'max_segment_size'. These might have an influence on the final
1563 encoding parameters returned by get_all_encoding_parameters(), if the
1564 Uploadable doesn't have more specific preferences.
1566 This call is optional: if it is not used, the Uploadable will use
1567 some built-in defaults. If used, this method must be called before
1568 any other IUploadable methods to have any effect.
1572 """Return a Deferred that will fire with the length of the data to be
1573 uploaded, in bytes. This will be called before the data is actually
1574 used, to compute encoding parameters.
1577 def get_all_encoding_parameters():
1578 """Return a Deferred that fires with a tuple of
1579 (k,happy,n,segment_size). The segment_size will be used as-is, and
1580 must match the following constraints: it must be a multiple of k, and
1581 it shouldn't be unreasonably larger than the file size (if
segment_size is larger than filesize, the difference must be stored
as padding).
1585 The relative values of k and n allow some IUploadables to request
better redundancy than others (in exchange for consuming more space
in the grid).
1589 Larger values of segment_size reduce hash overhead, while smaller
1590 values reduce memory footprint and cause data to be delivered in
1591 smaller pieces (which may provide a smoother and more predictable
1592 download experience).
1594 The encoder strictly obeys the values returned by this method. To
1595 make an upload use non-default encoding parameters, you must arrange
1596 to control the values that this method returns. One way to influence
them may be to call set_default_encoding_parameters() before calling
1598 get_all_encoding_parameters().
1601 def get_encryption_key():
1602 """Return a Deferred that fires with a 16-byte AES key. This key will
be used to encrypt the data. The key will also be hashed to derive
the storage index.
1606 Uploadables which want to achieve convergence should hash their file
1607 contents and the serialized_encoding_parameters to form the key
1608 (which of course requires a full pass over the data). Uploadables can
use the upload.ConvergentUploadMixin class to achieve this
automatically.
1612 Uploadables which do not care about convergence (or do not wish to
1613 make multiple passes over the data) can simply return a
1614 strongly-random 16 byte string.
1616 get_encryption_key() may be called multiple times: the IUploadable is
1617 required to return the same value each time.
1621 """Return a Deferred that fires with a list of strings (perhaps with
1622 only a single element) which, when concatenated together, contain the
1623 next 'length' bytes of data. If EOF is near, this may provide fewer
1624 than 'length' bytes. The total number of bytes provided by read()
1625 before it signals EOF must equal the size provided by get_size().
1627 If the data must be acquired through multiple internal read
1628 operations, returning a list instead of a single string may help to
1629 reduce string copies. However, the length of the concatenated strings
1630 must equal the amount of data requested, unless EOF is encountered.
1631 Long reads, or short reads without EOF, are not allowed. read()
1632 should return the same amount of data as a local disk file read, just
1633 in a different shape and asynchronously.
1635 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1636 so a 10kB file means length=3kB, 100kB file means length=30kB,
1637 and >=1MB file means length=300kB.
1639 This method provides for a single full pass through the data. Later
1640 use cases may desire multiple passes or access to only parts of the
1641 data (such as a mutable file making small edits-in-place). This API
1642 will be expanded once those use cases are better understood.
1646 """The upload is finished, and whatever filehandle was in use may be
1649 class IUploadResults(Interface):
1650 """I am returned by upload() methods. I contain a number of public
1651 attributes which can be read to determine the results of the upload. Some
of these are functional, some are timing information. All of these may be
None.::
1655 .file_size : the size of the file, in bytes
1656 .uri : the CHK read-cap for the file
1657 .ciphertext_fetched : how many bytes were fetched by the helper
1658 .sharemap: dict mapping share identifier to set of serverids
1659 (binary strings). This indicates which servers were given
1660 which shares. For immutable files, the shareid is an
1661 integer (the share number, from 0 to N-1). For mutable
1662 files, it is a string of the form 'seq%d-%s-sh%d',
containing the sequence number, the roothash, and the
share number.
1665 .servermap : dict mapping server peerid to a set of share numbers
1666 .timings : dict of timing information, mapping name to seconds (float)
1667 total : total upload time, start to finish
1668 storage_index : time to compute the storage index
1669 peer_selection : time to decide which peers will be used
1670 contacting_helper : initial helper query to upload/no-upload decision
1671 existence_check : helper pre-upload existence check
1672 helper_total : initial helper query to helper finished pushing
1673 cumulative_fetch : helper waiting for ciphertext requests
1674 total_fetch : helper start to last ciphertext response
1675 cumulative_encoding : just time spent in zfec
1676 cumulative_sending : just time spent waiting for storage servers
1677 hashes_and_close : last segment push to shareholder close
1678 total_encode_and_push : first encode to shareholder close
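
# Editor's note: a sketch (hypothetical callback) showing how the public
# attributes above might be read once upload() fires with its results.

def _example_report_upload(results):
    print "uri:", results.uri
    print "size:", results.file_size, "bytes"
    print "total time: %.2fs" % results.timings["total"]
    return results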
1682 class IDownloadResults(Interface):
1683 """I am created internally by download() methods. I contain a number of
1684 public attributes which contain details about the download process.::
1686 .file_size : the size of the file, in bytes
1687 .servers_used : set of server peerids that were used during download
1688 .server_problems : dict mapping server peerid to a problem string. Only
servers that had problems (bad hashes, disconnects) are listed here.
1691 .servermap : dict mapping server peerid to a set of share numbers. Only
1692 servers that had any shares are listed here.
1693 .timings : dict of timing information, mapping name to seconds (float)
1694 peer_selection : time to ask servers about shares
1695 servers_peer_selection : dict of peerid to DYHB-query time
1696 uri_extension : time to fetch a copy of the URI extension block
1697 hashtrees : time to fetch the hash trees
1698 segments : time to fetch, decode, and deliver segments
1699 cumulative_fetch : time spent waiting for storage servers
1700 cumulative_decode : just time spent in zfec
1701 cumulative_decrypt : just time spent in decryption
1702 total : total download time, start to finish
1703 fetch_per_server : dict of peerid to list of per-segment fetch times
1707 class IUploader(Interface):
1708 def upload(uploadable):
1709 """Upload the file. 'uploadable' must impement IUploadable. This
1710 returns a Deferred which fires with an IUploadResults instance, from
1711 which the URI of the file can be obtained as results.uri ."""
1713 def upload_ssk(write_capability, new_version, uploadable):
1714 """TODO: how should this work?"""
1716 class ICheckable(Interface):
1717 def check(monitor, verify=False, add_lease=False):
1718 """Check upon my health, optionally repairing any problems.
1720 This returns a Deferred that fires with an instance that provides
ICheckResults, or None if the object is non-distributed (i.e. LIT
files).
1724 The monitor will be checked periodically to see if the operation has
1725 been cancelled. If so, no new queries will be sent, and the Deferred
will fire (with an OperationCancelledError) immediately.
1728 Filenodes and dirnodes (which provide IFilesystemNode) are also
1729 checkable. Instances that represent verifier-caps will be checkable
1730 but not downloadable. Some objects (like LIT files) do not actually
1731 live in the grid, and their checkers return None (non-distributed
1732 files are always healthy).
1734 If verify=False, a relatively lightweight check will be performed: I
1735 will ask all servers if they have a share for me, and I will believe
1736 whatever they say. If there are at least N distinct shares on the
1737 grid, my results will indicate r.is_healthy()==True. This requires a
1738 roundtrip to each server, but does not transfer very much data, so
1739 the network bandwidth is fairly low.
1741 If verify=True, a more resource-intensive check will be performed:
1742 every share will be downloaded, and the hashes will be validated on
1743 every bit. I will ignore any shares that failed their hash checks. If
1744 there are at least N distinct valid shares on the grid, my results
1745 will indicate r.is_healthy()==True. This requires N/k times as much
1746 download bandwidth (and server disk IO) as a regular download. If a
1747 storage server is holding a corrupt share, or is experiencing memory
1748 failures during retrieval, or is malicious or buggy, then
1749 verification will detect the problem, but checking will not.
1751 If add_lease=True, I will ensure that an up-to-date lease is present
on each share. The lease secrets will be derived from my node secret
1753 (in BASEDIR/private/secret), so either I will add a new lease to the
1754 share, or I will merely renew the lease that I already had. In a
1755 future version of the storage-server protocol (once Accounting has
1756 been implemented), there may be additional options here to define the
1757 kind of lease that is obtained (which account number to claim, etc).
1759 TODO: any problems seen during checking will be reported to the
1760 health-manager.furl, a centralized object which is responsible for
figuring out why files are unhealthy so corrective action can be
taken."""
1765 def check_and_repair(monitor, verify=False, add_lease=False):
1766 """Like check(), but if the file/directory is not healthy, attempt to
1769 Any non-healthy result will cause an immediate repair operation, to
1770 generate and upload new shares. After repair, the file will be as
1771 healthy as we can make it. Details about what sort of repair is done
1772 will be put in the check-and-repair results. The Deferred will not
1773 fire until the repair is complete.
1775 This returns a Deferred which fires with an instance of
1776 ICheckAndRepairResults."""
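
# Editor's note: a sketch (hypothetical helper) of a lightweight check
# followed by a conditional repair, for a 'node' that provides both
# ICheckable and IRepairable; 'monitor' is the usual Monitor instance.

def _example_check_then_repair(node, monitor):
    d = node.check(monitor, verify=False)
    def _maybe_repair(check_results):
        if check_results is None or check_results.is_healthy():
            return check_results  # LIT object, or already healthy
        return node.repair(check_results)
    d.addCallback(_maybe_repair)
    return d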
1778 class IDeepCheckable(Interface):
1779 def start_deep_check(verify=False, add_lease=False):
1780 """Check upon the health of me and everything I can reach.
This is a recursive form of check(), usable only on dirnodes.
I return a Monitor, with results that are an IDeepCheckResults
object.
1787 TODO: If any of the directories I traverse are unrecoverable, the
1788 Monitor will report failure. If any of the files I check upon are
1789 unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a
failure."""
1794 def start_deep_check_and_repair(verify=False, add_lease=False):
1795 """Check upon the health of me and everything I can reach. Repair
1796 anything that isn't healthy.
This is a recursive form of check_and_repair(), usable only on
dirnodes.
1801 I return a Monitor, with results that are an
1802 IDeepCheckAndRepairResults object.
1804 TODO: If any of the directories I traverse are unrecoverable, the
1805 Monitor will report failure. If any of the files I check upon are
1806 unrecoverable, those problems will be reported in the
IDeepCheckResults as usual, and the Monitor will not report a
failure."""
1811 class ICheckResults(Interface):
1812 """I contain the detailed results of a check/verify operation.
1815 def get_storage_index():
1816 """Return a string with the (binary) storage index."""
1817 def get_storage_index_string():
1818 """Return a string with the (printable) abbreviated storage index."""
1820 """Return the (string) URI of the object that was checked."""
1823 """Return a boolean, True if the file/dir is fully healthy, False if
it is damaged in any way. Non-distributed LIT files always return
True."""
1827 def is_recoverable():
1828 """Return a boolean, True if the file/dir can be recovered, False if
1829 not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
1830 files always return True."""
1832 def needs_rebalancing():
1833 """Return a boolean, True if the file/dir's reliability could be
1834 improved by moving shares to new servers. Non-distributed LIT files
1835 always return False."""
1839 """Return a dictionary that describes the state of the file/dir. LIT
1840 files always return an empty dictionary. Normal files and directories
1841 return a dictionary with the following keys (note that these use
1842 binary strings rather than base32-encoded ones) (also note that for
1843 mutable files, these counts are for the 'best' version):
1845 count-shares-good: the number of distinct good shares that were found
1846 count-shares-needed: 'k', the number of shares required for recovery
1847 count-shares-expected: 'N', the number of total shares generated
1848 count-good-share-hosts: the number of distinct storage servers with
1849 good shares. If this number is less than
1850 count-shares-good, then some shares are
1851 doubled up, increasing the correlation of
1852 failures. This indicates that one or more
1853 shares should be moved to an otherwise unused
1854 server, if one is available.
1855 count-corrupt-shares: the number of shares with integrity failures
1856 list-corrupt-shares: a list of 'share locators', one for each share
1857 that was found to be corrupt. Each share
locator is a list of (serverid, storage_index,
sharenum).
1860 count-incompatible-shares: the number of shares which are of a share
1861 format unknown to this checker
1862 list-incompatible-shares: a list of 'share locators', one for each
1863 share that was found to be of an unknown
1864 format. Each share locator is a list of
1865 (serverid, storage_index, sharenum).
1866 servers-responding: list of (binary) storage server identifiers,
1867 one for each server which responded to the share
1868 query (even if they said they didn't have
1869 shares, and even if they said they did have
1870 shares but then didn't send them when asked, or
1871 dropped the connection, or returned a Failure,
1872 and even if they said they did have shares and
1873 sent incorrect ones when asked)
1874 sharemap: dict mapping share identifier to list of serverids
1875 (binary strings). This indicates which servers are holding
1876 which shares. For immutable files, the shareid is an
1877 integer (the share number, from 0 to N-1). For mutable
1878 files, it is a string of the form 'seq%d-%s-sh%d',
containing the sequence number, the roothash, and the
share number.
1882 The following keys are most relevant for mutable files, but immutable
1883 files will provide sensible values too::
1885 count-wrong-shares: the number of shares for versions other than the
1886 'best' one (which is defined as being the
1887 recoverable version with the highest sequence
1888 number, then the highest roothash). These are
1889 either leftover shares from an older version
1890 (perhaps on a server that was offline when an
1891 update occurred), shares from an unrecoverable
1892 newer version, or shares from an alternate
1893 current version that results from an
1894 uncoordinated write collision. For a healthy
1895 file, this will equal 0.
1897 count-recoverable-versions: the number of recoverable versions of
the file. For a healthy file, this will
be 1.
1901 count-unrecoverable-versions: the number of unrecoverable versions
of the file. For a healthy file, this
will be 0."""
1908 """Return a string with a brief (one-line) summary of the results."""
1911 """Return a list of strings with more detailed results."""
1913 class ICheckAndRepairResults(Interface):
1914 """I contain the detailed results of a check/verify/repair operation.
The IFilesystemNode.check_and_repair() method returns instances that
provide ICheckAndRepairResults.
1920 def get_storage_index():
1921 """Return a string with the (binary) storage index."""
1922 def get_storage_index_string():
1923 """Return a string with the (printable) abbreviated storage index."""
1924 def get_repair_attempted():
1925 """Return a boolean, True if a repair was attempted. We might not
1926 attempt to repair the file because it was healthy, or healthy enough
1927 (i.e. some shares were missing but not enough to exceed some
1928 threshold), or because we don't know how to repair this object."""
1929 def get_repair_successful():
1930 """Return a boolean, True if repair was attempted and the file/dir
1931 was fully healthy afterwards. False if no repair was attempted or if
1932 a repair attempt failed."""
1933 def get_pre_repair_results():
1934 """Return an ICheckResults instance that describes the state of the
1935 file/dir before any repair was attempted."""
1936 def get_post_repair_results():
1937 """Return an ICheckResults instance that describes the state of the
1938 file/dir after any repair was attempted. If no repair was attempted,
1939 the pre-repair and post-repair results will be identical."""
1942 class IDeepCheckResults(Interface):
1943 """I contain the results of a deep-check operation.
This is returned by a call to IDeepCheckable.start_deep_check().
1948 def get_root_storage_index_string():
1949 """Return the storage index (abbreviated human-readable string) of
1950 the first object checked."""
1952 """Return a dictionary with the following keys::
1954 count-objects-checked: count of how many objects were checked
count-objects-healthy: how many of those objects were completely
healthy.
1957 count-objects-unhealthy: how many were damaged in some way
1958 count-objects-unrecoverable: how many were unrecoverable
1959 count-corrupt-shares: how many shares were found to have
corruption, summed over all objects examined.
"""
1964 def get_corrupt_shares():
1965 """Return a set of (serverid, storage_index, sharenum) for all shares
that were found to be corrupt. Both serverid and storage_index are
binary."""
1969 def get_all_results():
1970 """Return a dictionary mapping pathname (a tuple of strings, ready to
1971 be slash-joined) to an ICheckResults instance, one for each object
1972 that was checked."""
1974 def get_results_for_storage_index(storage_index):
1975 """Retrive the ICheckResults instance for the given (binary)
1976 storage index. Raises KeyError if there are no results for that
1980 """Return a dictionary with the same keys as
1981 IDirectoryNode.deep_stats()."""
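
# Editor's note: a sketch (hypothetical function) of walking an
# IDeepCheckResults instance; the one-line summary accessor is assumed
# here to be named get_summary(), per the ICheckResults docstrings above.

def _example_list_unhealthy(deep_results):
    unhealthy = 0
    for path, r in deep_results.get_all_results().items():
        if not r.is_healthy():
            unhealthy += 1
            print "/".join(path), ":", r.get_summary()
    print unhealthy, "unhealthy object(s)"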
1983 class IDeepCheckAndRepairResults(Interface):
1984 """I contain the results of a deep-check-and-repair operation.
This is returned by a call to IDeepCheckable.start_deep_check_and_repair().
1989 def get_root_storage_index_string():
1990 """Return the storage index (abbreviated human-readable string) of
1991 the first object checked."""
1993 """Return a dictionary with the following keys::
count-objects-checked: count of how many objects were checked
count-objects-healthy-pre-repair: how many of those objects were
completely healthy (before any repair)
count-objects-unhealthy-pre-repair: how many were damaged in
some way (before any repair)
count-objects-unrecoverable-pre-repair: how many were unrecoverable
count-objects-healthy-post-repair: how many of those objects were
completely healthy (after any repair)
count-objects-unhealthy-post-repair: how many were damaged in
some way (after any repair)
count-objects-unrecoverable-post-repair: how many were
unrecoverable (after any repair)
count-repairs-attempted: repairs were attempted on this many
objects. The count-repairs- keys will
always be provided; however, unless
repair=true is present, they will all
be zero.
count-repairs-successful: how many repairs resulted in healthy
objects
count-repairs-unsuccessful: how many repair attempts did not
result in completely healthy objects
count-corrupt-shares-pre-repair: how many shares were found to
have corruption, summed over all
objects examined (before any repair)
count-corrupt-shares-post-repair: how many shares were found to
have corruption, summed over all
objects examined (after any repair)
"""
2029 """Return a dictionary with the same keys as
2030 IDirectoryNode.deep_stats()."""
2032 def get_corrupt_shares():
2033 """Return a set of (serverid, storage_index, sharenum) for all shares
2034 that were found to be corrupt before any repair was attempted. Both
2035 serverid and storage_index are binary.
2037 def get_remaining_corrupt_shares():
2038 """Return a set of (serverid, storage_index, sharenum) for all shares
2039 that were found to be corrupt after any repair was completed. Both
2040 serverid and storage_index are binary. These are shares that need
2041 manual inspection and probably deletion.
2043 def get_all_results():
2044 """Return a dictionary mapping pathname (a tuple of strings, ready to
2045 be slash-joined) to an ICheckAndRepairResults instance, one for each
2046 object that was checked."""
2048 def get_results_for_storage_index(storage_index):
2049 """Retrive the ICheckAndRepairResults instance for the given (binary)
2050 storage index. Raises KeyError if there are no results for that
2054 class IRepairable(Interface):
2055 def repair(check_results):
2056 """Attempt to repair the given object. Returns a Deferred that fires
with an IRepairResults object.
2059 I must be called with an object that implements ICheckResults, as
2060 proof that you have actually discovered a problem with this file. I
2061 will use the data in the checker results to guide the repair process,
2062 such as which servers provided bad data and should therefore be
2063 avoided. The ICheckResults object is inside the
2064 ICheckAndRepairResults object, which is returned by the
2065 ICheckable.check() method::
2067 d = filenode.check(repair=False)
2068 def _got_results(check_and_repair_results):
2069 check_results = check_and_repair_results.get_pre_repair_results()
2070 return filenode.repair(check_results)
2071 d.addCallback(_got_results)
2075 class IRepairResults(Interface):
2076 """I contain the results of a repair operation."""
def get_successful():
2078 """Returns a boolean: True if the repair made the file healthy, False
2079 if not. Repair failure generally indicates a file that has been
2080 damaged beyond repair."""
2083 class IClient(Interface):
2084 def upload(uploadable):
2085 """Upload some data into a CHK, get back the UploadResults for it.
2086 @param uploadable: something that implements IUploadable
2087 @return: a Deferred that fires with the UploadResults instance.
To get the URI for this file, use results.uri."""
2091 def create_mutable_file(contents=""):
2092 """Create a new mutable file (with initial) contents, get back the
2095 @param contents: (bytestring, callable, or None): this provides the
2096 initial contents of the mutable file. If 'contents' is a bytestring,
2097 it will be used as-is. If 'contents' is a callable, it will be
2098 invoked with the new MutableFileNode instance and is expected to
2099 return a bytestring with the initial contents of the file (the
2100 callable can use node.get_writekey() to decide how to encrypt the
2101 initial contents, e.g. for a brand new dirnode with initial
children). contents=None is equivalent to an empty string. Using a
callable 'contents' is more efficient than creating a mutable file
and setting its contents in two separate operations.
2106 @return: a Deferred that fires with an IMutableFileNode instance.
2109 def create_dirnode(initial_children={}):
2110 """Create a new unattached dirnode, possibly with initial children.
2112 @param initial_children: dict with keys that are unicode child names,
2113 and values that are (childnode, metadata) tuples.
2115 @return: a Deferred that fires with the new IDirectoryNode instance.
2118 def create_node_from_uri(uri, rouri):
2119 """Create a new IFilesystemNode instance from the uri, synchronously.
2120 @param uri: a string or IURI-providing instance, or None. This could
2121 be for a LiteralFileNode, a CHK file node, a mutable file
2122 node, or a directory node
2123 @param rouri: a string or IURI-providing instance, or None. If the
2124 main uri is None, I will use the rouri instead. If I
2125 recognize the format of the main uri, I will ignore the
2126 rouri (because it can be derived from the writecap).
2128 @return: an instance that provides IFilesystemNode (or more usefully
2129 one of its subclasses). File-specifying URIs will result in
2130 IFileNode-providing instances, like ImmutableFileNode,
2131 LiteralFileNode, or MutableFileNode. Directory-specifying
URIs will result in IDirectoryNode-providing instances, like
DirectoryNode."""
2136 class INodeMaker(Interface):
2137 """The NodeMaker is used to create IFilesystemNode instances. It can
2138 accept a filecap/dircap string and return the node right away. It can
2139 also create new nodes (i.e. upload a file, or create a mutable file)
2140 asynchronously. Once you have one of these nodes, you can use other
2141 methods to determine whether it is a file or directory, and to download
2142 or modify its contents.
2144 The NodeMaker encapsulates all the authorities that these
2145 IFilesystemNodes require (like references to the StorageFarmBroker). Each
2146 Tahoe process will typically have a single NodeMaker, but unit tests may
2147 create simplified/mocked forms for testing purposes.
2149 def create_from_cap(writecap, readcap=None, **kwargs):
2150 """I create an IFilesystemNode from the given writecap/readcap. I can
2151 only provide nodes for existing file/directory objects: use my other
2152 methods to create new objects. I return synchronously."""
2154 def create_mutable_file(contents=None, keysize=None):
2155 """I create a new mutable file, and return a Deferred which will fire
2156 with the IMutableFileNode instance when it is ready. If contents= is
2157 provided (a bytestring), it will be used as the initial contents of
2158 the new file, otherwise the file will contain zero bytes. keysize= is
for use by unit tests, to create mutable files that are smaller than
usual."""
2162 def create_new_mutable_directory(initial_children={}):
2163 """I create a new mutable directory, and return a Deferred which will
2164 fire with the IDirectoryNode instance when it is ready. If
2165 initial_children= is provided (a dict mapping unicode child name to
2166 (childnode, metadata_dict) tuples), the directory will be populated
2167 with those children, otherwise it will be empty."""
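
# Editor's note: a usage sketch (hypothetical function) for INodeMaker:
# caps become nodes synchronously, new objects are created asynchronously.

def _example_nodemaker(nodemaker, filecap):
    node = nodemaker.create_from_cap(filecap)  # synchronous
    d = nodemaker.create_mutable_file("initial contents\n")  # a Deferred
    return node, d  # d fires with the new IMutableFileNode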
2169 class IClientStatus(Interface):
2170 def list_all_uploads():
2171 """Return a list of uploader objects, one for each upload which
2172 currently has an object available (tracked with weakrefs). This is
2173 intended for debugging purposes."""
2174 def list_active_uploads():
2175 """Return a list of active IUploadStatus objects."""
2176 def list_recent_uploads():
2177 """Return a list of IUploadStatus objects for the most recently
2180 def list_all_downloads():
2181 """Return a list of downloader objects, one for each download which
2182 currently has an object available (tracked with weakrefs). This is
2183 intended for debugging purposes."""
2184 def list_active_downloads():
2185 """Return a list of active IDownloadStatus objects."""
2186 def list_recent_downloads():
2187 """Return a list of IDownloadStatus objects for the most recently
2188 started downloads."""
2190 class IUploadStatus(Interface):
2192 """Return a timestamp (float with seconds since epoch) indicating
2193 when the operation was started."""
2194 def get_storage_index():
2195 """Return a string with the (binary) storage index in use on this
upload. Returns None if the storage index has not yet been
calculated."""
2199 """Return an integer with the number of bytes that will eventually
2200 be uploaded for this file. Returns None if the size is not yet known.
2203 """Return True if this upload is using a Helper, False if not."""
2205 """Return a string describing the current state of the upload
2208 """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
2209 each from 0.0 to 1.0 . 'chk' describes how much progress has been
2210 made towards hashing the file to determine a CHK encryption key: if
2211 non-convergent encryption is in use, this will be trivial, otherwise
2212 the whole file must be hashed. 'ciphertext' describes how much of the
2213 ciphertext has been pushed to the helper, and is '1.0' for non-helper
2214 uploads. 'encode_and_push' describes how much of the encode-and-push
2215 process has finished: for helper uploads this is dependent upon the
2216 helper providing progress reports. It might be reasonable to add all
2217 three numbers and report the sum to the user."""
2219 """Return True if the upload is currently active, False if not."""
2221 """Return an instance of UploadResults (which contains timing and
sharemap information). Might return None if the upload is not yet
finished."""
2225 """Each upload status gets a unique number: this method returns that
2226 number. This provides a handle to this particular upload, so a web
2227 page can generate a suitable hyperlink."""
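
# Editor's note: a sketch of the summation suggested in the progress
# docstring above; the accessor is assumed here to be named get_progress().
# The result ranges from 0.0 to 3.0.

def _example_overall_progress(upload_status):
    chk, ciphertext, encode_and_push = upload_status.get_progress()
    return chk + ciphertext + encode_and_push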
2229 class IDownloadStatus(Interface):
2231 """Return a timestamp (float with seconds since epoch) indicating
2232 when the operation was started."""
2233 def get_storage_index():
2234 """Return a string with the (binary) storage index in use on this
download. This may be None if there is no storage index (i.e. LIT
files)."""
2238 """Return an integer with the number of bytes that will eventually be
2239 retrieved for this file. Returns None if the size is not yet known.
2242 """Return True if this download is using a Helper, False if not."""
2244 """Return a string describing the current state of the download
2247 """Returns a float (from 0.0 to 1.0) describing the amount of the
2248 download that has completed. This value will remain at 0.0 until the
2249 first byte of plaintext is pushed to the download target."""
2251 """Return True if the download is currently active, False if not."""
2253 """Each download status gets a unique number: this method returns
2254 that number. This provides a handle to this particular download, so a
2255 web page can generate a suitable hyperlink."""
class IServermapUpdaterStatus(Interface):
    pass

class IPublishStatus(Interface):
    pass

class IRetrieveStatus(Interface):
    pass
2264 class NotCapableError(Exception):
2265 """You have tried to write to a read-only node."""
class BadWriteEnablerError(Exception):
    pass
2270 class RIControlClient(RemoteInterface):
2272 def wait_for_client_connections(num_clients=int):
2273 """Do not return until we have connections to at least NUM_CLIENTS
2277 def upload_from_file_to_uri(filename=str,
2278 convergence=ChoiceOf(None,
2279 StringConstraint(2**20))):
2280 """Upload a file to the grid. This accepts a filename (which must be
2281 absolute) that points to a file on the node's local disk. The node will
2282 read the contents of this file, upload it to the grid, then return the
2283 URI at which it was uploaded. If convergence is None then a random
2284 encryption key will be used, else the plaintext will be hashed, then
that hash will be mixed together with the "convergence" string to form
the encryption key."""
2290 def download_from_uri_to_file(uri=URI, filename=str):
2291 """Download a file from the grid, placing it on the node's local disk
2292 at the given filename (which must be absolute[?]). Returns the
2293 absolute filename where the file was written."""
2298 def get_memory_usage():
2299 """Return a dict describes the amount of memory currently in use. The
2300 keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
2301 measuring memory consupmtion in bytes."""
2302 return DictOf(str, int)
2304 def speed_test(count=int, size=int, mutable=Any()):
2305 """Write 'count' tempfiles to disk, all of the given size. Measure
2306 how long (in seconds) it takes to upload them all to the servers.
2307 Then measure how long it takes to download all of them. If 'mutable'
2308 is 'create', time creation of mutable files. If 'mutable' is
'upload', then time access to the same mutable file instead of
creating new ones.
2312 Returns a tuple of (upload_time, download_time).
2314 return (float, float)
2316 def measure_peer_response_time():
2317 """Send a short message to each connected peer, and measure the time
2318 it takes for them to respond to it. This is a rough measure of the
2319 application-level round trip time.
2321 @return: a dictionary mapping peerid to a float (RTT time in seconds)
2324 return DictOf(Nodeid, float)
2326 UploadResults = Any() #DictOf(str, str)
2328 class RIEncryptedUploadable(RemoteInterface):
2329 __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
2334 def get_all_encoding_parameters():
2335 return (int, int, int, long)
def read_encrypted(offset=Offset, length=ReadSize):
    # returns a list of ciphertext strings
    return ListOf(str)
2344 class RICHKUploadHelper(RemoteInterface):
2345 __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
2349 Return a dictionary of version information.
2351 return DictOf(str, Any())
2353 def upload(reader=RIEncryptedUploadable):
2354 return UploadResults
2357 class RIHelper(RemoteInterface):
2358 __remote_name__ = "RIHelper.tahoe.allmydata.com"
2362 Return a dictionary of version information.
2364 return DictOf(str, Any())
2366 def upload_chk(si=StorageIndex):
2367 """See if a file with a given storage index needs uploading. The
2368 helper will ask the appropriate storage servers to see if the file
2369 has already been uploaded. If so, the helper will return a set of
2370 'upload results' that includes whatever hashes are needed to build
2371 the read-cap, and perhaps a truncated sharemap.
2373 If the file has not yet been uploaded (or if it was only partially
2374 uploaded), the helper will return an empty upload-results dictionary
2375 and also an RICHKUploadHelper object that will take care of the
2376 upload process. The client should call upload() on this object and
2377 pass it a reference to an RIEncryptedUploadable object that will
2378 provide ciphertext. When the upload is finished, the upload() method
2379 will finish and return the upload results.
2381 return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
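
# Editor's note: a client-side sketch (hypothetical function) of the
# upload_chk() protocol described above. 'helper' is an RIHelper remote
# reference; 'reader' is the client's RIEncryptedUploadable.

def _example_helper_upload(helper, storage_index, reader):
    d = helper.callRemote("upload_chk", storage_index)
    def _got(res):
        (upload_results, upload_helper) = res
        if upload_helper is None:
            return upload_results  # already uploaded; results are complete
        # otherwise hand our ciphertext source to the upload helper
        return upload_helper.callRemote("upload", reader)
    d.addCallback(_got)
    return d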
2384 class RIStatsProvider(RemoteInterface):
2385 __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
2387 Provides access to statistics and monitoring information.
2392 returns a dictionary containing 'counters' and 'stats', each a
2393 dictionary with string counter/stat name keys, and numeric values.
2394 counters are monotonically increasing measures of work done, and
stats are instantaneous measures (potentially time averaged
internally)."""
2398 return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))
2400 class RIStatsGatherer(RemoteInterface):
2401 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
2403 Provides a monitoring service for centralised collection of stats
2406 def provide(provider=RIStatsProvider, nickname=str):
2408 @param provider: a stats collector instance which should be polled
2409 periodically by the gatherer to collect stats.
2410 @param nickname: a name useful to identify the provided client
2415 class IStatsProducer(Interface):
2418 returns a dictionary, with str keys representing the names of stats
2419 to be monitored, and numeric values.
2422 class RIKeyGenerator(RemoteInterface):
2423 __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
2425 Provides a service offering to make RSA key pairs.
2428 def get_rsa_key_pair(key_size=int):
2430 @param key_size: the size of the signature key.
2431 @return: tuple(verifying_key, signing_key)
2433 return TupleOf(str, str)
class FileTooLargeError(Exception):
    pass
2439 class IValidatedThingProxy(Interface):
2441 """ Acquire a thing and validate it. Return a deferred which is
2442 eventually fired with self if the thing is valid or errbacked if it
2443 can't be acquired or validated."""
2445 class InsufficientVersionError(Exception):
def __init__(self, needed, got):
    self.needed = needed
    self.got = got
def __repr__(self):
    return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
                                                            self.got)
2453 class EmptyPathnameComponentError(Exception):
2454 """The webapi disallows empty pathname components."""