2 from zope.interface import Interface
3 from foolscap.schema import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
4 ChoiceOf, IntegerConstraint, Any
5 from foolscap import RemoteInterface, Referenceable
9 Hash = StringConstraint(maxLength=HASH_SIZE,
10 minLength=HASH_SIZE)# binary format 32-byte SHA256 hash
11 Nodeid = StringConstraint(maxLength=20,
12 minLength=20) # binary format 20-byte SHA1 hash
13 FURL = StringConstraint(1000)
14 StorageIndex = StringConstraint(16)
15 URI = StringConstraint(300) # kind of arbitrary
17 MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file
19 ShareData = StringConstraint(None)
20 URIExtensionData = StringConstraint(1000)
21 Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
23 ReadSize = int # the 'int' constraint is 2**31 == 2Gib -- large files are processed in not-so-large increments
24 WriteEnablerSecret = Hash # used to protect mutable bucket modifications
25 LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
26 LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests
28 class RIStubClient(RemoteInterface):
29 """Each client publishes a service announcement for a dummy object called
30 the StubClient. This object doesn't actually offer any services, but the
31 announcement helps the Introducer keep track of which clients are
32 subscribed (so the grid admin can keep track of things like the size of
33 the grid and the client versions in use. This is the (empty)
34 RemoteInterface for the StubClient."""
36 class RIBucketWriter(RemoteInterface):
37 """ Objects of this kind live on the server side. """
38 def write(offset=Offset, data=ShareData):
43 If the data that has been written is incomplete or inconsistent then
44 the server will throw the data away, else it will store it for future
50 """Abandon all the data that has been written.
54 class RIBucketReader(RemoteInterface):
55 def read(offset=Offset, length=ReadSize):
58 def advise_corrupt_share(reason=str):
59 """Clients who discover hash failures in shares that they have
60 downloaded from me will use this method to inform me about the
61 failures. I will record their concern so that my operator can
62 manually inspect the shares in question. I return None.
64 This is a wrapper around RIStorageServer.advise_corrupt_share(),
65 which is tied to a specific share, and therefore does not need the
66 extra share-identifying arguments. Please see that method for full
70 TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
71 # elements are (offset, length, operator, specimen)
72 # operator is one of "lt, le, eq, ne, ge, gt"
73 # nop always passes and is used to fetch data while writing.
74 # you should use length==len(specimen) for everything except nop
75 DataVector = ListOf(TupleOf(Offset, ShareData))
76 # (offset, data). This limits us to 30 writes of 1MiB each per call
77 TestAndWriteVectorsForShares = DictOf(int,
80 ChoiceOf(None, Offset), # new_length
82 ReadVector = ListOf(TupleOf(Offset, ReadSize))
83 ReadData = ListOf(ShareData)
84 # returns data[offset:offset+length] for each element of TestVector
86 class RIStorageServer(RemoteInterface):
87 __remote_name__ = "RIStorageServer.tahoe.allmydata.com"
91 Return a dictionary of version information.
93 return DictOf(str, Any())
95 def allocate_buckets(storage_index=StorageIndex,
96 renew_secret=LeaseRenewSecret,
97 cancel_secret=LeaseCancelSecret,
98 sharenums=SetOf(int, maxLength=MAX_BUCKETS),
99 allocated_size=Offset, canary=Referenceable):
101 @param storage_index: the index of the bucket to be created or
103 @param sharenums: these are the share numbers (probably between 0 and
104 99) that the sender is proposing to store on this
106 @param renew_secret: This is the secret used to protect bucket refresh
107 This secret is generated by the client and
108 stored for later comparison by the server. Each
109 server is given a different secret.
110 @param cancel_secret: Like renew_secret, but protects bucket decref.
111 @param canary: If the canary is lost before close(), the bucket is
113 @return: tuple of (alreadygot, allocated), where alreadygot is what we
114 already have and allocated is what we hereby agree to accept.
115 New leases are added for shares in both lists.
117 return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
118 DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))
120 def add_lease(storage_index=StorageIndex,
121 renew_secret=LeaseRenewSecret,
122 cancel_secret=LeaseCancelSecret):
124 Add a new lease on the given bucket. If the renew_secret matches an
125 existing lease, that lease will be renewed instead. If there is no
126 bucket for the given storage_index, return silently. (note that in
127 tahoe-1.3.0 and earlier, IndexError was raised if there was no
130 return Any() # returns None now, but future versions might change
132 def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
134 Renew the lease on a given bucket, resetting the timer to 31 days.
135 Some networks will use this, some will not. If there is no bucket for
136 the given storage_index, IndexError will be raised.
138 For mutable shares, if the given renew_secret does not match an
139 existing lease, IndexError will be raised with a note listing the
140 server-nodeids on the existing leases, so leases on migrated shares
141 can be renewed or cancelled. For immutable shares, IndexError
142 (without the note) will be raised.
146 def cancel_lease(storage_index=StorageIndex,
147 cancel_secret=LeaseCancelSecret):
149 Cancel the lease on a given bucket. If this was the last lease on the
150 bucket, the bucket will be deleted. If there is no bucket for the
151 given storage_index, IndexError will be raised.
153 For mutable shares, if the given cancel_secret does not match an
154 existing lease, IndexError will be raised with a note listing the
155 server-nodeids on the existing leases, so leases on migrated shares
156 can be renewed or cancelled. For immutable shares, IndexError
157 (without the note) will be raised.
161 def get_buckets(storage_index=StorageIndex):
162 return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)
166 def slot_readv(storage_index=StorageIndex,
167 shares=ListOf(int), readv=ReadVector):
168 """Read a vector from the numbered shares associated with the given
169 storage index. An empty shares list means to return data from all
170 known shares. Returns a dictionary with one key per share."""
171 return DictOf(int, ReadData) # shnum -> results
173 def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
174 secrets=TupleOf(WriteEnablerSecret,
177 tw_vectors=TestAndWriteVectorsForShares,
180 """General-purpose test-and-set operation for mutable slots. Perform
181 a bunch of comparisons against the existing shares. If they all pass,
182 then apply a bunch of write vectors to those shares. Then use the
183 read vectors to extract data from all the shares and return the data.
185 This method is, um, large. The goal is to allow clients to update all
186 the shares associated with a mutable file in a single round trip.
188 @param storage_index: the index of the bucket to be created or
190 @param write_enabler: a secret that is stored along with the slot.
191 Writes are accepted from any caller who can
192 present the matching secret. A different secret
193 should be used for each slot*server pair.
194 @param renew_secret: This is the secret used to protect bucket refresh
195 This secret is generated by the client and
196 stored for later comparison by the server. Each
197 server is given a different secret.
198 @param cancel_secret: Like renew_secret, but protects bucket decref.
200 The 'secrets' argument is a tuple of (write_enabler, renew_secret,
201 cancel_secret). The first is required to perform any write. The
202 latter two are used when allocating new shares. To simply acquire a
203 new lease on existing shares, use an empty testv and an empty writev.
205 Each share can have a separate test vector (i.e. a list of
206 comparisons to perform). If all vectors for all shares pass, then all
207 writes for all shares are recorded. Each comparison is a 4-tuple of
208 (offset, length, operator, specimen), which effectively does a bool(
209 (read(offset, length)) OPERATOR specimen ) and only performs the
210 write if all these evaluate to True. Basic test-and-set uses 'eq'.
211 Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
212 Write-if-same-or-newer uses 'le'.
214 Reads from the end of the container are truncated, and missing shares
215 behave like empty ones, so to assert that a share doesn't exist (for
216 use when creating a new share), use (0, 1, 'eq', '').
218 The write vector will be applied to the given share, expanding it if
219 necessary. A write vector applied to a share number that did not
220 exist previously will cause that share to be created.
222 Each write vector is accompanied by a 'new_length' argument. If
223 new_length is not None, use it to set the size of the container. This
224 can be used to pre-allocate space for a series of upcoming writes, or
225 truncate existing data. If the container is growing, new_length will
226 be applied before datav. If the container is shrinking, it will be
227 applied afterwards. If new_length==0, the share will be deleted.
229 The read vector is used to extract data from all known shares,
230 *before* any writes have been applied. The same vector is used for
231 all shares. This captures the state that was tested by the test
234 This method returns two values: a boolean and a dict. The boolean is
235 True if the write vectors were applied, False if not. The dict is
236 keyed by share number, and each value contains a list of strings, one
237 for each element of the read vector.
239 If the write_enabler is wrong, this will raise BadWriteEnablerError.
240 To enable share migration (using update_write_enabler), the exception
241 will have the nodeid used for the old write enabler embedded in it,
242 in the following string::
244 The write enabler was recorded by nodeid '%s'.
246 Note that the nodeid here is encoded using the same base32 encoding
247 used by Foolscap and allmydata.util.idlib.nodeid_b2a().
250 return TupleOf(bool, DictOf(int, ReadData))
252 def advise_corrupt_share(share_type=str, storage_index=StorageIndex,
253 shnum=int, reason=str):
254 """Clients who discover hash failures in shares that they have
255 downloaded from me will use this method to inform me about the
256 failures. I will record their concern so that my operator can
257 manually inspect the shares in question. I return None.
259 'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
260 (binary) storage index string, and 'shnum' is the integer share
261 number. 'reason' is a human-readable explanation of the problem,
262 probably including some expected hash values and the computed ones
263 which did not match. Corruption advisories for mutable shares should
264 include a hash of the public key (the same value that appears in the
265 mutable-file verify-cap), since the current share format does not
269 class IStorageBucketWriter(Interface):
271 Objects of this kind live on the client side.
273 def put_block(segmentnum=int, data=ShareData):
274 """@param data: For most segments, this data will be 'blocksize'
275 bytes in length. The last segment might be shorter.
276 @return: a Deferred that fires (with None) when the operation completes
279 def put_plaintext_hashes(hashes=ListOf(Hash)):
281 @return: a Deferred that fires (with None) when the operation completes
284 def put_crypttext_hashes(hashes=ListOf(Hash)):
286 @return: a Deferred that fires (with None) when the operation completes
289 def put_block_hashes(blockhashes=ListOf(Hash)):
291 @return: a Deferred that fires (with None) when the operation completes
294 def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
296 @return: a Deferred that fires (with None) when the operation completes
299 def put_uri_extension(data=URIExtensionData):
300 """This block of data contains integrity-checking information (hashes
301 of plaintext, crypttext, and shares), as well as encoding parameters
302 that are necessary to recover the data. This is a serialized dict
303 mapping strings to other strings. The hash of this data is kept in
304 the URI and verified before any of the data is used. All buckets for
305 a given file contain identical copies of this data.
307 The serialization format is specified with the following pseudocode:
308 for k in sorted(dict.keys()):
309 assert re.match(r'^[a-zA-Z_\-]+$', k)
310 write(k + ':' + netstring(dict[k]))
312 @return: a Deferred that fires (with None) when the operation completes
316 """Finish writing and close the bucket. The share is not finalized
317 until this method is called: if the uploading client disconnects
318 before calling close(), the partially-written share will be
321 @return: a Deferred that fires (with None) when the operation completes
324 class IStorageBucketReader(Interface):
326 def get_block_data(blocknum=int, blocksize=int, size=int):
327 """Most blocks will be the same size. The last block might be shorter
333 def get_crypttext_hashes():
335 @return: ListOf(Hash)
338 def get_block_hashes(at_least_these=SetOf(int)):
340 @return: ListOf(Hash)
343 def get_share_hashes(at_least_these=SetOf(int)):
345 @return: ListOf(TupleOf(int, Hash))
348 def get_uri_extension():
350 @return: URIExtensionData
355 # hm, we need a solution for forward references in schemas
356 FileNode_ = Any() # TODO: foolscap needs constraints on copyables
357 DirectoryNode_ = Any() # TODO: same
358 AnyNode_ = ChoiceOf(FileNode_, DirectoryNode_)
361 class IURI(Interface):
362 def init_from_string(uri):
363 """Accept a string (as created by my to_string() method) and populate
364 this instance with its data. I am not normally called directly,
365 please use the module-level uri.from_string() function to convert
366 arbitrary URI strings into IURI-providing instances."""
369 """Return False if this URI be used to modify the data. Return True
370 if this URI cannot be used to modify the data."""
373 """Return True if the data can be modified by *somebody* (perhaps
374 someone who has a more powerful URI than this one)."""
377 """Return another IURI instance, which represents a read-only form of
378 this one. If is_readonly() is True, this returns self."""
380 def get_verify_cap():
381 """Return an instance that provides IVerifierURI, which can be used
382 to check on the availability of the file or directory, without
383 providing enough capabilities to actually read or modify the
384 contents. This may return None if the file does not need checking or
385 verification (e.g. LIT URIs).
389 """Return a string of printable ASCII characters, suitable for
390 passing into init_from_string."""
392 class IVerifierURI(Interface, IURI):
393 def init_from_string(uri):
394 """Accept a string (as created by my to_string() method) and populate
395 this instance with its data. I am not normally called directly,
396 please use the module-level uri.from_string() function to convert
397 arbitrary URI strings into IURI-providing instances."""
400 """Return a string of printable ASCII characters, suitable for
401 passing into init_from_string."""
403 class IDirnodeURI(Interface):
404 """I am a URI which represents a dirnode."""
407 class IFileURI(Interface):
408 """I am a URI which represents a filenode."""
410 """Return the length (in bytes) of the file that I represent."""
412 class IImmutableFileURI(IFileURI):
415 class IMutableFileURI(Interface):
416 """I am a URI which represents a mutable filenode."""
417 class INewDirectoryURI(Interface):
419 class IReadonlyNewDirectoryURI(Interface):
423 class IFilesystemNode(Interface):
426 Return the URI that can be used by others to get access to this
427 node. If this node is read-only, the URI will only offer read-only
428 access. If this node is read-write, the URI will offer read-write
431 If you have read-write access to a node and wish to share merely
432 read-only access with others, use get_readonly_uri().
435 def get_readonly_uri():
436 """Return the directory URI that can be used by others to get
437 read-only access to this directory node. The result is a read-only
438 URI, regardless of whether this dirnode is read-only or read-write.
440 If you have merely read-only access to this dirnode,
441 get_readonly_uri() will return the same thing as get_uri().
444 def get_repair_cap():
445 """Return an IURI instance that can be used to repair the file, or
446 None if this node cannot be repaired (either because it is not
447 distributed, like a LIT file, or because the node does not represent
448 sufficient authority to create a repair-cap, like a read-only RSA
449 mutable file node [which cannot create the correct write-enablers]).
452 def get_verify_cap():
453 """Return an IVerifierURI instance that represents the
454 'verifiy/refresh capability' for this node. The holder of this
455 capability will be able to renew the lease for this node, protecting
456 it from garbage-collection. They will also be able to ask a server if
457 it holds a share for the file or directory.
460 def get_storage_index():
461 """Return a string with the (binary) storage index in use on this
462 download. This may be None if there is no storage index (i.e. LIT
466 """Return True if this reference provides mutable access to the given
467 file or directory (i.e. if you can modify it), or False if not. Note
468 that even if this reference is read-only, someone else may hold a
469 read-write reference to it."""
472 """Return True if this file or directory is mutable (by *somebody*,
473 not necessarily you), False if it is is immutable. Note that a file
474 might be mutable overall, but your reference to it might be
475 read-only. On the other hand, all references to an immutable file
476 will be read-only; there are no read-write references to an immutable
480 class IMutableFilesystemNode(IFilesystemNode):
483 class IFileNode(IFilesystemNode):
484 def download(target):
485 """Download the file's contents to a given IDownloadTarget"""
487 def download_to_data():
488 """Download the file's contents. Return a Deferred that fires
489 with those contents."""
492 """Return the length (in bytes) of the data this node represents."""
494 def read(consumer, offset=0, size=None):
495 """Download a portion (possibly all) of the file's contents, making
496 them available to the given IConsumer. Return a Deferred that fires
497 (with the consumer) when the consumer is unregistered (either because
498 the last byte has been given to it, or because the consumer threw an
499 exception during write(), possibly because it no longer wants to
500 receive data). The portion downloaded will start at 'offset' and
501 contain 'size' bytes (or the remainder of the file if size==None).
503 The consumer will be used in non-streaming mode: an IPullProducer
504 will be attached to it.
506 The consumer will not receive data right away: several network trips
507 must occur first. The order of events will be::
509 consumer.registerProducer(p, streaming)
510 (if streaming == False)::
511 consumer does p.resumeProducing()
513 consumer does p.resumeProducing()
514 consumer.write(data).. (repeat until all data is written)
515 consumer.unregisterProducer()
516 deferred.callback(consumer)
518 If a download error occurs, or an exception is raised by
519 consumer.registerProducer() or consumer.write(), I will call
520 consumer.unregisterProducer() and then deliver the exception via
521 deferred.errback(). To cancel the download, the consumer should call
522 p.stopProducing(), which will result in an exception being delivered
523 via deferred.errback().
525 A simple download-to-memory consumer example would look like this::
527 class MemoryConsumer:
528 implements(IConsumer)
532 def registerProducer(self, p, streaming):
533 assert streaming == False
536 def write(self, data):
537 self.chunks.append(data)
538 def unregisterProducer(self):
540 d = filenode.read(MemoryConsumer())
541 d.addCallback(lambda mc: "".join(mc.chunks))
546 class IMutableFileNode(IFileNode, IMutableFilesystemNode):
547 """I provide access to a 'mutable file', which retains its identity
548 regardless of what contents are put in it.
550 The consistency-vs-availability problem means that there might be
551 multiple versions of a file present in the grid, some of which might be
552 unrecoverable (i.e. have fewer than 'k' shares). These versions are
553 loosely ordered: each has a sequence number and a hash, and any version
554 with seqnum=N was uploaded by a node which has seen at least one version
557 The 'servermap' (an instance of IMutableFileServerMap) is used to
558 describe the versions that are known to be present in the grid, and which
559 servers are hosting their shares. It is used to represent the 'state of
560 the world', and is used for this purpose by my test-and-set operations.
561 Downloading the contents of the mutable file will also return a
562 servermap. Uploading a new version into the mutable file requires a
563 servermap as input, and the semantics of the replace operation is
564 'replace the file with my new version if it looks like nobody else has
565 changed the file since my previous download'. Because the file is
566 distributed, this is not a perfect test-and-set operation, but it will do
567 its best. If the replace process sees evidence of a simultaneous write,
568 it will signal an UncoordinatedWriteError, so that the caller can take
572 Most readers will want to use the 'best' current version of the file, and
573 should use my 'download_best_version()' method.
575 To unconditionally replace the file, callers should use overwrite(). This
576 is the mode that user-visible mutable files will probably use.
578 To apply some delta to the file, call modify() with a callable modifier
579 function that can apply the modification that you want to make. This is
580 the mode that dirnodes will use, since most directory modification
581 operations can be expressed in terms of deltas to the directory state.
584 Three methods are available for users who need to perform more complex
585 operations. The first is get_servermap(), which returns an up-to-date
586 servermap using a specified mode. The second is download_version(), which
587 downloads a specific version (not necessarily the 'best' one). The third
588 is 'upload', which accepts new contents and a servermap (which must have
589 been updated with MODE_WRITE). The upload method will attempt to apply
590 the new contents as long as no other node has modified the file since the
591 servermap was updated. This might be useful to a caller who wants to
592 merge multiple versions into a single new one.
594 Note that each time the servermap is updated, a specific 'mode' is used,
595 which determines how many peers are queried. To use a servermap for my
596 replace() method, that servermap must have been updated in MODE_WRITE.
597 These modes are defined in allmydata.mutable.common, and consist of
598 MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
599 allmydata/mutable/servermap.py for details about the differences.
601 Mutable files are currently limited in size (about 3.5MB max) and can
602 only be retrieved and updated all-at-once, as a single big string. Future
603 versions of our mutable files will remove this restriction.
606 def download_best_version():
607 """Download the 'best' available version of the file, meaning one of
608 the recoverable versions with the highest sequence number. If no
609 uncoordinated writes have occurred, and if enough shares are
610 available, then this will be the most recent version that has been
613 I update an internal servermap with MODE_READ, determine which
614 version of the file is indicated by
615 servermap.best_recoverable_version(), and return a Deferred that
616 fires with its contents. If no version is recoverable, the Deferred
617 will errback with UnrecoverableFileError.
620 def get_size_of_best_version():
621 """Find the size of the version that would be downloaded with
622 download_best_version(), without actually downloading the whole file.
624 I return a Deferred that fires with an integer.
627 def overwrite(new_contents):
628 """Unconditionally replace the contents of the mutable file with new
629 ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
630 is only appropriate to use when the new contents of the file are
631 completely unrelated to the old ones, and you do not care about other
634 I return a Deferred that fires (with a PublishStatus object) when the
635 update has completed.
638 def modify(modifier_cb):
639 """Modify the contents of the file, by downloading the current
640 version, applying the modifier function (or bound method), then
641 uploading the new version. I return a Deferred that fires (with a
642 PublishStatus object) when the update is complete.
644 The modifier callable will be given three arguments: a string (with
645 the old contents), a 'first_time' boolean, and a servermap. As with
646 download_best_version(), the old contents will be from the best
647 recoverable version, but the modifier can use the servermap to make
648 other decisions (such as refusing to apply the delta if there are
649 multiple parallel versions, or if there is evidence of a newer
650 unrecoverable version). 'first_time' will be True the first time the
651 modifier is called, and False on any subsequent calls.
653 The callable should return a string with the new contents. The
654 callable must be prepared to be called multiple times, and must
655 examine the input string to see if the change that it wants to make
656 is already present in the old version. If it does not need to make
657 any changes, it can either return None, or return its input string.
659 If the modifier raises an exception, it will be returned in the
664 def get_servermap(mode):
665 """Return a Deferred that fires with an IMutableFileServerMap
666 instance, updated using the given mode.
669 def download_version(servermap, version):
670 """Download a specific version of the file, using the servermap
671 as a guide to where the shares are located.
673 I return a Deferred that fires with the requested contents, or
674 errbacks with UnrecoverableFileError. Note that a servermap which was
675 updated with MODE_ANYTHING or MODE_READ may not know about shares for
676 all versions (those modes stop querying servers as soon as they can
677 fulfil their goals), so you may want to use MODE_CHECK (which checks
678 everything) to get increased visibility.
681 def upload(new_contents, servermap):
682 """Replace the contents of the file with new ones. This requires a
683 servermap that was previously updated with MODE_WRITE.
685 I attempt to provide test-and-set semantics, in that I will avoid
686 modifying any share that is different than the version I saw in the
687 servermap. However, if another node is writing to the file at the
688 same time as me, I may manage to update some shares while they update
689 others. If I see any evidence of this, I will signal
690 UncoordinatedWriteError, and the file will be left in an inconsistent
691 state (possibly the version you provided, possibly the old version,
692 possibly somebody else's version, and possibly a mix of shares from
695 The recommended response to UncoordinatedWriteError is to either
696 return it to the caller (since they failed to coordinate their
697 writes), or to attempt some sort of recovery. It may be sufficient to
698 wait a random interval (with exponential backoff) and repeat your
699 operation. If I do not signal UncoordinatedWriteError, then I was
700 able to write the new version without incident.
702 I return a Deferred that fires (with a PublishStatus object) when the
703 publish has completed. I will update the servermap in-place with the
704 location of all new shares.
708 """Return this filenode's writekey, or None if the node does not have
709 write-capability. This may be used to assist with data structures
710 that need to make certain data available only to writers, such as the
711 read-write child caps in dirnodes. The recommended process is to have
712 reader-visible data be submitted to the filenode in the clear (where
713 it will be encrypted by the filenode using the readkey), but encrypt
714 writer-visible data using this writekey.
717 class NotEnoughSharesError(Exception):
718 def __init__(self, msg, got, needed):
719 Exception.__init__(self, msg)
722 self.servermap = None
724 class UnableToFetchCriticalDownloadDataError(Exception):
725 """I was unable to fetch some piece of critical data which is supposed to
726 be identically present in all shares."""
728 class NoServersError(Exception):
729 """Upload wasn't given any servers to work with, usually indicating a
730 network or Introducer problem."""
732 class ExistingChildError(Exception):
733 """A directory node was asked to add or replace a child that already
734 exists, and overwrite= was set to False."""
736 class NoSuchChildError(Exception):
737 """A directory node was asked to fetch a child which does not exist."""
739 class IDirectoryNode(IMutableFilesystemNode):
740 """I represent a name-to-child mapping, holding the tahoe equivalent of a
741 directory. All child names are unicode strings, and all children are some
742 sort of IFilesystemNode (either files or subdirectories).
747 The dirnode ('1') URI returned by this method can be used in
748 set_uri() on a different directory ('2') to 'mount' a reference to
749 this directory ('1') under the other ('2'). This URI is just a
750 string, so it can be passed around through email or other out-of-band
754 def get_readonly_uri():
756 The dirnode ('1') URI returned by this method can be used in
757 set_uri() on a different directory ('2') to 'mount' a reference to
758 this directory ('1') under the other ('2'). This URI is just a
759 string, so it can be passed around through email or other out-of-band
764 """I return a Deferred that fires with a dictionary mapping child
765 name (a unicode string) to (node, metadata_dict) tuples, in which
766 'node' is either an IFileNode or IDirectoryNode, and 'metadata_dict'
767 is a dictionary of metadata."""
770 """I return a Deferred that fires with a boolean, True if there
771 exists a child of the given name, False if not. The child name must
772 be a unicode string."""
775 """I return a Deferred that fires with a specific named child node,
776 either an IFileNode or an IDirectoryNode. The child name must be a
777 unicode string. I raise NoSuchChildError if I do not have a child by
780 def get_metadata_for(name):
781 """I return a Deferred that fires with the metadata dictionary for a
782 specific named child node. This metadata is stored in the *edge*, not
783 in the child, so it is attached to the parent dirnode rather than the
784 child dir-or-file-node. The child name must be a unicode string. I
785 raise NoSuchChildError if I do not have a child by that name."""
787 def set_metadata_for(name, metadata):
788 """I replace any existing metadata for the named child with the new
789 metadata. The child name must be a unicode string. This metadata is
790 stored in the *edge*, not in the child, so it is attached to the
791 parent dirnode rather than the child dir-or-file-node. I return a
792 Deferred (that fires with this dirnode) when the operation is
793 complete. I raise NoSuchChildError if I do not have a child by that
796 def get_child_at_path(path):
797 """Transform a child path into an IDirectoryNode or IFileNode.
799 I perform a recursive series of 'get' operations to find the named
800 descendant node. I return a Deferred that fires with the node, or
801 errbacks with NoSuchChildError if the node could not be found.
803 The path can be either a single string (slash-separated) or a list of
804 path-name elements. All elements must be unicode strings.
807 def get_child_and_metadata_at_path(path):
808 """Transform a child path into an IDirectoryNode/IFileNode and
811 I am like get_child_at_path(), but my Deferred fires with a tuple of
812 (node, metadata). The metadata comes from the last edge. If the path
813 is empty, the metadata will be an empty dictionary.
816 def set_uri(name, child_uri, metadata=None, overwrite=True):
817 """I add a child (by URI) at the specific name. I return a Deferred
818 that fires when the operation finishes. If overwrite= is True, I will
819 replace any existing child of the same name, otherwise an existing
820 child will cause me to return ExistingChildError. The child name must
823 The child_uri could be for a file, or for a directory (either
824 read-write or read-only, using a URI that came from get_uri() ).
826 If metadata= is provided, I will use it as the metadata for the named
827 edge. This will replace any existing metadata. If metadata= is left
828 as the default value of None, I will set ['mtime'] to the current
829 time, and I will set ['ctime'] to the current time if there was not
830 already a child by this name present. This roughly matches the
831 ctime/mtime semantics of traditional filesystems.
833 If this directory node is read-only, the Deferred will errback with a
836 def set_children(entries, overwrite=True):
837 """Add multiple (name, child_uri) pairs (or (name, child_uri,
838 metadata) triples) to a directory node. Returns a Deferred that fires
839 (with None) when the operation finishes. This is equivalent to
840 calling set_uri() multiple times, but is much more efficient. All
841 child names must be unicode strings.
844 def set_node(name, child, metadata=None, overwrite=True):
845 """I add a child at the specific name. I return a Deferred that fires
846 when the operation finishes. This Deferred will fire with the child
847 node that was just added. I will replace any existing child of the
848 same name. The child name must be a unicode string. The 'child'
849 instance must be an instance providing IDirectoryNode or IFileNode.
851 If metadata= is provided, I will use it as the metadata for the named
852 edge. This will replace any existing metadata. If metadata= is left
853 as the default value of None, I will set ['mtime'] to the current
854 time, and I will set ['ctime'] to the current time if there was not
855 already a child by this name present. This roughly matches the
856 ctime/mtime semantics of traditional filesystems.
858 If this directory node is read-only, the Deferred will errback with a
861 def set_nodes(entries, overwrite=True):
862 """Add multiple (name, child_node) pairs (or (name, child_node,
863 metadata) triples) to a directory node. Returns a Deferred that fires
864 (with None) when the operation finishes. This is equivalent to
865 calling set_node() multiple times, but is much more efficient. All
866 child names must be unicode strings."""
869 def add_file(name, uploadable, metadata=None, overwrite=True):
870 """I upload a file (using the given IUploadable), then attach the
871 resulting FileNode to the directory at the given name. I set metadata
872 the same way as set_uri and set_node. The child name must be a
875 I return a Deferred that fires (with the IFileNode of the uploaded
876 file) when the operation completes."""
879 """I remove the child at the specific name. I return a Deferred that
880 fires when the operation finishes. The child name must be a unicode
881 string. I raise NoSuchChildError if I do not have a child by that
884 def create_empty_directory(name, overwrite=True):
885 """I create and attach an empty directory at the given name. The
886 child name must be a unicode string. I return a Deferred that fires
887 when the operation finishes."""
889 def move_child_to(current_child_name, new_parent, new_child_name=None,
891 """I take one of my children and move them to a new parent. The child
892 is referenced by name. On the new parent, the child will live under
893 'new_child_name', which defaults to 'current_child_name'. TODO: what
894 should we do about metadata? I return a Deferred that fires when the
895 operation finishes. The child name must be a unicode string. I raise
896 NoSuchChildError if I do not have a child by that name."""
898 def build_manifest():
899 """I generate a table of everything reachable from this directory.
900 I also compute deep-stats as described below.
902 I return a Monitor. The Monitor's results will be a dictionary with
905 res['manifest']: a list of (path, cap) tuples for all nodes
906 (directories and files) reachable from this one.
907 'path' will be a tuple of unicode strings. The
908 origin dirnode will be represented by an empty path
910 res['verifycaps']: a list of (printable) verifycap strings, one for
911 each reachable non-LIT node. This is a set:
912 it will contain no duplicates.
913 res['storage-index']: a list of (base32) storage index strings,
914 one for each reachable non-LIT node. This is
915 a set: it will contain no duplicates.
916 res['stats']: a dictionary, the same that is generated by
917 start_deep_stats() below.
919 The Monitor will also have an .origin_si attribute with the (binary)
920 storage index of the starting point.
923 def start_deep_stats():
924 """Return a Monitor, examining all nodes (directories and files)
925 reachable from this one. The Monitor's results will be a dictionary
926 with the following keys::
928 count-immutable-files: count of how many CHK files are in the set
929 count-mutable-files: same, for mutable files (does not include
931 count-literal-files: same, for LIT files
932 count-files: sum of the above three
934 count-directories: count of directories
936 size-immutable-files: total bytes for all CHK files in the set
937 size-mutable-files (TODO): same, for current version of all mutable
938 files, does not include directories
939 size-literal-files: same, for LIT files
940 size-directories: size of mutable files used by directories
942 largest-directory: number of bytes in the largest directory
943 largest-directory-children: number of children in the largest
945 largest-immutable-file: number of bytes in the largest CHK file
947 size-mutable-files is not yet implemented, because it would involve
948 even more queries than deep_stats does.
950 The Monitor will also have an .origin_si attribute with the (binary)
951 storage index of the starting point.
953 This operation will visit every directory node underneath this one,
954 and can take a long time to run. On a typical workstation with good
955 bandwidth, this can examine roughly 15 directories per second (and
956 takes several minutes of 100% CPU for ~1700 directories).
959 class ICodecEncoder(Interface):
960 def set_params(data_size, required_shares, max_shares):
961 """Set up the parameters of this encoder.
963 This prepares the encoder to perform an operation that converts a
964 single block of data into a number of shares, such that a future
965 ICodecDecoder can use a subset of these shares to recover the
966 original data. This operation is invoked by calling encode(). Once
967 the encoding parameters are set up, the encode operation can be
968 invoked multiple times.
970 set_params() prepares the encoder to accept blocks of input data that
971 are exactly 'data_size' bytes in length. The encoder will be prepared
972 to produce 'max_shares' shares for each encode() operation (although
973 see the 'desired_share_ids' to use less CPU). The encoding math will
974 be chosen such that the decoder can get by with as few as
975 'required_shares' of these shares and still reproduce the original
976 data. For example, set_params(1000, 5, 5) offers no redundancy at
977 all, whereas set_params(1000, 1, 10) provides 10x redundancy.
979 Numerical Restrictions: 'data_size' is required to be an integral
980 multiple of 'required_shares'. In general, the caller should choose
981 required_shares and max_shares based upon their reliability
982 requirements and the number of peers available (the total storage
983 space used is roughly equal to max_shares*data_size/required_shares),
984 then choose data_size to achieve the memory footprint desired (larger
985 data_size means more efficient operation, smaller data_size means
986 smaller memory footprint).
988 In addition, 'max_shares' must be equal to or greater than
989 'required_shares'. Of course, setting them to be equal causes
990 encode() to degenerate into a particularly slow form of the 'split'
993 See encode() for more details about how these parameters are used.
995 set_params() must be called before any other ICodecEncoder methods
1000 """Return the 3-tuple of data_size, required_shares, max_shares"""
1002 def get_encoder_type():
1003 """Return a short string that describes the type of this encoder.
1005 There is required to be a global table of encoder classes. This method
1006 returns an index into this table; the value at this index is an
1007 encoder class, and this encoder is an instance of that class.
1010 def get_block_size():
1011 """Return the length of the shares that encode() will produce.
1014 def encode_proposal(data, desired_share_ids=None):
1015 """Encode some data.
1017 'data' must be a string (or other buffer object), and len(data) must
1018 be equal to the 'data_size' value passed earlier to set_params().
1020 This will return a Deferred that will fire with two lists. The first
1021 is a list of shares, each of which is a string (or other buffer
1022 object) such that len(share) is the same as what get_share_size()
1023 returned earlier. The second is a list of shareids, in which each is
1024 an integer. The lengths of the two lists will always be equal to each
1025 other. The user should take care to keep each share closely
1026 associated with its shareid, as one is useless without the other.
1028 The length of this output list will normally be the same as the value
1029 provided to the 'max_shares' parameter of set_params(). This may be
1030 different if 'desired_share_ids' is provided.
1032 'desired_share_ids', if provided, is required to be a sequence of
1033 ints, each of which is required to be >= 0 and < max_shares. If not
1034 provided, encode() will produce 'max_shares' shares, as if
1035 'desired_share_ids' were set to range(max_shares). You might use this
1036 if you initially thought you were going to use 10 peers, started
1037 encoding, and then two of the peers dropped out: you could use
1038 desired_share_ids= to skip the work (both memory and CPU) of
1039 producing shares for the peers which are no longer available.
1043 def encode(inshares, desired_share_ids=None):
1044 """Encode some data. This may be called multiple times. Each call is
1047 inshares is a sequence of length required_shares, containing buffers
1048 (i.e. strings), where each buffer contains the next contiguous
1049 non-overlapping segment of the input data. Each buffer is required to
1050 be the same length, and the sum of the lengths of the buffers is
1051 required to be exactly the data_size promised by set_params(). (This
1052 implies that the data has to be padded before being passed to
1053 encode(), unless of course it already happens to be an even multiple
1054 of required_shares in length.)
1056 ALSO: the requirement to break up your data into 'required_shares'
1057 chunks before calling encode() feels a bit surprising, at least from
1058 the point of view of a user who doesn't know how FEC works. It feels
1059 like an implementation detail that has leaked outside the
1060 abstraction barrier. Can you imagine a use case in which the data to
1061 be encoded might already be available in pre-segmented chunks, such
1062 that it is faster or less work to make encode() take a list rather
1063 than splitting a single string?
1065 ALSO ALSO: I think 'inshares' is a misleading term, since encode()
1066 is supposed to *produce* shares, so what it *accepts* should be
1067 something other than shares. Other places in this interface use the
1068 word 'data' for that-which-is-not-shares.. maybe we should use that
1071 'desired_share_ids', if provided, is required to be a sequence of
1072 ints, each of which is required to be >= 0 and < max_shares. If not
1073 provided, encode() will produce 'max_shares' shares, as if
1074 'desired_share_ids' were set to range(max_shares). You might use this
1075 if you initially thought you were going to use 10 peers, started
1076 encoding, and then two of the peers dropped out: you could use
1077 desired_share_ids= to skip the work (both memory and CPU) of
1078 producing shares for the peers which are no longer available.
1080 For each call, encode() will return a Deferred that fires with two
1081 lists, one containing shares and the other containing the shareids.
1082 The get_share_size() method can be used to determine the length of
1083 the share strings returned by encode(). Each shareid is a small
1084 integer, exactly as passed into 'desired_share_ids' (or
1085 range(max_shares), if desired_share_ids was not provided).
1087 The shares and their corresponding shareids are required to be kept
1088 together during storage and retrieval. Specifically, the share data is
1089 useless by itself: the decoder needs to be told which share is which
1090 by providing it with both the shareid and the actual share data.
1092 This function will allocate an amount of memory roughly equal to::
1094 (max_shares - required_shares) * get_share_size()
1096 When combined with the memory that the caller must allocate to
1097 provide the input data, this leads to a memory footprint roughly
1098 equal to the size of the resulting encoded shares (i.e. the expansion
1099 factor times the size of the input segment).
1104 # returning a list of (shareidN,shareN) tuples instead of a pair of
1105 # lists (shareids..,shares..). Brian thought the tuples would
1106 # encourage users to keep the share and shareid together throughout
1107 # later processing, Zooko pointed out that the code to iterate
1108 # through two lists is not really more complicated than using a list
1109 # of tuples and there's also a performance improvement
1111 # having 'data_size' not required to be an integral multiple of
1112 # 'required_shares'. Doing this would require encode() to perform
1113 # padding internally, and we'd prefer to have any padding be done
1114 # explicitly by the caller. Yes, it is an abstraction leak, but
1115 # hopefully not an onerous one.
1118 class ICodecDecoder(Interface):
1119 def set_params(data_size, required_shares, max_shares):
1120 """Set the params. They have to be exactly the same ones that were used for encoding. """
1122 def get_needed_shares():
1123 """Return the number of shares needed to reconstruct the data.
1124 set_params() is required to be called before this."""
1126 def decode(some_shares, their_shareids):
1127 """Decode a partial list of shares into data.
1129 'some_shares' is required to be a sequence of buffers of sharedata, a
1130 subset of the shares returned by ICodecEncode.encode(). Each share is
1131 required to be of the same length. The i'th element of their_shareids
1132 is required to be the shareid of the i'th buffer in some_shares.
1134 This returns a Deferred which fires with a sequence of buffers. This
1135 sequence will contain all of the segments of the original data, in
1136 order. The sum of the lengths of all of the buffers will be the
1137 'data_size' value passed into the original ICodecEncode.set_params()
1138 call. To get back the single original input block of data, use
1139 ''.join(output_buffers), or you may wish to simply write them in
1140 order to an output file.
1142 Note that some of the elements in the result sequence may be
1143 references to the elements of the some_shares input sequence. In
1144 particular, this means that if those share objects are mutable (e.g.
1145 arrays) and if they are changed, then both the input (the
1146 'some_shares' parameter) and the output (the value given when the
1147 deferred is triggered) will change.
1149 The length of 'some_shares' is required to be exactly the value of
1150 'required_shares' passed into the original ICodecEncode.set_params()
1154 class IEncoder(Interface):
1155 """I take an object that provides IEncryptedUploadable, which provides
1156 encrypted data, and a list of shareholders. I then encode, hash, and
1157 deliver shares to those shareholders. I will compute all the necessary
1158 Merkle hash trees that are necessary to validate the crypttext that
1159 eventually comes back from the shareholders. I provide the URI Extension
1160 Block Hash, and the encoding parameters, both of which must be included
1163 I do not choose shareholders, that is left to the IUploader. I must be
1164 given a dict of RemoteReferences to storage buckets that are ready and
1165 willing to receive data.
1169 """Specify the number of bytes that will be encoded. This must be
1170 peformed before get_serialized_params() can be called.
1172 def set_params(params):
1173 """Override the default encoding parameters. 'params' is a tuple of
1174 (k,d,n), where 'k' is the number of required shares, 'd' is the
1175 shares_of_happiness, and 'n' is the total number of shares that will
1178 Encoding parameters can be set in three ways. 1: The Encoder class
1179 provides defaults (3/7/10). 2: the Encoder can be constructed with
1180 an 'options' dictionary, in which the
1181 needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
1182 set_params((k,d,n)) can be called.
1184 If you intend to use set_params(), you must call it before
1185 get_share_size or get_param are called.
1188 def set_encrypted_uploadable(u):
1189 """Provide a source of encrypted upload data. 'u' must implement
1190 IEncryptedUploadable.
1192 When this is called, the IEncryptedUploadable will be queried for its
1193 length and the storage_index that should be used.
1195 This returns a Deferred that fires with this Encoder instance.
1197 This must be performed before start() can be called.
1200 def get_param(name):
1201 """Return an encoding parameter, by name.
1203 'storage_index': return a string with the (16-byte truncated SHA-256
1204 hash) storage index to which these shares should be
1207 'share_counts': return a tuple describing how many shares are used:
1208 (needed_shares, shares_of_happiness, total_shares)
1210 'num_segments': return an int with the number of segments that
1213 'segment_size': return an int with the size of each segment.
1215 'block_size': return the size of the individual blocks that will
1216 be delivered to a shareholder's put_block() method. By
1217 knowing this, the shareholder will be able to keep all
1218 blocks in a single file and still provide random access
1219 when reading them. # TODO: can we avoid exposing this?
1221 'share_size': an int with the size of the data that will be stored
1222 on each shareholder. This is aggregate amount of data
1223 that will be sent to the shareholder, summed over all
1224 the put_block() calls I will ever make. It is useful to
1225 determine this size before asking potential
1226 shareholders whether they will grant a lease or not,
1227 since their answers will depend upon how much space we
1228 need. TODO: this might also include some amount of
1229 overhead, like the size of all the hashes. We need to
1230 decide whether this is useful or not.
1232 'serialized_params': a string with a concise description of the
1233 codec name and its parameters. This may be passed
1234 into the IUploadable to let it make sure that
1235 the same file encoded with different parameters
1236 will result in different storage indexes.
1238 Once this is called, set_size() and set_params() may not be called.
1241 def set_shareholders(shareholders):
1242 """Tell the encoder where to put the encoded shares. 'shareholders'
1243 must be a dictionary that maps share number (an integer ranging from
1244 0 to n-1) to an instance that provides IStorageBucketWriter. This
1245 must be performed before start() can be called."""
1248 """Begin the encode/upload process. This involves reading encrypted
1249 data from the IEncryptedUploadable, encoding it, uploading the shares
1250 to the shareholders, then sending the hash trees.
1252 set_encrypted_uploadable() and set_shareholders() must be called
1253 before this can be invoked.
1255 This returns a Deferred that fires with a verify cap when the upload process is
1256 complete. The verifycap, plus the encryption key, is sufficient to construct the read
1260 class IDecoder(Interface):
1261 """I take a list of shareholders and some setup information, then
1262 download, validate, decode, and decrypt data from them, writing the
1263 results to an output file.
1265 I do not locate the shareholders, that is left to the IDownloader. I must
1266 be given a dict of RemoteReferences to storage buckets that are ready to
1271 """I take a file-like object (providing write and close) to which all
1272 the plaintext data will be written.
1274 TODO: producer/consumer . Maybe write() should return a Deferred that
1275 indicates when it will accept more data? But probably having the
1276 IDecoder be a producer is easier to glue to IConsumer pieces.
1279 def set_shareholders(shareholders):
1280 """I take a dictionary that maps share identifiers (small integers)
1281 to RemoteReferences that provide RIBucketReader. This must be called
1285 """I start the download. This process involves retrieving data and
1286 hash chains from the shareholders, using the hashes to validate the
1287 data, decoding the shares into segments, decrypting the segments,
1288 then writing the resulting plaintext to the output file.
1290 I return a Deferred that will fire (with self) when the download is
1294 class IDownloadTarget(Interface):
1295 # Note that if the IDownloadTarget is also an IConsumer, the downloader
1296 # will register itself as a producer. This allows the target to invoke
1297 # downloader.pauseProducing, resumeProducing, and stopProducing.
1299 """Called before any calls to write() or close(). If an error
1300 occurs before any data is available, fail() may be called without
1301 a previous call to open().
1303 'size' is the length of the file being downloaded, in bytes."""
1306 """Output some data to the target."""
1308 """Inform the target that there is no more data to be written."""
1310 """fail() is called to indicate that the download has failed. 'why'
1311 is a Failure object indicating what went wrong. No further methods
1312 will be invoked on the IDownloadTarget after fail()."""
1313 def register_canceller(cb):
1314 """The CiphertextDownloader uses this to register a no-argument function
1315 that the target can call to cancel the download. Once this canceller
1316 is invoked, no further calls to write() or close() will be made."""
1318 """When the CiphertextDownloader is done, this finish() function will be
1319 called. Whatever it returns will be returned to the invoker of
1320 Downloader.download.
1322 # The following methods are just because that target might be a repairer.DownUpConnector,
1323 # and just because the current CHKUpload object expects to find the storage index and
1324 # encoding parameters in its Uploadable.
1325 def set_storageindex(storageindex):
1326 """ Set the storage index. """
1327 def set_encodingparams(encodingparams):
1328 """ Set the encoding parameters. """
1330 class IDownloader(Interface):
1331 def download(uri, target):
1332 """Perform a CHK download, sending the data to the given target.
1333 'target' must provide IDownloadTarget.
1335 Returns a Deferred that fires (with the results of target.finish)
1336 when the download is finished, or errbacks if something went wrong."""
1338 class IEncryptedUploadable(Interface):
1339 def set_upload_status(upload_status):
1340 """Provide an IUploadStatus object that should be filled with status
1341 information. The IEncryptedUploadable is responsible for setting
1342 key-determination progress ('chk'), size, storage_index, and
1343 ciphertext-fetch progress. It may delegate some of this
1344 responsibility to others, in particular to the IUploadable."""
1347 """This behaves just like IUploadable.get_size()."""
1349 def get_all_encoding_parameters():
1350 """Return a Deferred that fires with a tuple of
1351 (k,happy,n,segment_size). The segment_size will be used as-is, and
1352 must match the following constraints: it must be a multiple of k, and
1353 it shouldn't be unreasonably larger than the file size (if
1354 segment_size is larger than filesize, the difference must be stored
1357 This usually passes through to the IUploadable method of the same
1360 The encoder strictly obeys the values returned by this method. To
1361 make an upload use non-default encoding parameters, you must arrange
1362 to control the values that this method returns.
1365 def get_storage_index():
1366 """Return a Deferred that fires with a 16-byte storage index.
1369 def read_encrypted(length, hash_only):
1370 """This behaves just like IUploadable.read(), but returns crypttext
1371 instead of plaintext. If hash_only is True, then this discards the
1372 data (and returns an empty list); this improves efficiency when
1373 resuming an interrupted upload (where we need to compute the
1374 plaintext hashes, but don't need the redundant encrypted data)."""
1376 def get_plaintext_hashtree_leaves(first, last, num_segments):
1377 """OBSOLETE; Get the leaf nodes of a merkle hash tree over the plaintext
1378 segments, i.e. get the tagged hashes of the given segments. The
1379 segment size is expected to be generated by the IEncryptedUploadable
1380 before any plaintext is read or ciphertext produced, so that the
1381 segment hashes can be generated with only a single pass.
1383 This returns a Deferred which fires with a sequence of hashes, using:
1385 tuple(segment_hashes[first:last])
1387 'num_segments' is used to assert that the number of segments that the
1388 IEncryptedUploadable handled matches the number of segments that the
1389 encoder was expecting.
1391 This method must not be called until the final byte has been read
1392 from read_encrypted(). Once this method is called, read_encrypted()
1393 can never be called again.
1396 def get_plaintext_hash():
1397 """OBSOLETE; Get the hash of the whole plaintext.
1399 This returns a Deferred which fires with a tagged SHA-256 hash of the
1400 whole plaintext, obtained from hashutil.plaintext_hash(data).
1404 """Just like IUploadable.close()."""
1406 class IUploadable(Interface):
1407 def set_upload_status(upload_status):
1408 """Provide an IUploadStatus object that should be filled with status
1409 information. The IUploadable is responsible for setting
1410 key-determination progress ('chk')."""
1412 def set_default_encoding_parameters(params):
1413 """Set the default encoding parameters, which must be a dict mapping
1414 strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1415 'max_segment_size'. These might have an influence on the final
1416 encoding parameters returned by get_all_encoding_parameters(), if the
1417 Uploadable doesn't have more specific preferences.
1419 This call is optional: if it is not used, the Uploadable will use
1420 some built-in defaults. If used, this method must be called before
1421 any other IUploadable methods to have any effect.
1425 """Return a Deferred that will fire with the length of the data to be
1426 uploaded, in bytes. This will be called before the data is actually
1427 used, to compute encoding parameters.
1430 def get_all_encoding_parameters():
1431 """Return a Deferred that fires with a tuple of
1432 (k,happy,n,segment_size). The segment_size will be used as-is, and
1433 must match the following constraints: it must be a multiple of k, and
1434 it shouldn't be unreasonably larger than the file size (if
1435 segment_size is larger than filesize, the difference must be stored
1438 The relative values of k and n allow some IUploadables to request
1439 better redundancy than others (in exchange for consuming more space
1442 Larger values of segment_size reduce hash overhead, while smaller
1443 values reduce memory footprint and cause data to be delivered in
1444 smaller pieces (which may provide a smoother and more predictable
1445 download experience).
1447 The encoder strictly obeys the values returned by this method. To
1448 make an upload use non-default encoding parameters, you must arrange
1449 to control the values that this method returns. One way to influence
1450 them may be to call set_encoding_parameters() before calling
1451 get_all_encoding_parameters().
1454 def get_encryption_key():
1455 """Return a Deferred that fires with a 16-byte AES key. This key will
1456 be used to encrypt the data. The key will also be hashed to derive
1459 Uploadables which want to achieve convergence should hash their file
1460 contents and the serialized_encoding_parameters to form the key
1461 (which of course requires a full pass over the data). Uploadables can
1462 use the upload.ConvergentUploadMixin class to achieve this
1465 Uploadables which do not care about convergence (or do not wish to
1466 make multiple passes over the data) can simply return a
1467 strongly-random 16 byte string.
1469 get_encryption_key() may be called multiple times: the IUploadable is
1470 required to return the same value each time.
1474 """Return a Deferred that fires with a list of strings (perhaps with
1475 only a single element) which, when concatenated together, contain the
1476 next 'length' bytes of data. If EOF is near, this may provide fewer
1477 than 'length' bytes. The total number of bytes provided by read()
1478 before it signals EOF must equal the size provided by get_size().
1480 If the data must be acquired through multiple internal read
1481 operations, returning a list instead of a single string may help to
1482 reduce string copies.
1484 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1485 so a 10kB file means length=3kB, 100kB file means length=30kB,
1486 and >=1MB file means length=300kB.
1488 This method provides for a single full pass through the data. Later
1489 use cases may desire multiple passes or access to only parts of the
1490 data (such as a mutable file making small edits-in-place). This API
1491 will be expanded once those use cases are better understood.
1495 """The upload is finished, and whatever filehandle was in use may be
1498 class IUploadResults(Interface):
1499 """I am returned by upload() methods. I contain a number of public
1500 attributes which can be read to determine the results of the upload. Some
1501 of these are functional, some are timing information. All of these may be
1504 .file_size : the size of the file, in bytes
1505 .uri : the CHK read-cap for the file
1506 .ciphertext_fetched : how many bytes were fetched by the helper
1507 .sharemap: dict mapping share identifier to set of serverids
1508 (binary strings). This indicates which servers were given
1509 which shares. For immutable files, the shareid is an
1510 integer (the share number, from 0 to N-1). For mutable
1511 files, it is a string of the form 'seq%d-%s-sh%d',
1512 containing the sequence number, the roothash, and the
1514 .servermap : dict mapping server peerid to a set of share numbers
1515 .timings : dict of timing information, mapping name to seconds (float)
1516 total : total upload time, start to finish
1517 storage_index : time to compute the storage index
1518 peer_selection : time to decide which peers will be used
1519 contacting_helper : initial helper query to upload/no-upload decision
1520 existence_check : helper pre-upload existence check
1521 helper_total : initial helper query to helper finished pushing
1522 cumulative_fetch : helper waiting for ciphertext requests
1523 total_fetch : helper start to last ciphertext response
1524 cumulative_encoding : just time spent in zfec
1525 cumulative_sending : just time spent waiting for storage servers
1526 hashes_and_close : last segment push to shareholder close
1527 total_encode_and_push : first encode to shareholder close
1531 class IDownloadResults(Interface):
1532 """I am created internally by download() methods. I contain a number of
1533 public attributes which contain details about the download process.::
1535 .file_size : the size of the file, in bytes
1536 .servers_used : set of server peerids that were used during download
1537 .server_problems : dict mapping server peerid to a problem string. Only
1538 servers that had problems (bad hashes, disconnects) are
1540 .servermap : dict mapping server peerid to a set of share numbers. Only
1541 servers that had any shares are listed here.
1542 .timings : dict of timing information, mapping name to seconds (float)
1543 peer_selection : time to ask servers about shares
1544 servers_peer_selection : dict of peerid to DYHB-query time
1545 uri_extension : time to fetch a copy of the URI extension block
1546 hashtrees : time to fetch the hash trees
1547 segments : time to fetch, decode, and deliver segments
1548 cumulative_fetch : time spent waiting for storage servers
1549 cumulative_decode : just time spent in zfec
1550 cumulative_decrypt : just time spent in decryption
1551 total : total download time, start to finish
1552 fetch_per_server : dict of peerid to list of per-segment fetch times
1556 class IUploader(Interface):
1557 def upload(uploadable):
1558 """Upload the file. 'uploadable' must impement IUploadable. This
1559 returns a Deferred which fires with an UploadResults instance, from
1560 which the URI of the file can be obtained as results.uri ."""
1562 def upload_ssk(write_capability, new_version, uploadable):
1563 """TODO: how should this work?"""
1565 class ICheckable(Interface):
1566 def check(monitor, verify=False, add_lease=False):
1567 """Check upon my health, optionally repairing any problems.
1569 This returns a Deferred that fires with an instance that provides
1570 ICheckResults, or None if the object is non-distributed (i.e. LIT
1573 The monitor will be checked periodically to see if the operation has
1574 been cancelled. If so, no new queries will be sent, and the Deferred
1575 will fire (with a OperationCancelledError) immediately.
1577 Filenodes and dirnodes (which provide IFilesystemNode) are also
1578 checkable. Instances that represent verifier-caps will be checkable
1579 but not downloadable. Some objects (like LIT files) do not actually
1580 live in the grid, and their checkers return None (non-distributed
1581 files are always healthy).
1583 If verify=False, a relatively lightweight check will be performed: I
1584 will ask all servers if they have a share for me, and I will believe
1585 whatever they say. If there are at least N distinct shares on the
1586 grid, my results will indicate r.is_healthy()==True. This requires a
1587 roundtrip to each server, but does not transfer very much data, so
1588 the network bandwidth is fairly low.
1590 If verify=True, a more resource-intensive check will be performed:
1591 every share will be downloaded, and the hashes will be validated on
1592 every bit. I will ignore any shares that failed their hash checks. If
1593 there are at least N distinct valid shares on the grid, my results
1594 will indicate r.is_healthy()==True. This requires N/k times as much
1595 download bandwidth (and server disk IO) as a regular download. If a
1596 storage server is holding a corrupt share, or is experiencing memory
1597 failures during retrieval, or is malicious or buggy, then
1598 verification will detect the problem, but checking will not.
1600 If add_lease=True, I will ensure that an up-to-date lease is present
1601 on each share. The lease secrets will be derived from by node secret
1602 (in BASEDIR/private/secret), so either I will add a new lease to the
1603 share, or I will merely renew the lease that I already had. In a
1604 future version of the storage-server protocol (once Accounting has
1605 been implemented), there may be additional options here to define the
1606 kind of lease that is obtained (which account number to claim, etc).
1608 TODO: any problems seen during checking will be reported to the
1609 health-manager.furl, a centralized object which is responsible for
1610 figuring out why files are unhealthy so corrective action can be
1614 def check_and_repair(monitor, verify=False, add_lease=False):
1615 """Like check(), but if the file/directory is not healthy, attempt to
1618 Any non-healthy result will cause an immediate repair operation, to
1619 generate and upload new shares. After repair, the file will be as
1620 healthy as we can make it. Details about what sort of repair is done
1621 will be put in the check-and-repair results. The Deferred will not
1622 fire until the repair is complete.
1624 This returns a Deferred which fires with an instance of
1625 ICheckAndRepairResults."""
1627 class IDeepCheckable(Interface):
1628 def start_deep_check(verify=False, add_lease=False):
1629 """Check upon the health of me and everything I can reach.
1631 This is a recursive form of check(), useable only on dirnodes.
1633 I return a Monitor, with results that are an IDeepCheckResults
1636 TODO: If any of the directories I traverse are unrecoverable, the
1637 Monitor will report failure. If any of the files I check upon are
1638 unrecoverable, those problems will be reported in the
1639 IDeepCheckResults as usual, and the Monitor will not report a
1643 def start_deep_check_and_repair(verify=False, add_lease=False):
1644 """Check upon the health of me and everything I can reach. Repair
1645 anything that isn't healthy.
1647 This is a recursive form of check_and_repair(), useable only on
1650 I return a Monitor, with results that are an
1651 IDeepCheckAndRepairResults object.
1653 TODO: If any of the directories I traverse are unrecoverable, the
1654 Monitor will report failure. If any of the files I check upon are
1655 unrecoverable, those problems will be reported in the
1656 IDeepCheckResults as usual, and the Monitor will not report a
1660 class ICheckResults(Interface):
1661 """I contain the detailed results of a check/verify operation.
1664 def get_storage_index():
1665 """Return a string with the (binary) storage index."""
1666 def get_storage_index_string():
1667 """Return a string with the (printable) abbreviated storage index."""
1669 """Return the (string) URI of the object that was checked."""
1672 """Return a boolean, True if the file/dir is fully healthy, False if
1673 it is damaged in any way. Non-distributed LIT files always return
1676 def is_recoverable():
1677 """Return a boolean, True if the file/dir can be recovered, False if
1678 not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
1679 files always return True."""
1681 def needs_rebalancing():
1682 """Return a boolean, True if the file/dir's reliability could be
1683 improved by moving shares to new servers. Non-distributed LIT files
1684 always return False."""
1688 """Return a dictionary that describes the state of the file/dir. LIT
1689 files always return an empty dictionary. Normal files and directories return a
1690 dictionary with the following keys (note that these use binary strings rather than
1691 base32-encoded ones) (also note that for mutable files, these counts are for the 'best'
1694 count-shares-good: the number of distinct good shares that were found
1695 count-shares-needed: 'k', the number of shares required for recovery
1696 count-shares-expected: 'N', the number of total shares generated
1697 count-good-share-hosts: the number of distinct storage servers with
1698 good shares. If this number is less than
1699 count-shares-good, then some shares are
1700 doubled up, increasing the correlation of
1701 failures. This indicates that one or more
1702 shares should be moved to an otherwise unused
1703 server, if one is available.
1704 count-corrupt-shares: the number of shares with integrity failures
1705 list-corrupt-shares: a list of 'share locators', one for each share
1706 that was found to be corrupt. Each share
1707 locator is a list of (serverid, storage_index,
1709 count-incompatible-shares: the number of shares which are of a share format unknown to
1711 list-incompatible-shares: a list of 'share locators', one for each share that was found
1712 to be of an unknown format. Each share locator is a list of
1713 (serverid, storage_index, sharenum).
1714 servers-responding: list of (binary) storage server identifiers,
1715 one for each server which responded to the share
1716 query (even if they said they didn't have shares,
1717 and even if they said they did have shares but then
1718 didn't send them when asked, or dropped the
1719 connection, or returned a Failure, and even if they
1720 said they did have shares and sent incorrect ones
1722 sharemap: dict mapping share identifier to list of serverids
1723 (binary strings). This indicates which servers are holding
1724 which shares. For immutable files, the shareid is an
1725 integer (the share number, from 0 to N-1). For mutable
1726 files, it is a string of the form 'seq%d-%s-sh%d',
1727 containing the sequence number, the roothash, and the
1730 The following keys are most relevant for mutable files, but immutable
1731 files will provide sensible values too::
1733 count-wrong-shares: the number of shares for versions other than the
1734 'best' one (which is defined as being the
1735 recoverable version with the highest sequence
1736 number, then the highest roothash). These are
1737 either leftover shares from an older version
1738 (perhaps on a server that was offline when an
1739 update occurred), shares from an unrecoverable
1740 newer version, or shares from an alternate
1741 current version that results from an
1742 uncoordinated write collision. For a healthy
1743 file, this will equal 0.
1745 count-recoverable-versions: the number of recoverable versions of
1746 the file. For a healthy file, this will
1749 count-unrecoverable-versions: the number of unrecoverable versions
1750 of the file. For a healthy file, this
1756 """Return a string with a brief (one-line) summary of the results."""
1759 """Return a list of strings with more detailed results."""
1761 class ICheckAndRepairResults(Interface):
1762 """I contain the detailed results of a check/verify/repair operation.
1764 The IFilesystemNode.check()/verify()/repair() methods all return
1765 instances that provide ICheckAndRepairResults.
1768 def get_storage_index():
1769 """Return a string with the (binary) storage index."""
1770 def get_storage_index_string():
1771 """Return a string with the (printable) abbreviated storage index."""
1772 def get_repair_attempted():
1773 """Return a boolean, True if a repair was attempted."""
1774 def get_repair_successful():
1775 """Return a boolean, True if repair was attempted and the file/dir
1776 was fully healthy afterwards. False if no repair was attempted or if
1777 a repair attempt failed."""
1778 def get_pre_repair_results():
1779 """Return an ICheckResults instance that describes the state of the
1780 file/dir before any repair was attempted."""
1781 def get_post_repair_results():
1782 """Return an ICheckResults instance that describes the state of the
1783 file/dir after any repair was attempted. If no repair was attempted,
1784 the pre-repair and post-repair results will be identical."""
1787 class IDeepCheckResults(Interface):
1788 """I contain the results of a deep-check operation.
1790 This is returned by a call to ICheckable.deep_check().
1793 def get_root_storage_index_string():
1794 """Return the storage index (abbreviated human-readable string) of
1795 the first object checked."""
1797 """Return a dictionary with the following keys::
1799 count-objects-checked: count of how many objects were checked
1800 count-objects-healthy: how many of those objects were completely
1802 count-objects-unhealthy: how many were damaged in some way
1803 count-objects-unrecoverable: how many were unrecoverable
1804 count-corrupt-shares: how many shares were found to have
1805 corruption, summed over all objects
1809 def get_corrupt_shares():
1810 """Return a set of (serverid, storage_index, sharenum) for all shares
1811 that were found to be corrupt. Both serverid and storage_index are
1814 def get_all_results():
1815 """Return a dictionary mapping pathname (a tuple of strings, ready to
1816 be slash-joined) to an ICheckResults instance, one for each object
1817 that was checked."""
1819 def get_results_for_storage_index(storage_index):
1820 """Retrive the ICheckResults instance for the given (binary)
1821 storage index. Raises KeyError if there are no results for that
1825 """Return a dictionary with the same keys as
1826 IDirectoryNode.deep_stats()."""
1828 class IDeepCheckAndRepairResults(Interface):
1829 """I contain the results of a deep-check-and-repair operation.
1831 This is returned by a call to ICheckable.deep_check_and_repair().
1834 def get_root_storage_index_string():
1835 """Return the storage index (abbreviated human-readable string) of
1836 the first object checked."""
1838 """Return a dictionary with the following keys::
1840 count-objects-checked: count of how many objects were checked
1841 count-objects-healthy-pre-repair: how many of those objects were
1842 completely healthy (before any
1844 count-objects-unhealthy-pre-repair: how many were damaged in
1846 count-objects-unrecoverable-pre-repair: how many were unrecoverable
1847 count-objects-healthy-post-repair: how many of those objects were
1848 completely healthy (after any
1850 count-objects-unhealthy-post-repair: how many were damaged in
1852 count-objects-unrecoverable-post-repair: how many were
1854 count-repairs-attempted: repairs were attempted on this many
1855 objects. The count-repairs- keys will
1856 always be provided, however unless
1857 repair=true is present, they will all
1859 count-repairs-successful: how many repairs resulted in healthy
1861 count-repairs-unsuccessful: how many repairs resulted did not
1862 results in completely healthy objects
1863 count-corrupt-shares-pre-repair: how many shares were found to
1864 have corruption, summed over all
1865 objects examined (before any
1867 count-corrupt-shares-post-repair: how many shares were found to
1868 have corruption, summed over all
1869 objects examined (after any
1874 """Return a dictionary with the same keys as
1875 IDirectoryNode.deep_stats()."""
1877 def get_corrupt_shares():
1878 """Return a set of (serverid, storage_index, sharenum) for all shares
1879 that were found to be corrupt before any repair was attempted. Both
1880 serverid and storage_index are binary.
1882 def get_remaining_corrupt_shares():
1883 """Return a set of (serverid, storage_index, sharenum) for all shares
1884 that were found to be corrupt after any repair was completed. Both
1885 serverid and storage_index are binary. These are shares that need
1886 manual inspection and probably deletion.
1888 def get_all_results():
1889 """Return a dictionary mapping pathname (a tuple of strings, ready to
1890 be slash-joined) to an ICheckAndRepairResults instance, one for each
1891 object that was checked."""
1893 def get_results_for_storage_index(storage_index):
1894 """Retrive the ICheckAndRepairResults instance for the given (binary)
1895 storage index. Raises KeyError if there are no results for that
1899 class IRepairable(Interface):
1900 def repair(check_results):
1901 """Attempt to repair the given object. Returns a Deferred that fires
1902 with a IRepairResults object.
1904 I must be called with an object that implements ICheckResults, as
1905 proof that you have actually discovered a problem with this file. I
1906 will use the data in the checker results to guide the repair process,
1907 such as which servers provided bad data and should therefore be
1908 avoided. The ICheckResults object is inside the
1909 ICheckAndRepairResults object, which is returned by the
1910 ICheckable.check() method::
1912 d = filenode.check(repair=False)
1913 def _got_results(check_and_repair_results):
1914 check_results = check_and_repair_results.get_pre_repair_results()
1915 return filenode.repair(check_results)
1916 d.addCallback(_got_results)
1920 class IRepairResults(Interface):
1921 """I contain the results of a repair operation."""
1924 class IClient(Interface):
1925 def upload(uploadable):
1926 """Upload some data into a CHK, get back the UploadResults for it.
1927 @param uploadable: something that implements IUploadable
1928 @return: a Deferred that fires with the UploadResults instance.
1929 To get the URI for this file, use results.uri .
1932 def create_mutable_file(contents=""):
1933 """Create a new mutable file with contents, get back the URI string.
1934 @param contents: the initial contents to place in the file.
1935 @return: a Deferred that fires with tne (string) SSK URI for the new
1939 def create_empty_dirnode():
1940 """Create a new dirnode, empty and unattached.
1941 @return: a Deferred that fires with the new IDirectoryNode instance.
1944 def create_node_from_uri(uri):
1945 """Create a new IFilesystemNode instance from the uri, synchronously.
1946 @param uri: a string or IURI-providing instance. This could be for a
1947 LiteralFileNode, a CHK file node, a mutable file node, or
1949 @return: an instance that provides IFilesystemNode (or more usefully one
1950 of its subclasses). File-specifying URIs will result in
1951 IFileNode or IMutableFileNode -providing instances, like
1952 FileNode, LiteralFileNode, or MutableFileNode.
1953 Directory-specifying URIs will result in
1954 IDirectoryNode-providing instances, like NewDirectoryNode.
1957 class IClientStatus(Interface):
1958 def list_all_uploads():
1959 """Return a list of uploader objects, one for each upload which
1960 currently has an object available (tracked with weakrefs). This is
1961 intended for debugging purposes."""
1962 def list_active_uploads():
1963 """Return a list of active IUploadStatus objects."""
1964 def list_recent_uploads():
1965 """Return a list of IUploadStatus objects for the most recently
1968 def list_all_downloads():
1969 """Return a list of downloader objects, one for each download which
1970 currently has an object available (tracked with weakrefs). This is
1971 intended for debugging purposes."""
1972 def list_active_downloads():
1973 """Return a list of active IDownloadStatus objects."""
1974 def list_recent_downloads():
1975 """Return a list of IDownloadStatus objects for the most recently
1976 started downloads."""
1978 class IUploadStatus(Interface):
1980 """Return a timestamp (float with seconds since epoch) indicating
1981 when the operation was started."""
1982 def get_storage_index():
1983 """Return a string with the (binary) storage index in use on this
1984 upload. Returns None if the storage index has not yet been
1987 """Return an integer with the number of bytes that will eventually
1988 be uploaded for this file. Returns None if the size is not yet known.
1991 """Return True if this upload is using a Helper, False if not."""
1993 """Return a string describing the current state of the upload
1996 """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
1997 each from 0.0 to 1.0 . 'chk' describes how much progress has been
1998 made towards hashing the file to determine a CHK encryption key: if
1999 non-convergent encryption is in use, this will be trivial, otherwise
2000 the whole file must be hashed. 'ciphertext' describes how much of the
2001 ciphertext has been pushed to the helper, and is '1.0' for non-helper
2002 uploads. 'encode_and_push' describes how much of the encode-and-push
2003 process has finished: for helper uploads this is dependent upon the
2004 helper providing progress reports. It might be reasonable to add all
2005 three numbers and report the sum to the user."""
2007 """Return True if the upload is currently active, False if not."""
2009 """Return an instance of UploadResults (which contains timing and
2010 sharemap information). Might return None if the upload is not yet
2013 """Each upload status gets a unique number: this method returns that
2014 number. This provides a handle to this particular upload, so a web
2015 page can generate a suitable hyperlink."""
2017 class IDownloadStatus(Interface):
2019 """Return a timestamp (float with seconds since epoch) indicating
2020 when the operation was started."""
2021 def get_storage_index():
2022 """Return a string with the (binary) storage index in use on this
2023 download. This may be None if there is no storage index (i.e. LIT
2026 """Return an integer with the number of bytes that will eventually be
2027 retrieved for this file. Returns None if the size is not yet known.
2030 """Return True if this download is using a Helper, False if not."""
2032 """Return a string describing the current state of the download
2035 """Returns a float (from 0.0 to 1.0) describing the amount of the
2036 download that has completed. This value will remain at 0.0 until the
2037 first byte of plaintext is pushed to the download target."""
2039 """Return True if the download is currently active, False if not."""
2041 """Each download status gets a unique number: this method returns
2042 that number. This provides a handle to this particular download, so a
2043 web page can generate a suitable hyperlink."""
2045 class IServermapUpdaterStatus(Interface):
2047 class IPublishStatus(Interface):
2049 class IRetrieveStatus(Interface):
2052 class NotCapableError(Exception):
2053 """You have tried to write to a read-only node."""
2055 class BadWriteEnablerError(Exception):
2058 class RIControlClient(RemoteInterface):
2060 def wait_for_client_connections(num_clients=int):
2061 """Do not return until we have connections to at least NUM_CLIENTS
2065 def upload_from_file_to_uri(filename=str, convergence=ChoiceOf(None, StringConstraint(2**20))):
2066 """Upload a file to the grid. This accepts a filename (which must be
2067 absolute) that points to a file on the node's local disk. The node will
2068 read the contents of this file, upload it to the grid, then return the
2069 URI at which it was uploaded. If convergence is None then a random
2070 encryption key will be used, else the plaintext will be hashed, then
2071 that hash will be mixed together with the "convergence" string to form
2076 def download_from_uri_to_file(uri=URI, filename=str):
2077 """Download a file from the grid, placing it on the node's local disk
2078 at the given filename (which must be absolute[?]). Returns the
2079 absolute filename where the file was written."""
2084 def get_memory_usage():
2085 """Return a dict describes the amount of memory currently in use. The
2086 keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers,
2087 measuring memory consupmtion in bytes."""
2088 return DictOf(str, int)
2090 def speed_test(count=int, size=int, mutable=Any()):
2091 """Write 'count' tempfiles to disk, all of the given size. Measure
2092 how long (in seconds) it takes to upload them all to the servers.
2093 Then measure how long it takes to download all of them. If 'mutable'
2094 is 'create', time creation of mutable files. If 'mutable' is
2095 'upload', then time access to the same mutable file instead of
2098 Returns a tuple of (upload_time, download_time).
2100 return (float, float)
2102 def measure_peer_response_time():
2103 """Send a short message to each connected peer, and measure the time
2104 it takes for them to respond to it. This is a rough measure of the
2105 application-level round trip time.
2107 @return: a dictionary mapping peerid to a float (RTT time in seconds)
2110 return DictOf(Nodeid, float)
2112 UploadResults = Any() #DictOf(str, str)
2114 class RIEncryptedUploadable(RemoteInterface):
2115 __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
2120 def get_all_encoding_parameters():
2121 return (int, int, int, long)
2123 def read_encrypted(offset=Offset, length=ReadSize):
2126 def get_plaintext_hashtree_leaves(first=int, last=int, num_segments=int):
2129 def get_plaintext_hash():
2136 class RICHKUploadHelper(RemoteInterface):
2137 __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
2141 Return a dictionary of version information.
2143 return DictOf(str, Any())
2145 def upload(reader=RIEncryptedUploadable):
2146 return UploadResults
2149 class RIHelper(RemoteInterface):
2150 __remote_name__ = "RIHelper.tahoe.allmydata.com"
2154 Return a dictionary of version information.
2156 return DictOf(str, Any())
2158 def upload_chk(si=StorageIndex):
2159 """See if a file with a given storage index needs uploading. The
2160 helper will ask the appropriate storage servers to see if the file
2161 has already been uploaded. If so, the helper will return a set of
2162 'upload results' that includes whatever hashes are needed to build
2163 the read-cap, and perhaps a truncated sharemap.
2165 If the file has not yet been uploaded (or if it was only partially
2166 uploaded), the helper will return an empty upload-results dictionary
2167 and also an RICHKUploadHelper object that will take care of the
2168 upload process. The client should call upload() on this object and
2169 pass it a reference to an RIEncryptedUploadable object that will
2170 provide ciphertext. When the upload is finished, the upload() method
2171 will finish and return the upload results.
2173 return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
2176 class RIStatsProvider(RemoteInterface):
2177 __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
2179 Provides access to statistics and monitoring information.
2184 returns a dictionary containing 'counters' and 'stats', each a dictionary
2185 with string counter/stat name keys, and numeric values. counters are
2186 monotonically increasing measures of work done, and stats are instantaneous
2187 measures (potentially time averaged internally)
2189 return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))
2191 class RIStatsGatherer(RemoteInterface):
2192 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
2194 Provides a monitoring service for centralised collection of stats
2197 def provide(provider=RIStatsProvider, nickname=str):
2199 @param provider: a stats collector instance which should be polled
2200 periodically by the gatherer to collect stats.
2201 @param nickname: a name useful to identify the provided client
2206 class IStatsProducer(Interface):
2209 returns a dictionary, with str keys representing the names of stats
2210 to be monitored, and numeric values.
2213 class RIKeyGenerator(RemoteInterface):
2214 __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
2216 Provides a service offering to make RSA key pairs.
2219 def get_rsa_key_pair(key_size=int):
2221 @param key_size: the size of the signature key.
2222 @return: tuple(verifying_key, signing_key)
2224 return TupleOf(str, str)
2227 class FileTooLargeError(Exception):
2230 class IValidatedThingProxy(Interface):
2232 """ Acquire a thing and validate it. Return a deferred which is eventually fired with
2233 self if the thing is valid or errbacked if it can't be acquired or validated. """
2235 class InsufficientVersionError(Exception):
2236 def __init__(self, needed, got):
2237 self.needed = needed
2240 return "InsufficientVersionError(need '%s', got %s)" % (self.needed,