from zope.interface import Interface
from foolscap.schema import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf, IntegerConstraint
from foolscap import RemoteInterface, Referenceable

HASH_SIZE = 32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
StorageIndex = StringConstraint(16)
URI = StringConstraint(300) # kind of arbitrary

MAX_BUCKETS = 200 # per peer

ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2GiB -- large files are processed in not-so-large increments
LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests

# Announcements are (FURL, service_name, remoteinterface_name,
#                    nickname, my_version, oldest_supported)
# the (FURL, service_name, remoteinterface_name) refer to the service being
# announced. The (nickname, my_version, oldest_supported) refer to the
# client as a whole. The my_version/oldest_supported strings can be parsed
# by an allmydata.util.version.Version instance, and then compared. The
# first goal is to make sure that nodes are not confused by speaking to an
# incompatible peer. The second goal is to enable the development of
# backwards-compatibility code.

Announcement = TupleOf(FURL, str, str,
                       str, str, str)
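
# For example (illustrative values only, not from a real grid), a storage
# server's announcement might look like:
#   ("pb://abc123@example.net:1234/storage-swissnum", "storage",
#    "RIStorageServer.tahoe.allmydata.com",
#    "nickname", "allmydata-tahoe/1.0", "0.6.0")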

class RIIntroducerSubscriberClient(RemoteInterface):
    __remote_name__ = "RIIntroducerSubscriberClient.tahoe.allmydata.com"

    def announce(announcements=SetOf(Announcement)):
        """I accept announcements from the publisher."""
        return None

    def set_encoding_parameters(parameters=(int, int, int)):
        """Advise the client of the recommended k-of-n encoding parameters
        for this grid. 'parameters' is a tuple of (k, desired, n), where
        'n' is the total number of shares that will be created for any
        given file, while 'k' is the number of shares that must be
        retrieved to recover that file, and 'desired' is the minimum number
        of shares that must be placed before the uploader will consider its
        job a success. n/k is the expansion ratio, while k determines the
        robustness.

        Introducers should specify 'n' according to the expected size of
        the grid (there is no point in producing more shares than there are
        peers), and 'k' according to the desired reliability-vs-overhead
        goals.

        Note that setting k=1 is equivalent to simple replication.
        """
        return None
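
# A minimal sketch (hypothetical helper, not part of the interface) of the
# arithmetic described by set_encoding_parameters() above:
def _example_encoding_tradeoff(filesize=1000000, k=3, n=10):
    # With k=3 and n=10, any 3 of the 10 shares suffice to recover the
    # file, and the grid stores n/k ~= 3.3 times the original size.
    # Setting k=1, n=10 would be plain 10x replication.
    expansion_ratio = float(n) / k
    total_stored_bytes = filesize * expansion_ratio
    return expansion_ratio, total_stored_bytes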

# When Foolscap can handle multiple interfaces (Foolscap#17), the
# full-powered introducer will implement both RIIntroducerPublisher and
# RIIntroducerSubscriberService. Until then, we define
# RIIntroducerPublisherAndSubscriberService as a combination of the two,
# and make everybody use that.

class RIIntroducerPublisher(RemoteInterface):
    """To publish a service to the world, connect to me and give me your
    announcement message. I will deliver a copy to all connected
    subscribers."""
    __remote_name__ = "RIIntroducerPublisher.tahoe.allmydata.com"

    def publish(announcement=Announcement):
        return None

class RIIntroducerSubscriberService(RemoteInterface):
    __remote_name__ = "RIIntroducerSubscriberService.tahoe.allmydata.com"

    def subscribe(subscriber=RIIntroducerSubscriberClient, service_name=str):
        """Give me a subscriber reference, and I will call its announce()
        method with any announcements that match the desired service name.
        I will ignore duplicate subscriptions.
        """
        return None

class RIIntroducerPublisherAndSubscriberService(RemoteInterface):
    __remote_name__ = "RIIntroducerPublisherAndSubscriberService.tahoe.allmydata.com"
    def publish(announcement=Announcement):
        return None
    def subscribe(subscriber=RIIntroducerSubscriberClient, service_name=str):
        return None

class IIntroducerClient(Interface):
    """I provide service introduction facilities for a node. I help nodes
    publish their services to the rest of the world, and I help them learn
    about services available on other nodes."""

    def publish(furl, service_name, remoteinterface_name):
        """Once you call this, I will tell the world that the Referenceable
        available at FURL is available to provide a service named
        SERVICE_NAME. The precise definition of the service being provided
        is identified by the Foolscap 'remote interface name' in the last
        parameter: this is supposed to be a globally-unique string that
        identifies the RemoteInterface that is implemented."""

    def subscribe_to(service_name):
        """Call this if you will eventually want to use services with the
        given SERVICE_NAME. This will prompt me to subscribe to
        announcements of those services. You can pick up the announcements
        later by calling get_all_connections_for() or get_permuted_peers().
        """

    def get_all_connections():
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection we've established to a remote service.
        This is mostly useful for unit tests that need to wait until a
        certain number of connections have been made."""

    def get_all_connectors():
        """Return a dict that maps from (nodeid, service_name) to a
        RemoteServiceConnector instance for all services that we are
        actively trying to connect to. Each RemoteServiceConnector has the
        following public attributes::

          service_name: the type of service provided, like 'storage'
          announcement_time: when we first heard about this service
          last_connect_time: when we last established a connection
          last_loss_time: when we last lost a connection

          version: the peer's version, from the most recent connection
          oldest_supported: the peer's oldest supported version, same

          rref: the RemoteReference, if connected, otherwise None
          remote_host: the IAddress, if connected, otherwise None

        This method is intended for monitoring interfaces, such as a web
        page which describes connecting and connected peers.
        """

    def get_all_peerids():
        """Return a frozenset of all peerids to whom we have established a
        connection (to one or more services). Mostly useful for unit
        tests."""

    def get_all_connections_for(service_name):
        """Return a frozenset of (nodeid, service_name, rref) tuples, one
        for each active connection that provides the given SERVICE_NAME."""

    def get_permuted_peers(service_name, key):
        """Returns an ordered list of (peerid, rref) tuples, selecting from
        the connections that provide SERVICE_NAME, using a hash-based
        permutation keyed by KEY. This randomizes the service list in a
        repeatable way, to distribute load over many peers.
        """

    def connected_to_introducer():
        """Returns a boolean, True if we are currently connected to the
        introducer, False if not."""
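
# A minimal sketch (hypothetical helper, not part of this module) of the
# hash-based permutation described by get_permuted_peers() above: sorting
# by H(key + peerid) is repeatable for a given key, but spreads load
# across peers as keys vary.
import hashlib

def _example_permuted_peers(connections, key):
    # 'connections' is a sequence of (peerid, rref) pairs
    return sorted(connections,
                  key=lambda peer: hashlib.sha256(key + peer[0]).digest())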

class RIStubClient(RemoteInterface):
    """Each client publishes a service announcement for a dummy object
    called the StubClient. This object doesn't actually offer any services,
    but the announcement helps the Introducer keep track of which clients
    are subscribed (so the grid admin can keep track of things like the
    size of the grid and the client versions in use). This is the (empty)
    RemoteInterface for the StubClient."""

class RIBucketWriter(RemoteInterface):
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent
        then the server will throw the data away, else it will store it
        for future retrieval.
        """
        return None

    def abort():
        """Abandon all the data that has been written.
        """
        return None

class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

TestVector = ListOf(TupleOf(Offset, ReadSize, str, str))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt, nop"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector
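
# An illustrative sketch (hypothetical values, not part of the protocol
# definition) of the shapes these vectors take for a single share number:
#
#   testv = [(0, 8, "eq", "\x00" * 8)]   # compare 8 bytes at offset 0
#   datav = [(0, "new share contents")]  # write these bytes at offset 0
#   new_length = None                    # leave the container size alone
#   tw_vectors = {0: (testv, datav, new_length)}
#   readv = [(0, 100)]                   # read the first 100 bytes back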

class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_versions():
        """Return a tuple of (my_version, oldest_supported) strings.
        Each string can be parsed by an allmydata.util.version.Version
        instance, and then compared. The first goal is to make sure that
        nodes are not confused by speaking to an incompatible peer. The
        second goal is to enable the development of backwards-compatibility
        code.

        This method is likely to change in incompatible ways until we get
        the whole compatibility scheme nailed down.
        """
        return TupleOf(str, str)

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0
                          and 99) that the sender is proposing to store on
                          this server.
        @param renew_secret: This is the secret used to protect bucket
                             refresh requests. This secret is generated by
                             the client and stored for later comparison by
                             the server. Each server is given a different
                             secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what
                 we already have and allocated is what we hereby agree to
                 accept. New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))

    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
        """
        Renew the lease on a given bucket. Some networks will use this,
        some will not.
        """
        return None

    def cancel_lease(storage_index=StorageIndex,
                     cancel_secret=LeaseCancelSecret):
        """
        Cancel the lease on a given bucket. If this was the last lease on
        the bucket, the bucket will be deleted.
        """
        return None

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(Hash, Hash, Hash),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """General-purpose test-and-set operation for mutable slots.
        Perform a bunch of comparisons against the existing shares. If they
        all pass, then apply a bunch of write vectors to those shares. Then
        use the read vectors to extract data from all the shares and return
        the data.

        This method is, um, large. The goal is to allow clients to update
        all the shares associated with a mutable file in a single round
        trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different
                              secret should be used for each slot*server
                              pair.
        @param renew_secret: This is the secret used to protect bucket
                             refresh requests. This secret is generated by
                             the client and stored for later comparison by
                             the server. Each server is given a different
                             secret.
        @param cancel_secret: Like renew_secret, but protects bucket decref.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty
        writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then
        all writes for all shares are recorded. Each comparison is a
        4-tuple of (offset, length, operator, specimen), which effectively
        does a bool( (read(offset, length)) OPERATOR specimen ) and only
        performs the write if all these evaluate to True. Basic
        test-and-set uses 'eq'. Write-if-newer uses a seqnum and (offset,
        length, 'lt', specimen). Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing
        shares behave like empty ones, so to assert that a share doesn't
        exist (for use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it
        if necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created.

        Each write vector is accompanied by a 'new_length' argument. If
        new_length is not None, use it to set the size of the container.
        This can be used to pre-allocate space for a series of upcoming
        writes, or truncate existing data. If the container is growing,
        new_length will be applied before datav. If the container is
        shrinking, it will be applied afterwards.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same vector is used for
        all shares. This captures the state that was tested by the test
        vector.

        This method returns two values: a boolean and a dict. The boolean
        is True if the write vectors were applied, False if not. The dict
        is keyed by share number, and each value contains a list of
        strings, one for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration, the exception will have the nodeid used
        for the old write enabler embedded in it, in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))
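
# An illustrative sketch (hypothetical values, not a prescribed recipe) of
# the "assert the share doesn't exist, then create it" pattern described
# in slot_testv_and_readv_and_writev() above:
#
#   testv = [(0, 1, "eq", "")]          # passes only if the share is absent
#   datav = [(0, initial_contents)]     # write the initial contents
#   tw_vectors = {0: (testv, datav, None)}
#
# Write-if-newer would instead compare an embedded sequence number:
#   testv = [(seqnum_offset, seqnum_length, "lt", packed_new_seqnum)]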

class IStorageBucketWriter(Interface):
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation
                 completes
        """

    def put_plaintext_hashes(hashes=ListOf(Hash, maxLength=2**20)):
        """
        @return: a Deferred that fires (with None) when the operation
                 completes
        """

    def put_crypttext_hashes(hashes=ListOf(Hash, maxLength=2**20)):
        """
        @return: a Deferred that fires (with None) when the operation
                 completes
        """

    def put_block_hashes(blockhashes=ListOf(Hash, maxLength=2**20)):
        """
        @return: a Deferred that fires (with None) when the operation
                 completes
        """

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash),
                                            maxLength=2**20)):
        """
        @return: a Deferred that fires (with None) when the operation
                 completes
        """

    def put_uri_extension(data=URIExtensionData):
        """This block of data contains integrity-checking information
        (hashes of plaintext, crypttext, and shares), as well as encoding
        parameters that are necessary to recover the data. This is a
        serialized dict mapping strings to other strings. The hash of this
        data is kept in the URI and verified before any of the data is
        used. All buckets for a given file contain identical copies of
        this data.

        The serialization format is specified with the following
        pseudocode:

        for k in sorted(dict.keys()):
            assert re.match(r'^[a-zA-Z_\-]+$', k)
            write(k + ':' + netstring(dict[k]))

        @return: a Deferred that fires (with None) when the operation
                 completes
        """

    def close():
        """Finish writing and close the bucket. The share is not finalized
        until this method is called: if the uploading client disconnects
        before calling close(), the partially-written share will be
        discarded.

        @return: a Deferred that fires (with None) when the operation
                 completes
        """
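
# A minimal sketch (hypothetical helper, not part of the interface) of the
# serialization format described by put_uri_extension() above:
def _example_serialize_uri_extension(d):
    import re
    def netstring(s):
        # standard netstring framing: "<length>:<bytes>,"
        return "%d:%s," % (len(s), s)
    out = []
    for k in sorted(d.keys()):
        assert re.match(r'^[a-zA-Z_\-]+$', k)
        out.append(k + ':' + netstring(d[k]))
    return ''.join(out)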

class IStorageBucketReader(Interface):

    def get_block(blocknum=int):
        """Most blocks will be the same size. The last block might be
        shorter.
        @return: ShareData
        """

    def get_plaintext_hashes():
        """
        @return: ListOf(Hash, maxLength=2**20)
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash, maxLength=2**20)
        """

    def get_block_hashes():
        """
        @return: ListOf(Hash, maxLength=2**20)
        """

    def get_share_hashes():
        """
        @return: ListOf(TupleOf(int, Hash), maxLength=2**20)
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """

# hm, we need a solution for forward references in schemas
from foolscap.schema import Any

FileNode_ = Any() # TODO: foolscap needs constraints on copyables
DirectoryNode_ = Any() # TODO: same
AnyNode_ = ChoiceOf(FileNode_, DirectoryNode_)

class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and
        populate this instance with its data. I am not normally called
        directly, please use the module-level uri.from_string() function
        to convert arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    def get_readonly():
        """Return another IURI instance, which represents a read-only form
        of this one. If is_readonly() is True, this returns self."""

    def get_verifier():
        """Return an instance that provides IVerifierURI, which can be
        used to check on the availability of the file or directory,
        without providing enough capabilities to actually read or modify
        the contents. This may return None if the file does not need
        checking or verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""
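
# An illustrative sketch (assuming the module-level allmydata.uri helpers
# mentioned in the docstrings above; the URI string is hypothetical):
#
#   from allmydata import uri
#   u = uri.from_string("URI:CHK:...")  # parse an arbitrary URI string
#   ro = u.get_readonly()               # read-only form (self if already RO)
#   v = u.get_verifier()                # IVerifierURI, or None (e.g. LIT)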

class IVerifierURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and
        populate this instance with its data. I am not normally called
        directly, please use the module-level uri.from_string() function
        to convert arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""

class IDirnodeURI(Interface):
    """I am a URI which represents a dirnode."""

class IFileURI(Interface):
    """I am a URI which represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""

class IMutableFileURI(Interface):
    """I am a URI which represents a mutable filenode."""
class INewDirectoryURI(Interface):
    pass
class IReadonlyNewDirectoryURI(Interface):
    pass

class IFilesystemNode(Interface):
    def get_uri():
        """
        Return the URI that can be used by others to get access to this
        node. If this node is read-only, the URI will only offer read-only
        access. If this node is read-write, the URI will offer read-write
        access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_readonly_uri():
        """Return the directory URI that can be used by others to get
        read-only access to this directory node. The result is a read-only
        URI, regardless of whether this dirnode is read-only or read-write.

        If you have merely read-only access to this dirnode,
        get_readonly_uri() will return the same thing as get_uri().
        """
527 """Return an IVerifierURI instance that represents the
528 'verifiy/refresh capability' for this node. The holder of this
529 capability will be able to renew the lease for this node, protecting
530 it from garbage-collection. They will also be able to ask a server if
531 it holds a share for the file or directory.
535 """Perform a file check. See IChecker.check for details."""
538 """Return True if this reference provides mutable access to the given
539 file or directory (i.e. if you can modify it), or False if not. Note
540 that even if this reference is read-only, someone else may hold a
541 read-write reference to it."""
544 """Return True if this file or directory is mutable (by *somebody*,
545 not necessarily you), False if it is is immutable. Note that a file
546 might be mutable overall, but your reference to it might be
547 read-only. On the other hand, all references to an immutable file
548 will be read-only; there are no read-write references to an immutable

class IMutableFilesystemNode(IFilesystemNode):
    pass

class IFileNode(IFilesystemNode):
    def download(target):
        """Download the file's contents to a given IDownloadTarget"""

    def download_to_data():
        """Download the file's contents. Return a Deferred that fires
        with those contents."""

    def get_size():
        """Return the length (in bytes) of the data this node represents."""

class IMutableFileNode(IFileNode, IMutableFilesystemNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might
    be unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any
    version with seqnum=N was uploaded by a node which has seen at least
    one version with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and
    which servers are hosting their shares. It is used to represent the
    'state of the world', and is used for this purpose by my test-and-set
    operations. Downloading the contents of the mutable file will also
    return a servermap. Uploading a new version into the mutable file
    requires a servermap as input, and the semantics of the replace
    operation are 'replace the file with my new version if it looks like
    nobody else has changed the file since my previous download'. Because
    the file is distributed, this is not a perfect test-and-set operation,
    but it will do its best. If the replace process sees evidence of a
    simultaneous write, it will signal an UncoordinatedWriteError, so that
    the caller can take corrective action.

    Most readers will want to use the 'best' current version of the file,
    and should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite().
    This is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable
    modifier function that can apply the modification that you want to
    make. This is the mode that dirnodes will use, since most directory
    modification operations can be expressed in terms of deltas to the
    directory state.

    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(),
    which downloads a specific version (not necessarily the 'best' one).
    The third is 'upload', which accepts new contents and a servermap
    (which must have been updated with MODE_WRITE). The upload method will
    attempt to apply the new contents as long as no other node has
    modified the file since the servermap was updated. This might be
    useful to a caller who wants to merge multiple versions into a single
    new one.

    Note that each time the servermap is updated, a specific 'mode' is
    used, which determines how many peers are queried. To use a servermap
    for my replace() method, that servermap must have been updated in
    MODE_WRITE. These modes are defined in allmydata.mutable.common, and
    consist of MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK.
    Please look in allmydata/mutable/servermap.py for details about the
    differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string.
    Future versions of our mutable files will remove this restriction.
    """

    def download_best_version():
        """Download the 'best' available version of the file, meaning one
        of the recoverable versions with the highest sequence number. If
        no uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has been
        uploaded.

        I return a Deferred that fires with a (contents, servermap) pair.
        The servermap is updated with MODE_READ. The contents will be the
        version of the file indicated by
        servermap.best_recoverable_version(). If no version is
        recoverable, the Deferred will errback with UnrecoverableFileError.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with
        new ones. This simply chains get_servermap(MODE_WRITE) and
        upload(). This is only appropriate to use when the new contents of
        the file are completely unrelated to the old ones, and you do not
        care about other clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when
        the update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given two arguments: a string (with
        the old contents) and a servermap. As with download_best_version(),
        the old contents will be from the best recoverable version, but
        the modifier can use the servermap to make other decisions (such
        as refusing to apply the delta if there are multiple parallel
        versions, or if there is evidence of a newer unrecoverable
        version).

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to
        make is already present in the old version. If it does not need to
        make any changes, it can either return None, or return its input
        string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap which
        was updated with MODE_ANYTHING or MODE_READ may not know about
        shares for all versions (those modes stop querying servers as soon
        as they can fulfil their goals), so you may want to use MODE_CHECK
        (which checks everything) to get increased visibility.
        """

    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in
        the servermap. However, if another node is writing to the file at
        the same time as me, I may manage to update some shares while they
        update others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an
        inconsistent state (possibly the version you provided, possibly
        the old version, possibly somebody else's version, and possibly a
        mix of shares from all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient
        to wait a random interval (with exponential backoff) and repeat
        your operation. If I do not signal UncoordinatedWriteError, then I
        was able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when
        the publish has completed. I will update the servermap in-place
        with the location of all new shares.
        """
719 """Return this filenode's writekey, or None if the node does not have
720 write-capability. This may be used to assist with data structures
721 that need to make certain data available only to writers, such as the
722 read-write child caps in dirnodes. The recommended process is to have
723 reader-visible data be submitted to the filenode in the clear (where
724 it will be encrypted by the filenode using the readkey), but encrypt
725 writer-visible data using this writekey.
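
# A minimal sketch (hypothetical function, not part of the codebase) of a
# modifier callable suitable for IMutableFileNode.modify():
def _example_append_line(old_contents, servermap):
    # Must be idempotent: modify() may call this more than once, so check
    # whether the change is already present before re-applying it.
    line = "new entry\n"
    if old_contents.endswith(line):
        return None  # no change needed
    return old_contents + line
# usage (illustrative): d = mutable_filenode.modify(_example_append_line)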

class IDirectoryNode(IMutableFilesystemNode):
    """I represent a name-to-child mapping, holding the tahoe equivalent
    of a directory. All child names are unicode strings, and all children
    are some sort of IFilesystemNode (either files or subdirectories)."""

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other
        out-of-band protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other
        out-of-band protocol.
        """
753 """I return a Deferred that fires with a dictionary mapping child
754 name (a unicode string) to (node, metadata_dict) tuples, in which
755 'node' is either an IFileNode or IDirectoryNode, and 'metadata_dict'
756 is a dictionary of metadata."""
759 """I return a Deferred that fires with a boolean, True if there
760 exists a child of the given name, False if not. The child name must
761 be a unicode string."""
764 """I return a Deferred that fires with a specific named child node,
765 either an IFileNode or an IDirectoryNode. The child name must be a
768 def get_metadata_for(name):
769 """I return a Deferred that fires with the metadata dictionary for a
770 specific named child node. This metadata is stored in the *edge*, not
771 in the child, so it is attached to the parent dirnode rather than the
772 child dir-or-file-node. The child name must be a unicode string."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the
        new metadata. The child name must be a unicode string. This
        metadata is stored in the *edge*, not in the child, so it is
        attached to the parent dirnode rather than the child
        dir-or-file-node. I return a Deferred (that fires with this
        dirnode) when the operation is complete."""

    def get_child_at_path(path):
        """Transform a child path into an IDirectoryNode or IFileNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with IndexError if the node could not be found.

        The path can be either a single string (slash-separated) or a list
        of path-name elements. All elements must be unicode strings.
        """

    def set_uri(name, child_uri, metadata=None):
        """I add a child (by URI) at the specific name. I return a
        Deferred that fires when the operation finishes. I will replace
        any existing child of the same name. The child name must be a
        unicode string.

        The child_uri could be for a file, or for a directory (either
        read-write or read-only, using a URI that came from get_uri() ).

        If metadata= is provided, I will use it as the metadata for the
        named edge. This will replace any existing metadata. If metadata=
        is left as the default value of None, I will set ['mtime'] to the
        current time, and I will set ['ctime'] to the current time if
        there was not already a child by this name present. This roughly
        matches the ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback
        with a NotMutableError."""

    def set_children(entries):
        """Add multiple (name, child_uri) pairs (or (name, child_uri,
        metadata) triples) to a directory node. Returns a Deferred that
        fires (with None) when the operation finishes. This is equivalent
        to calling set_uri() multiple times, but is much more efficient.
        All child names must be unicode strings.
        """

    def set_node(name, child, metadata=None):
        """I add a child at the specific name. I return a Deferred that
        fires when the operation finishes. This Deferred will fire with
        the child node that was just added. I will replace any existing
        child of the same name. The child name must be a unicode string.

        If metadata= is provided, I will use it as the metadata for the
        named edge. This will replace any existing metadata. If metadata=
        is left as the default value of None, I will set ['mtime'] to the
        current time, and I will set ['ctime'] to the current time if
        there was not already a child by this name present. This roughly
        matches the ctime/mtime semantics of traditional filesystems.

        If this directory node is read-only, the Deferred will errback
        with a NotMutableError."""

    def set_nodes(entries):
        """Add multiple (name, child_node) pairs (or (name, child_node,
        metadata) triples) to a directory node. Returns a Deferred that
        fires (with None) when the operation finishes. This is equivalent
        to calling set_node() multiple times, but is much more efficient.
        All child names must be unicode strings."""

    def add_file(name, uploadable, metadata=None):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I set
        metadata the same way as set_uri and set_node. The child name must
        be a unicode string.

        I return a Deferred that fires (with the IFileNode of the uploaded
        file) when the operation completes."""
853 """I remove the child at the specific name. I return a Deferred that
854 fires when the operation finishes. The child name must be a unicode
857 def create_empty_directory(name):
858 """I create and attach an empty directory at the given name. The
859 child name must be a unicode string. I return a Deferred that fires
860 when the operation finishes."""

    def move_child_to(current_child_name, new_parent, new_child_name=None):
        """I take one of my children and move them to a new parent. The
        child is referenced by name. On the new parent, the child will
        live under 'new_child_name', which defaults to
        'current_child_name'. TODO: what should we do about metadata? I
        return a Deferred that fires when the operation finishes. The
        child name must be a unicode string."""

    def build_manifest():
        """Return a Deferred that fires with a frozenset of
        verifier-capability strings for all nodes (directories and files)
        reachable from this one."""
875 """Return a Deferred that fires with a dictionary of statistics
876 computed by examining all nodes (directories and files) reachable
877 from this one, with the following keys::
879 count-immutable-files: count of how many CHK files are in the set
880 count-mutable-files: same, for mutable files (does not include
882 count-literal-files: same, for LIT files
883 count-files: sum of the above three
885 count-directories: count of directories
887 size-immutable-files: total bytes for all CHK files in the set
888 size-mutable-files (TODO): same, for current version of all mutable
889 files, does not include directories
890 size-literal-files: same, for LIT files
891 size-directories: size of mutable files used by directories
893 largest-directory: number of bytes in the largest directory
894 largest-directory-children: number of children in the largest
896 largest-immutable-file: number of bytes in the largest CHK file
898 size-mutable-files is not yet implemented, because it would involve
899 even more queries than deep_stats does.
901 This operation will visit every directory node underneath this one,
902 and can take a long time to run. On a typical workstation with good
903 bandwidth, this can examine roughly 15 directories per second (and
904 takes several minutes of 100% CPU for ~1700 directories).
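
# An illustrative sketch (hypothetical numbers) of the kind of dictionary
# deep_stats() above resolves to:
#   {"count-immutable-files": 10, "count-mutable-files": 2,
#    "count-literal-files": 3, "count-files": 15, "count-directories": 4,
#    "size-immutable-files": 1234567, "size-literal-files": 1000,
#    "size-directories": 4500, "largest-directory": 1500,
#    "largest-directory-children": 30, "largest-immutable-file": 345678}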

class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data
        that are exactly 'data_size' bytes in length. The encoder will be
        prepared to produce 'max_shares' shares for each encode()
        operation (although see the 'desired_share_ids' argument to use
        less CPU). The encoding math will be chosen such that the decoder
        can get by with as few as 'required_shares' of these shares and
        still reproduce the original data. For example, set_params(1000,
        5, 5) offers no redundancy at all, whereas set_params(1000, 1, 10)
        provides 10x redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should
        choose required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to
        max_shares*data_size/required_shares), then choose data_size to
        achieve the memory footprint desired (larger data_size means more
        efficient operation, smaller data_size means smaller memory
        footprint).

        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the
        'split' utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This
        method returns an index into this table; the value at this index
        is an encoder class, and this encoder is an instance of that
        class.
        """

    def get_serialized_params(): # TODO: maybe, maybe not
        """Return a string that describes the parameters of this encoder.

        This string can be passed to the decoder to prepare it for
        handling the encoded shares we create. It might contain more
        information than was presented to set_params(), if there is some
        flexibility of choice.

        This string is intended to be embedded in the URI, so there are
        several restrictions on its contents. At the moment I'm thinking
        that this means it may contain hex digits and hyphens, and nothing
        else. The idea is that the URI contains something like '%s:%s:%s'
        % (encoder.get_encoder_name(), encoder.get_serialized_params(),
        b2a(crypttext_hash)), and this is enough information to construct
        a compatible decoder.
        """

    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """

    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data)
        must be equal to the 'data_size' value passed earlier to
        set_params().

        This will return a Deferred that will fire with two lists. The
        first is a list of shares, each of which is a string (or other
        buffer object) such that len(share) is the same as what
        get_share_size() returned earlier. The second is a list of
        shareids, in which each is an integer. The lengths of the two
        lists will always be equal to each other. The user should take
        care to keep each share closely associated with its shareid, as
        one is useless without the other.

        The length of this output list will normally be the same as the
        value provided to the 'max_shares' parameter of set_params(). This
        may be different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If
        not provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use
        this if you initially thought you were going to use 10 peers,
        started encoding, and then two of the peers dropped out: you could
        use desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        """

    def encode(inshares, desired_share_ids=None):
        """Encode some data. This may be called multiple times. Each call
        is independent.

        inshares is a sequence of length required_shares, containing
        buffers (i.e. strings), where each buffer contains the next
        contiguous non-overlapping segment of the input data. Each buffer
        is required to be the same length, and the sum of the lengths of
        the buffers is required to be exactly the data_size promised by
        set_params(). (This implies that the data has to be padded before
        being passed to encode(), unless of course it already happens to
        be an even multiple of required_shares in length.)

        ALSO: the requirement to break up your data into 'required_shares'
        chunks before calling encode() feels a bit surprising, at least
        from the point of view of a user who doesn't know how FEC works.
        It feels like an implementation detail that has leaked outside the
        abstraction barrier. Can you imagine a use case in which the data
        to be encoded might already be available in pre-segmented chunks,
        such that it is faster or less work to make encode() take a list
        rather than splitting a single string?

        ALSO ALSO: I think 'inshares' is a misleading term, since encode()
        is supposed to *produce* shares, so what it *accepts* should be
        something other than shares. Other places in this interface use
        the word 'data' for that-which-is-not-shares.. maybe we should use
        that word here too.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If
        not provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use
        this if you initially thought you were going to use 10 peers,
        started encoding, and then two of the peers dropped out: you could
        use desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.

        For each call, encode() will return a Deferred that fires with two
        lists, one containing shares and the other containing the
        shareids. The get_share_size() method can be used to determine the
        length of the share strings returned by encode(). Each shareid is
        a small integer, exactly as passed into 'desired_share_ids' (or
        range(max_shares), if desired_share_ids was not provided).

        The shares and their corresponding shareids are required to be
        kept together during storage and retrieval. Specifically, the
        share data is useless by itself: the decoder needs to be told
        which share is which by providing it with both the shareid and the
        actual share data.

        This function will allocate an amount of memory roughly equal
        to::

         (max_shares - required_shares) * get_share_size()

        When combined with the memory that the caller must allocate to
        provide the input data, this leads to a memory footprint roughly
        equal to the size of the resulting encoded shares (i.e. the
        expansion factor times the size of the input segment).
        """

        # rejected ideas:
        #
        # returning a list of (shareidN,shareN) tuples instead of a pair
        # of lists (shareids..,shares..). Brian thought the tuples would
        # encourage users to keep the share and shareid together
        # throughout later processing, Zooko pointed out that the code to
        # iterate through two lists is not really more complicated than
        # using a list of tuples, and there's also a performance
        # improvement.
        #
        # having 'data_size' not required to be an integral multiple of
        # 'required_shares'. Doing this would require encode() to perform
        # padding internally, and we'd prefer to have any padding be done
        # explicitly by the caller. Yes, it is an abstraction leak, but
        # hopefully not an onerous one.

class ICodecDecoder(Interface):
    def set_serialized_params(params):
        """Set up the parameters of this decoder, from a string returned
        by encoder.get_serialized_params()."""

    def get_needed_shares():
        """Return the number of shares needed to reconstruct the data.
        set_serialized_params() is required to be called before this."""

    def decode(some_shares, their_shareids):
        """Decode a partial list of shares into data.

        'some_shares' is required to be a sequence of buffers of
        sharedata, a subset of the shares returned by
        ICodecEncode.encode(). Each share is required to be of the same
        length. The i'th element of their_shareids is required to be the
        shareid of the i'th buffer in some_shares.

        This returns a Deferred which fires with a sequence of buffers.
        This sequence will contain all of the segments of the original
        data, in order. The sum of the lengths of all of the buffers will
        be the 'data_size' value passed into the original
        ICodecEncode.set_params() call. To get back the single original
        input block of data, use ''.join(output_buffers), or you may wish
        to simply write them in order to an output file.

        Note that some of the elements in the result sequence may be
        references to the elements of the some_shares input sequence. In
        particular, this means that if those share objects are mutable
        (e.g. arrays) and if they are changed, then both the input (the
        'some_shares' parameter) and the output (the value given when the
        deferred is triggered) will change.

        The length of 'some_shares' is required to be exactly the value of
        'required_shares' passed into the original
        ICodecEncode.set_params() call.
        """
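
# A round-trip sketch of the encode/decode contract described above, using
# the zfec library directly (an assumption: the concrete codec behind
# these interfaces may differ). Values are illustrative.
def _example_fec_roundtrip():
    import zfec
    k, m = 3, 10
    data = "A" * 300  # data_size must be an integral multiple of k
    inblocks = [data[i*100:(i+1)*100] for i in range(k)]
    shares = zfec.Encoder(k, m).encode(inblocks)  # m shares, ids 0..m-1
    # any k shares, with their ids, suffice to recover the input blocks
    recovered = zfec.Decoder(k, m).decode(shares[2:5], [2, 3, 4])
    assert "".join(recovered) == data
    return recovered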

class IEncoder(Interface):
    """I take an object that provides IEncryptedUploadable, which provides
    encrypted data, and a list of shareholders. I then encode, hash, and
    deliver shares to those shareholders. I will compute all the necessary
    Merkle hash trees that are necessary to validate the crypttext that
    eventually comes back from the shareholders. I provide the URI
    Extension Block Hash, and the encoding parameters, both of which must
    be included in the URI.

    I do not choose shareholders, that is left to the IUploader. I must be
    given a dict of RemoteReferences to storage buckets that are ready and
    willing to receive data."""

    def set_size(size):
        """Specify the number of bytes that will be encoded. This must be
        performed before get_serialized_params() can be called.
        """

    def set_params(params):
        """Override the default encoding parameters. 'params' is a tuple
        of (k,d,n), where 'k' is the number of required shares, 'd' is the
        shares_of_happiness, and 'n' is the total number of shares that
        will be created.

        Encoding parameters can be set in three ways. 1: The Encoder class
        provides defaults (3/7/10). 2: the Encoder can be constructed with
        an 'options' dictionary, in which the
        'needed_and_happy_and_total_shares' key can be a (k,d,n) tuple. 3:
        set_params((k,d,n)) can be called.

        If you intend to use set_params(), you must call it before
        get_share_size or get_param are called.
        """

    def set_encrypted_uploadable(u):
        """Provide a source of encrypted upload data. 'u' must implement
        IEncryptedUploadable.

        When this is called, the IEncryptedUploadable will be queried for
        its length and the storage_index that should be used.

        This returns a Deferred that fires with this Encoder instance.

        This must be performed before start() can be called.
        """

    def get_param(name):
        """Return an encoding parameter, by name.

        'storage_index': return a string with the (16-byte truncated
                         SHA-256 hash) storage index to which these shares
                         should be placed.

        'share_counts': return a tuple describing how many shares are
                        used: (needed_shares, shares_of_happiness,
                        total_shares)

        'num_segments': return an int with the number of segments that
                        will be encoded.

        'segment_size': return an int with the size of each segment.

        'block_size': return the size of the individual blocks that will
                      be delivered to a shareholder's put_block() method.
                      By knowing this, the shareholder will be able to
                      keep all blocks in a single file and still provide
                      random access when reading them. # TODO: can we
                      avoid exposing this?

        'share_size': an int with the size of the data that will be stored
                      on each shareholder. This is the aggregate amount of
                      data that will be sent to the shareholder, summed
                      over all the put_block() calls I will ever make. It
                      is useful to determine this size before asking
                      potential shareholders whether they will grant a
                      lease or not, since their answers will depend upon
                      how much space we need. TODO: this might also
                      include some amount of overhead, like the size of
                      all the hashes. We need to decide whether this is
                      useful or not.

        'serialized_params': a string with a concise description of the
                             codec name and its parameters. This may be
                             passed into the IUploadable to let it make
                             sure that the same file encoded with
                             different parameters will result in different
                             storage indexes.

        Once this is called, set_size() and set_params() may not be
        called.
        """

    def set_shareholders(shareholders):
        """Tell the encoder where to put the encoded shares.
        'shareholders' must be a dictionary that maps share number (an
        integer ranging from 0 to n-1) to an instance that provides
        IStorageBucketWriter. This must be performed before start() can be
        called."""
1211 """Begin the encode/upload process. This involves reading encrypted
1212 data from the IEncryptedUploadable, encoding it, uploading the shares
1213 to the shareholders, then sending the hash trees.
1215 set_encrypted_uploadable() and set_shareholders() must be called
1216 before this can be invoked.
1218 This returns a Deferred that fires with a tuple of
1219 (uri_extension_hash, needed_shares, total_shares, size) when the
1220 upload process is complete. This information, plus the encryption
1221 key, is sufficient to construct the URI.

class IDecoder(Interface):
    """I take a list of shareholders and some setup information, then
    download, validate, decode, and decrypt data from them, writing the
    results to an output file.

    I do not locate the shareholders, that is left to the IDownloader. I
    must be given a dict of RemoteReferences to storage buckets that are
    ready to send data."""

    def set_output(output):
        """I take a file-like object (providing write and close) to which
        all the plaintext data will be written.

        TODO: producer/consumer. Maybe write() should return a Deferred
        that indicates when it will accept more data? But probably having
        the IDecoder be a producer is easier to glue to IConsumer pieces.
        """

    def set_shareholders(shareholders):
        """I take a dictionary that maps share identifiers (small
        integers) to RemoteReferences that provide RIBucketReader. This
        must be called before start()."""

    def start():
        """I start the download. This process involves retrieving data and
        hash chains from the shareholders, using the hashes to validate
        the data, decoding the shares into segments, decrypting the
        segments, then writing the resulting plaintext to the output file.

        I return a Deferred that will fire (with self) when the download
        is complete.
        """

class IDownloadTarget(Interface):
    def open(size):
        """Called before any calls to write() or close(). If an error
        occurs before any data is available, fail() may be called without
        a previous call to open().

        'size' is the length of the file being downloaded, in bytes."""

    def write(data):
        """Output some data to the target."""

    def close():
        """Inform the target that there is no more data to be written."""

    def fail(why):
        """fail() is called to indicate that the download has failed.
        'why' is a Failure object indicating what went wrong. No further
        methods will be invoked on the IDownloadTarget after fail()."""

    def register_canceller(cb):
        """The FileDownloader uses this to register a no-argument function
        that the target can call to cancel the download. Once this
        canceller is invoked, no further calls to write() or close() will
        be made."""

    def finish():
        """When the FileDownloader is done, this finish() function will be
        called. Whatever it returns will be returned to the invoker of
        Downloader.download.
        """
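
# A minimal sketch (hypothetical class, not part of the codebase) of an
# IDownloadTarget that accumulates the plaintext in memory:
class _ExampleDataTarget:
    def open(self, size):
        self._pieces = []
    def write(self, data):
        self._pieces.append(data)
    def close(self):
        self.data = "".join(self._pieces)
    def fail(self, why):
        self._pieces = None  # discard partial data on failure
    def register_canceller(self, cb):
        pass  # this target never cancels the download
    def finish(self):
        # whatever finish() returns is returned by Downloader.download()
        return self.data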

class IDownloader(Interface):
    def download(uri, target):
        """Perform a CHK download, sending the data to the given target.
        'target' must provide IDownloadTarget.

        Returns a Deferred that fires (with the results of target.finish)
        when the download is finished, or errbacks if something went
        wrong."""

class IEncryptedUploadable(Interface):
    def set_upload_status(upload_status):
        """Provide an IUploadStatus object that should be filled with
        status information. The IEncryptedUploadable is responsible for
        setting key-determination progress ('chk'), size, storage_index,
        and ciphertext-fetch progress. It may delegate some of this
        responsibility to others, in particular to the IUploadable."""

    def get_size():
        """This behaves just like IUploadable.get_size()."""

    def get_all_encoding_parameters():
        """Return a Deferred that fires with a tuple of
        (k,happy,n,segment_size). The segment_size will be used as-is, and
        must match the following constraints: it must be a multiple of k,
        and it shouldn't be unreasonably larger than the file size (if
        segment_size is larger than filesize, the difference must be
        stored as padding).

        This usually passes through to the IUploadable method of the same
        name.

        The encoder strictly obeys the values returned by this method. To
        make an upload use non-default encoding parameters, you must
        arrange to control the values that this method returns.
        """

    def get_storage_index():
        """Return a Deferred that fires with a 16-byte storage index.
        """

    def read_encrypted(length, hash_only):
        """This behaves just like IUploadable.read(), but returns
        crypttext instead of plaintext. If hash_only is True, then this
        discards the data (and returns an empty list); this improves
        efficiency when resuming an interrupted upload (where we need to
        compute the plaintext hashes, but don't need the redundant
        encrypted data)."""

    def get_plaintext_hashtree_leaves(first, last, num_segments):
        """Get the leaf nodes of a merkle hash tree over the plaintext
        segments, i.e. get the tagged hashes of the given segments. The
        segment size is expected to be generated by the
        IEncryptedUploadable before any plaintext is read or ciphertext
        produced, so that the segment hashes can be generated with only a
        single pass.

        This returns a Deferred which fires with a sequence of hashes,
        using:

         tuple(segment_hashes[first:last])

        'num_segments' is used to assert that the number of segments that
        the IEncryptedUploadable handled matches the number of segments
        that the encoder was expecting.

        This method must not be called until the final byte has been read
        from read_encrypted(). Once this method is called,
        read_encrypted() can never be called again.
        """

    def get_plaintext_hash():
        """Get the hash of the whole plaintext.

        This returns a Deferred which fires with a tagged SHA-256 hash of
        the whole plaintext, obtained from hashutil.plaintext_hash(data).
        """

    def close():
        """Just like IUploadable.close()."""

class IUploadable(Interface):
    def set_upload_status(upload_status):
        """Provide an IUploadStatus object that should be filled with status
        information. The IUploadable is responsible for setting
        key-determination progress ('chk')."""

    def set_default_encoding_parameters(params):
        """Set the default encoding parameters, which must be a dict mapping
        strings to ints. The meaningful keys are 'k', 'happy', 'n', and
        'max_segment_size'. These might have an influence on the final
        encoding parameters returned by get_all_encoding_parameters(), if the
        Uploadable doesn't have more specific preferences.

        This call is optional: if it is not used, the Uploadable will use
        some built-in defaults. If used, this method must be called before
        any other IUploadable methods to have any effect.
        """
1379 """Return a Deferred that will fire with the length of the data to be
1380 uploaded, in bytes. This will be called before the data is actually
1381 used, to compute encoding parameters.

    def get_all_encoding_parameters():
        """Return a Deferred that fires with a tuple of
        (k,happy,n,segment_size). The segment_size will be used as-is, and
        must match the following constraints: it must be a multiple of k, and
        it shouldn't be unreasonably larger than the file size (if
        segment_size is larger than filesize, the difference must be stored
        as padding).

        The relative values of k and n allow some IUploadables to request
        better redundancy than others (in exchange for consuming more space
        in the grid).

        Larger values of segment_size reduce hash overhead, while smaller
        values reduce memory footprint and cause data to be delivered in
        smaller pieces (which may provide a smoother and more predictable
        download experience).

        The encoder strictly obeys the values returned by this method. To
        make an upload use non-default encoding parameters, you must arrange
        to control the values that this method returns. One way to influence
        them may be to call set_encoding_parameters() before calling
        get_all_encoding_parameters().
        """

    def get_encryption_key():
        """Return a Deferred that fires with a 16-byte AES key. This key will
        be used to encrypt the data. The key will also be hashed to derive
        the StorageIndex.

        Uploadables which want to achieve convergence should hash their file
        contents and the serialized_encoding_parameters to form the key
        (which of course requires a full pass over the data). Uploadables can
        use the upload.ConvergentUploadMixin class to achieve this
        automatically.

        Uploadables which do not care about convergence (or do not wish to
        make multiple passes over the data) can simply return a
        strongly-random 16 byte string.

        get_encryption_key() may be called multiple times: the IUploadable is
        required to return the same value each time.
        """
1428 """Return a Deferred that fires with a list of strings (perhaps with
1429 only a single element) which, when concatenated together, contain the
1430 next 'length' bytes of data. If EOF is near, this may provide fewer
1431 than 'length' bytes. The total number of bytes provided by read()
1432 before it signals EOF must equal the size provided by get_size().
1434 If the data must be acquired through multiple internal read
1435 operations, returning a list instead of a single string may help to
1436 reduce string copies.
1438 'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1439 so a 10kB file means length=3kB, 100kB file means length=30kB,
1440 and >=1MB file means length=300kB.
1442 This method provides for a single full pass through the data. Later
1443 use cases may desire multiple passes or access to only parts of the
1444 data (such as a mutable file making small edits-in-place). This API
1445 will be expanded once those use cases are better understood.
1449 """The upload is finished, and whatever filehandle was in use may be

class IUploadResults(Interface):
    """I am returned by upload() methods. I contain a number of public
    attributes which can be read to determine the results of the upload. Some
    of these are functional, some are timing information. All of these may be
    None.

     .file_size : the size of the file, in bytes
     .uri : the CHK read-cap for the file
     .ciphertext_fetched : how many bytes were fetched by the helper
     .sharemap : dict mapping share number to placement string
     .servermap : dict mapping server peerid to a set of share numbers
     .timings : dict of timing information, mapping name to seconds (float)
       total : total upload time, start to finish
       storage_index : time to compute the storage index
       peer_selection : time to decide which peers will be used
       contacting_helper : initial helper query to upload/no-upload decision
       existence_check : helper pre-upload existence check
       helper_total : initial helper query to helper finished pushing
       cumulative_fetch : helper waiting for ciphertext requests
       total_fetch : helper start to last ciphertext response
       cumulative_encoding : just time spent in zfec
       cumulative_sending : just time spent waiting for storage servers
       hashes_and_close : last segment push to shareholder close
       total_encode_and_push : first encode to shareholder close
    """
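
# Illustrative sketch (not part of the interfaces): a hypothetical caller
# inspecting an IUploadResults instance once the upload Deferred fires.
def _example_report_upload(results):
    print "uploaded %d bytes" % results.file_size
    print "read-cap:", results.uri
    # timings may be None or missing entries, depending on the upload path
    if results.timings:
        for name, elapsed in sorted(results.timings.items()):
            print " %s: %.3fs" % (name, elapsed)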

class IDownloadResults(Interface):
    """I am created internally by download() methods. I contain a number of
    public attributes which contain details about the download process::

     .file_size : the size of the file, in bytes
     .servers_used : set of server peerids that were used during download
     .server_problems : dict mapping server peerid to a problem string. Only
                        servers that had problems (bad hashes, disconnects)
                        are listed here.
     .servermap : dict mapping server peerid to a set of share numbers. Only
                  servers that had any shares are listed here.
     .timings : dict of timing information, mapping name to seconds (float)
       peer_selection : time to ask servers about shares
       servers_peer_selection : dict of peerid to DYHB-query time
       uri_extension : time to fetch a copy of the URI extension block
       hashtrees : time to fetch the hash trees
       segments : time to fetch, decode, and deliver segments
       cumulative_fetch : time spent waiting for storage servers
       cumulative_decode : just time spent in zfec
       cumulative_decrypt : just time spent in decryption
       total : total download time, start to finish
       fetch_per_server : dict of peerid to list of per-segment fetch times
    """

class IUploader(Interface):
    def upload(uploadable):
        """Upload the file. 'uploadable' must implement IUploadable. This
        returns a Deferred which fires with an UploadResults instance, from
        which the URI of the file can be obtained as results.uri ."""

    def upload_ssk(write_capability, new_version, uploadable):
        """TODO: how should this work?"""

class IChecker(Interface):
    def check(uri_to_check):
        """Accepts an IVerifierURI, and checks upon the health of its target.

        For now, uri_to_check must be an IVerifierURI. In the future we
        expect to relax that to be anything that can be adapted to
        IVerifierURI (like read-only or read-write dirnode/filenode URIs).

        This returns a Deferred. For dirnodes, this fires with either True or
        False (dirnodes are not distributed, so their health is a boolean).

        For filenodes, this fires with a tuple of (needed_shares,
        total_shares, found_shares, sharemap). The first three are ints. The
        basic health of the file is found_shares / needed_shares: if less
        than 1.0, the file is unrecoverable.

        The sharemap has a key for each sharenum. The value is a list of
        (binary) nodeids who hold that share. If two shares are kept on the
        same nodeid, they will fail as a pair, and overall reliability is
        decreased.

        The IChecker instance remembers the results of the check. By default,
        these results are stashed in RAM (and are forgotten at shutdown). If
        a file named 'checker_results.db' exists in the node's basedir, it is
        used as a sqlite database of results, making them persistent across
        runs. To start using this feature, just 'touch checker_results.db',
        and the node will initialize it properly the next time it is started.
        """

    def verify(uri_to_check):
        """Accepts an IVerifierURI, and verifies the crypttext of the target.

        This is a more-intensive form of checking. For verification, the
        file's crypttext contents are retrieved, and the associated hash
        checks are performed. If a storage server is holding a corrupted
        share, verification will detect the problem, but checking will not.
        This returns a Deferred that fires with True if the crypttext hashes
        look good, and will probably raise an exception if anything goes
        wrong.

        For dirnodes, 'verify' is the same as 'check', so the Deferred will
        fire with True or False.

        Verification currently only uses a minimal subset of peers, so a lot
        of share corruption will not be caught by it. We expect to improve
        this in the future.
        """

    def checker_results_for(uri_to_check):
        """Accepts an IVerifierURI, and returns a list of previously recorded
        checker results. This method performs no checking itself: it merely
        reports the results of checks that have taken place in the past.

        Each element of the list is a two-entry tuple: (when, results).
        The 'when' values are timestamps (float seconds since epoch), and the
        results are as defined in the check() method.

        Note: at the moment, this is specified to return synchronously. We
        might need to back away from this in the future.
        """
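
# Illustrative sketch (not part of the interfaces): interpreting the
# filenode tuple that IChecker.check() fires with, per the docstring above.
def _example_file_health((needed_shares, total_shares,
                          found_shares, sharemap)):
    # health < 1.0 means the file is unrecoverable
    health = float(found_shares) / needed_shares
    # shares co-located on one nodeid fail together, so also count how many
    # distinct nodes are actually holding shares
    distinct_homes = set()
    for nodeids in sharemap.values():
        distinct_homes.update(nodeids)
    return health, len(distinct_homes)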

class IClient(Interface):
    def upload(uploadable):
        """Upload some data into a CHK, get back the UploadResults for it.
        @param uploadable: something that implements IUploadable
        @return: a Deferred that fires with the UploadResults instance.
                 To get the URI for this file, use results.uri .
        """

    def create_mutable_file(contents=""):
        """Create a new mutable file with contents, get back the URI string.
        @param contents: the initial contents to place in the file.
        @return: a Deferred that fires with the (string) SSK URI for the new
                 file.
        """

    def create_empty_dirnode():
        """Create a new dirnode, empty and unattached.
        @return: a Deferred that fires with the new IDirectoryNode instance.
        """

    def create_node_from_uri(uri):
        """Create a new IFilesystemNode instance from the uri, synchronously.
        @param uri: a string or IURI-providing instance. This could be for a
                    LiteralFileNode, a CHK file node, a mutable file node, or
                    a directory node.
        @return: an instance that provides IFilesystemNode (or more usefully
                 one of its subclasses). File-specifying URIs will result in
                 IFileNode or IMutableFileNode -providing instances, like
                 FileNode, LiteralFileNode, or MutableFileNode.
                 Directory-specifying URIs will result in
                 IDirectoryNode-providing instances, like NewDirectoryNode.
        """

class IClientStatus(Interface):
    def list_all_uploads():
        """Return a list of uploader objects, one for each upload which
        currently has an object available (tracked with weakrefs). This is
        intended for debugging purposes."""

    def list_active_uploads():
        """Return a list of active IUploadStatus objects."""

    def list_recent_uploads():
        """Return a list of IUploadStatus objects for the most recently
        started uploads."""

    def list_all_downloads():
        """Return a list of downloader objects, one for each download which
        currently has an object available (tracked with weakrefs). This is
        intended for debugging purposes."""

    def list_active_downloads():
        """Return a list of active IDownloadStatus objects."""

    def list_recent_downloads():
        """Return a list of IDownloadStatus objects for the most recently
        started downloads."""

class IUploadStatus(Interface):
    def get_started():
        """Return a timestamp (float with seconds since epoch) indicating
        when the operation was started."""

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        upload. Returns None if the storage index has not yet been
        calculated."""

    def get_size():
        """Return an integer with the number of bytes that will eventually
        be uploaded for this file. Returns None if the size is not yet known.
        """

    def using_helper():
        """Return True if this upload is using a Helper, False if not."""

    def get_status():
        """Return a string describing the current state of the upload
        process."""

    def get_progress():
        """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
        each from 0.0 to 1.0. 'chk' describes how much progress has been
        made towards hashing the file to determine a CHK encryption key: if
        non-convergent encryption is in use, this will be trivial, otherwise
        the whole file must be hashed. 'ciphertext' describes how much of the
        ciphertext has been pushed to the helper, and is '1.0' for non-helper
        uploads. 'encode_and_push' describes how much of the encode-and-push
        process has finished: for helper uploads this is dependent upon the
        helper providing progress reports. It might be reasonable to add all
        three numbers and report the sum to the user."""

    def get_active():
        """Return True if the upload is currently active, False if not."""

    def get_results():
        """Return an instance of UploadResults (which contains timing and
        sharemap information). Might return None if the upload is not yet
        complete."""

    def get_counter():
        """Each upload status gets a unique number: this method returns that
        number. This provides a handle to this particular upload, so a web
        page can generate a suitable hyperlink."""
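
# Illustrative sketch (not part of the interfaces): folding the three
# IUploadStatus progress floats into a single 0.0-to-1.0 value for display.
# The get_progress() docstring suggests summing them; dividing by three
# normalizes that sum.
def _example_combined_progress(upload_status):
    chk, ciphertext, encode_and_push = upload_status.get_progress()
    return (chk + ciphertext + encode_and_push) / 3.0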

class IDownloadStatus(Interface):
    def get_started():
        """Return a timestamp (float with seconds since epoch) indicating
        when the operation was started."""

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files)."""

    def get_size():
        """Return an integer with the number of bytes that will eventually be
        retrieved for this file. Returns None if the size is not yet known.
        """

    def using_helper():
        """Return True if this download is using a Helper, False if not."""

    def get_status():
        """Return a string describing the current state of the download
        process."""

    def get_progress():
        """Returns a float (from 0.0 to 1.0) describing the amount of the
        download that has completed. This value will remain at 0.0 until the
        first byte of plaintext is pushed to the download target."""

    def get_active():
        """Return True if the download is currently active, False if not."""

    def get_counter():
        """Each download status gets a unique number: this method returns
        that number. This provides a handle to this particular download, so a
        web page can generate a suitable hyperlink."""

class IServermapUpdaterStatus(Interface):
    pass

class IPublishStatus(Interface):
    pass

class IRetrieveStatus(Interface):
    pass


class NotCapableError(Exception):
    """You have tried to write to a read-only node."""

class BadWriteEnablerError(Exception):
    pass

class RIControlClient(RemoteInterface):

    def wait_for_client_connections(num_clients=int):
        """Do not return until we have connections to at least NUM_CLIENTS
        storage servers.
        """

    def upload_from_file_to_uri(filename=str,
                                convergence=ChoiceOf(None,
                                                     StringConstraint(2**20))):
        """Upload a file to the grid. This accepts a filename (which must be
        absolute) that points to a file on the node's local disk. The node
        will read the contents of this file, upload it to the grid, then
        return the URI at which it was uploaded. If convergence is None then
        a random encryption key will be used, else the plaintext will be
        hashed, then that hash will be mixed together with the "convergence"
        string to form the encryption key.
        """
        return URI

    def download_from_uri_to_file(uri=URI, filename=str):
        """Download a file from the grid, placing it on the node's local disk
        at the given filename (which must be absolute[?]). Returns the
        absolute filename where the file was written."""
        return str

    def get_memory_usage():
        """Return a dict describing the amount of memory currently in use.
        The keys are 'VmPeak', 'VmSize', and 'VmData'. The values are
        integers, measuring memory consumption in bytes."""
        return DictOf(str, int)

    def speed_test(count=int, size=int, mutable=Any()):
        """Write 'count' tempfiles to disk, all of the given size. Measure
        how long (in seconds) it takes to upload them all to the servers.
        Then measure how long it takes to download all of them. If 'mutable'
        is 'create', time creation of mutable files. If 'mutable' is
        'upload', then time access to the same mutable file instead of
        creating new ones.

        Returns a tuple of (upload_time, download_time).
        """
        return (float, float)

    def measure_peer_response_time():
        """Send a short message to each connected peer, and measure the time
        it takes for them to respond to it. This is a rough measure of the
        application-level round trip time.

        @return: a dictionary mapping peerid to a float (RTT time in seconds)
        """
        return DictOf(Nodeid, float)
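
# Illustrative sketch (not part of the interfaces): exercising the control
# port. 'control' is assumed to be a Foolscap RemoteReference to
# RIControlClient, obtained by connecting to the node's control FURL.
def _example_speed_test(control):
    d = control.callRemote("speed_test", count=10, size=1000000,
                           mutable=None)
    def _report((upload_time, download_time)):
        print "upload: %.2fs, download: %.2fs" % (upload_time, download_time)
    d.addCallback(_report)
    return d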

UploadResults = Any() # DictOf(str, str)

class RIEncryptedUploadable(RemoteInterface):
    __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"

    def get_size():
        return Offset

    def get_all_encoding_parameters():
        return (int, int, int, long)

    def read_encrypted(offset=Offset, length=ReadSize):
        return ListOf(str)

    def get_plaintext_hashtree_leaves(first=int, last=int, num_segments=int):
        return ListOf(Hash)

    def get_plaintext_hash():
        return Hash

    def close():
        return None

class RICHKUploadHelper(RemoteInterface):
    __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"

    def upload(reader=RIEncryptedUploadable):
        return UploadResults

class RIHelper(RemoteInterface):
    __remote_name__ = "RIHelper.tahoe.allmydata.com"

    def upload_chk(si=StorageIndex):
        """See if a file with a given storage index needs uploading. The
        helper will ask the appropriate storage servers to see if the file
        has already been uploaded. If so, the helper will return a set of
        'upload results' that includes whatever hashes are needed to build
        the read-cap, and perhaps a truncated sharemap.

        If the file has not yet been uploaded (or if it was only partially
        uploaded), the helper will return an empty upload-results dictionary
        and also an RICHKUploadHelper object that will take care of the
        upload process. The client should call upload() on this object and
        pass it a reference to an RIEncryptedUploadable object that will
        provide ciphertext. When the upload is finished, the upload() method
        will finish and return the upload results.
        """
        return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
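
# Illustrative sketch (not part of the interfaces): driving RIHelper as
# described in upload_chk() above. 'helper' is assumed to be a Foolscap
# RemoteReference to an RIHelper, and 'reader' a Referenceable providing
# RIEncryptedUploadable.
def _example_upload_via_helper(helper, storage_index, reader):
    d = helper.callRemote("upload_chk", storage_index)
    def _maybe_upload((upload_results, upload_helper)):
        if upload_helper is None:
            # the grid already has the file: upload_results holds the
            # hashes needed to build the read-cap
            return upload_results
        # otherwise push our ciphertext through the helper
        return upload_helper.callRemote("upload", reader)
    d.addCallback(_maybe_upload)
    return d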

class RIStatsProvider(RemoteInterface):
    __remote_name__ = "RIStatsProvider.tahoe.allmydata.com"
    """
    Provides access to statistics and monitoring information.
    """

    def get_stats():
        """
        Returns a dictionary containing 'counters' and 'stats', each a
        dictionary with string counter/stat name keys, and numeric values.
        Counters are monotonically increasing measures of work done, and
        stats are instantaneous measures (potentially time averaged
        internally).
        """
        return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))

class RIStatsGatherer(RemoteInterface):
    __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"
    """
    Provides a monitoring service for centralised collection of stats.
    """

    def provide(provider=RIStatsProvider, nickname=str):
        """
        @param provider: a stats collector instance which should be polled
                         periodically by the gatherer to collect stats.
        @param nickname: a name useful to identify the provided client
        """
        return None

class IStatsProducer(Interface):
    def get_stats():
        """
        Returns a dictionary, with str keys representing the names of stats
        to be monitored, and numeric values.
        """
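
# Illustrative sketch (not part of the interfaces): the shape of dictionary
# an IStatsProducer might return; the stat names are invented for the
# example.
def _example_get_stats():
    return {
        "example.uploads_started": 14,       # monotonic counter of work done
        "example.active_uploads": 2,         # instantaneous measure
        "example.mean_upload_seconds": 1.7,  # time-averaged measure
    }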

class RIKeyGenerator(RemoteInterface):
    __remote_name__ = "RIKeyGenerator.tahoe.allmydata.com"
    """
    Provides a service offering to make RSA key pairs.
    """

    def get_rsa_key_pair(key_size=int):
        """
        @param key_size: the size of the signature key.
        @return: tuple(verifying_key, signing_key)
        """
        return TupleOf(str, str)
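
# Illustrative sketch (not part of the interfaces): a hypothetical client
# asking a remote RIKeyGenerator for a key pair. 'key_generator' is assumed
# to be a Foolscap RemoteReference.
def _example_fetch_key_pair(key_generator):
    d = key_generator.callRemote("get_rsa_key_pair", key_size=2048)
    def _got((verifying_key, signing_key)):
        # both keys arrive as serialized strings, per the return constraint
        return verifying_key, signing_key
    d.addCallback(_got)
    return d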