-import struct, math
+import struct
from allmydata.mutable.common import NeedMoreDataError, UnknownVersionError
from allmydata.interfaces import HASH_SIZE, SALT_SIZE, SDMF_VERSION, \
MDMF_VERSION, IMutableSlotWriter
-from allmydata.util import mathutil, observer
+from allmydata.util import mathutil
from twisted.python import failure
from twisted.internet import defer
from zope.interface import implements
pubkey, signature, share_hash_chain, block_hash_tree,
share_data, enc_privkey)
-def unpack_checkstring(checkstring):
+def get_version_from_checkstring(checkstring):
+ (t, ) = struct.unpack(">B", checkstring[:1])
+ return t
+
+def unpack_sdmf_checkstring(checkstring):
cs_len = struct.calcsize(PREFIX)
version, seqnum, root_hash, IV = struct.unpack(PREFIX, checkstring[:cs_len])
if version != 0: # TODO: just ignore the share
raise UnknownVersionError("got mutable share version %d, but I only understand version 0" % version)
return (seqnum, root_hash, IV)
+def unpack_mdmf_checkstring(checkstring):
+ cs_len = struct.calcsize(MDMFCHECKSTRING)
+ checkstring = checkstring[:cs_len]
+ version, seqnum, root_hash = struct.unpack(MDMFCHECKSTRING, checkstring)
+
+ assert version == 1
+ return (seqnum, root_hash)
def pack_offsets(verification_key_length, signature_length,
share_hash_chain_length, block_hash_tree_length,
"""
for k in ["sharedata", "encprivkey", "signature", "verification_key",
"share_hash_chain", "block_hash_tree"]:
- assert k in self._share_pieces
+ assert k in self._share_pieces, (self.shnum, k, self._share_pieces.keys())
# This is the only method that actually writes something to the
# remote server.
# First, we need to pack the share into data that we can write
PRIVATE_KEY_SIZE = 1220
SIGNATURE_SIZE = 260
VERIFICATION_KEY_SIZE = 292
-# We know we won't have more than 256 shares, and we know that we won't
-# need to store more than lg 256 of them to validate, so that's our
-# bound. We add 1 to the int cast to round to the next integer.
-SHARE_HASH_CHAIN_SIZE = int(math.log(HASH_SIZE * 256)) + 1
+# We know we won't have more than 256 shares, and we know that we won't need
+# to store more than log2(256) hash-chain nodes to validate, so that's our
+# bound. Each node requires 2 bytes of node-number plus 32 bytes of hash.
+SHARE_HASH_CHAIN_SIZE = (2+HASH_SIZE)*mathutil.log_ceil(256, 2)
class MDMFSlotWriteProxy:
implements(IMutableSlotWriter)
# offset: size: name:
#-- signed part --
# 0 1 version number (01)
- # 1 8 sequence number
+ # 1 8 sequence number
# 9 32 share tree root hash
# 41 1 The "k" encoding parameter
# 42 1 The "N" encoding parameter
# 51 8 The data length of the original plaintext
#-- end signed part --
# 59 8 The offset of the encrypted private key
- # 67 8 The offset of the signature
- # 75 8 The offset of the verification key
- # 83 8 The offset of the end of the v. key.
- # 92 8 The offset of the share data
- # 100 8 The offset of the block hash tree
- # 108 8 The offset of the share hash chain
- # 116 8 The offset of EOF
- #
- # followed by the encrypted private key, signature, verification
- # key, share hash chain, data, and block hash tree. We order the
- # fields that way to make smart downloaders -- downloaders which
- # prempetively read a big part of the share -- possible.
+ # 67 8 The offset of the share hash chain
+ # 75 8 The offset of the signature
+ # 83 8 The offset of the verification key
+ # 91 8 The offset of the end of the v. key.
+ # 99 8 The offset of the share data
+ # 107 8 The offset of the block hash tree
+ # 115 8 The offset of EOF
+ # 123 var encrypted private key
+ # var var share hash chain
+ # var var signature
+ # var var verification key
+ # var large share data
+ # var var block hash tree
+ #
+ # We order the fields that way to make smart downloaders -- downloaders
+ # which preemptively read a big part of the share -- possible.
#
# The checkstring is the first three fields -- the version number,
# sequence number, root hash and root salt hash. This is consistent
if self._data == None:
self._data = ""
- self._queue_observers = observer.ObserverList()
- self._queue_errbacks = observer.ObserverList()
- self._readvs = []
-
def _maybe_fetch_offsets_and_header(self, force_remote=False):
"""
self._offsets['share_data'] = sharedata
- def get_block_and_salt(self, segnum, queue=False):
+ def get_block_and_salt(self, segnum):
"""
I return (block, salt), where block is the block data and
salt is the salt used to encrypt that segment.
readvs = [(share_offset, data)]
return readvs
d.addCallback(_then)
- d.addCallback(lambda readvs:
- self._read(readvs, queue=queue))
+ d.addCallback(lambda readvs: self._read(readvs))
def _process_results(results):
assert self.shnum in results
if self._version_number == 0:
return d
- def get_blockhashes(self, needed=None, queue=False, force_remote=False):
+ def get_blockhashes(self, needed=None, force_remote=False):
"""
I return the block hash tree
return readvs
d.addCallback(_then)
d.addCallback(lambda readvs:
- self._read(readvs, queue=queue, force_remote=force_remote))
+ self._read(readvs, force_remote=force_remote))
def _build_block_hash_tree(results):
assert self.shnum in results
return d
- def get_sharehashes(self, needed=None, queue=False, force_remote=False):
+ def get_sharehashes(self, needed=None, force_remote=False):
"""
I return the part of the share hash chain placed to validate
this share.
return readvs
d.addCallback(_make_readvs)
d.addCallback(lambda readvs:
- self._read(readvs, queue=queue, force_remote=force_remote))
+ self._read(readvs, force_remote=force_remote))
def _build_share_hash_chain(results):
assert self.shnum in results
return d
- def get_encprivkey(self, queue=False):
+ def get_encprivkey(self):
"""
I return the encrypted private key.
"""
readvs = [(privkey_offset, privkey_length)]
return readvs
d.addCallback(_make_readvs)
- d.addCallback(lambda readvs:
- self._read(readvs, queue=queue))
+ d.addCallback(lambda readvs: self._read(readvs))
def _process_results(results):
assert self.shnum in results
privkey = results[self.shnum][0]
return d
- def get_signature(self, queue=False):
+ def get_signature(self):
"""
I return the signature of my share.
"""
readvs = [(signature_offset, signature_length)]
return readvs
d.addCallback(_make_readvs)
- d.addCallback(lambda readvs:
- self._read(readvs, queue=queue))
+ d.addCallback(lambda readvs: self._read(readvs))
def _process_results(results):
assert self.shnum in results
signature = results[self.shnum][0]
return d
- def get_verification_key(self, queue=False):
+ def get_verification_key(self):
"""
I return the verification key.
"""
readvs = [(vk_offset, vk_length)]
return readvs
d.addCallback(_make_readvs)
- d.addCallback(lambda readvs:
- self._read(readvs, queue=queue))
+ d.addCallback(lambda readvs: self._read(readvs))
def _process_results(results):
assert self.shnum in results
verification_key = results[self.shnum][0]
return d
- def flush(self):
- """
- I flush my queue of read vectors.
- """
- d = self._read(self._readvs)
- def _then(results):
- self._readvs = []
- if isinstance(results, failure.Failure):
- self._queue_errbacks.notify(results)
- else:
- self._queue_observers.notify(results)
- self._queue_observers = observer.ObserverList()
- self._queue_errbacks = observer.ObserverList()
- d.addBoth(_then)
-
-
- def _read(self, readvs, force_remote=False, queue=False):
+ def _read(self, readvs, force_remote=False):
unsatisfiable = filter(lambda x: x[0] + x[1] > len(self._data), readvs)
# TODO: It's entirely possible to tweak this so that it just
# fulfills the requests that it can, and not demand that all
results = {self.shnum: results}
return defer.succeed(results)
else:
- if queue:
- start = len(self._readvs)
- self._readvs += readvs
- end = len(self._readvs)
- def _get_results(results, start, end):
- if not self.shnum in results:
- return {self._shnum: [""]}
- return {self.shnum: results[self.shnum][start:end]}
- d = defer.Deferred()
- d.addCallback(_get_results, start, end)
- self._queue_observers.subscribe(d.callback)
- self._queue_errbacks.subscribe(d.errback)
- return d
return self._rref.callRemote("slot_readv",
self._storage_index,
[self.shnum],