This reduces the total test time on my laptop from 400s to 283s.
* src/allmydata/test/test_system.py (SystemTest.test_mutable._test_debug):
Remove the assertions about container_size/data_length; these change with
keysize and were too variable anyway.
* src/allmydata/mutable/filenode.py (MutableFileNode.create): add keysize=
* src/allmydata/dirnode.py (NewDirectoryNode.create): same
* src/allmydata/client.py (Client.DEFAULT_MUTABLE_KEYSIZE): add a default;
when set, it overrides the keysize used by MutableFileNode
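
The resulting precedence is: an explicit keysize= argument wins, then
Client.DEFAULT_MUTABLE_KEYSIZE, then MutableFileNode.SIGNATURE_KEY_SIZE
(2048 bits). A minimal sketch of how a test harness might use the new
attribute; the helper name is illustrative and the allmydata.client import
path is assumed:

    from allmydata import client

    def make_fast_test_client(basedir):
        # Hypothetical helper: shrink the RSA keys for new mutable files so
        # tests spend less time in key generation. 522 bits is the size the
        # system tests below use; it is far too small for real deployments.
        c = client.Client(basedir=basedir)
        c.DEFAULT_MUTABLE_KEYSIZE = 522
        # create_mutable_file() and create_empty_dirnode() will now pass 522
        # down to MutableFileNode.create() instead of the 2048-bit default.
        return c
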
"max_segment_size": 128*KiB,
}
+ # Set this to override the size of the RSA keys created for new mutable
+ # files. The default of None lets mutable.filenode choose its own size,
+ # which is currently 2048 bits.
+ DEFAULT_MUTABLE_KEYSIZE = None
+
def __init__(self, basedir="."):
node.Node.__init__(self, basedir)
self.started_timestamp = time.time()
def create_empty_dirnode(self):
n = NewDirectoryNode(self)
- d = n.create(self._generate_pubprivkeys)
+ d = n.create(self._generate_pubprivkeys, self.DEFAULT_MUTABLE_KEYSIZE)
d.addCallback(lambda res: n)
return d
- def create_mutable_file(self, contents=""):
+ def create_mutable_file(self, contents="", keysize=None):
+ keysize = keysize or self.DEFAULT_MUTABLE_KEYSIZE
n = MutableFileNode(self)
- d = n.create(contents, self._generate_pubprivkeys)
+ d = n.create(contents, self._generate_pubprivkeys, keysize=keysize)
d.addCallback(lambda res: n)
return d
self._node.init_from_uri(self._uri.get_filenode_uri())
return self
- def create(self, keypair_generator=None):
+ def create(self, keypair_generator=None, keysize=None):
"""
Returns a deferred that eventually fires with self once the directory
has been created (distributed across a set of storage servers).
# URI to create our own.
self._node = self.filenode_class(self._client)
empty_contents = self._pack_contents({})
- d = self._node.create(empty_contents, keypair_generator)
+ d = self._node.create(empty_contents, keypair_generator, keysize=keysize)
d.addCallback(self._filenode_created)
return d
def _filenode_created(self, res):
self._encprivkey = None
return self
- def create(self, initial_contents, keypair_generator=None):
+ def create(self, initial_contents, keypair_generator=None, keysize=None):
"""Call this when the filenode is first created. This will generate
the keys, generate the initial shares, wait until at least numpeers
are connected, allocate shares, and upload the initial
contents. Returns a Deferred that fires (with the MutableFileNode
instance you should use) when it completes.
"""
-
- d = defer.maybeDeferred(self._generate_pubprivkeys, keypair_generator)
+ keysize = keysize or self.SIGNATURE_KEY_SIZE
+ d = defer.maybeDeferred(self._generate_pubprivkeys,
+ keypair_generator, keysize)
d.addCallback(self._generated)
d.addCallback(lambda res: self._upload(initial_contents, None))
return d
self._readkey = self._uri.readkey
self._storage_index = self._uri.storage_index
- def _generate_pubprivkeys(self, keypair_generator):
+ def _generate_pubprivkeys(self, keypair_generator, keysize):
if keypair_generator:
- return keypair_generator(self.SIGNATURE_KEY_SIZE)
+ return keypair_generator(keysize)
else:
- # RSA key generation for a 2048 bit key takes between 0.8 and 3.2 secs
- signer = rsa.generate(self.SIGNATURE_KEY_SIZE)
+ # RSA key generation for a 2048 bit key takes between 0.8 and 3.2
+ # secs
+ signer = rsa.generate(keysize)
verifier = signer.get_verifying_key()
return verifier, signer
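
For reference, a keypair_generator handed to create() is now called with the
requested keysize and must return a (verifier, signer) pair, matching the
fallback path above. A hypothetical generator, assuming the same pycryptopp
rsa module that mutable/filenode.py imports:

    from pycryptopp.publickey import rsa  # assumption: same module as filenode.py

    def my_keypair_generator(keysize):
        # Illustrative only: same behaviour as the built-in fallback, but a
        # real generator could instead hand the work to a key-generator
        # service like the one configured in the system tests below.
        signer = rsa.generate(keysize)
        return signer.get_verifying_key(), signer
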
self.client = thisclient
self.my_uri = make_mutable_file_uri()
self.storage_index = self.my_uri.storage_index
- def create(self, initial_contents, key_generator=None):
+ def create(self, initial_contents, key_generator=None, keysize=None):
if len(initial_contents) > self.MUTABLE_SIZELIMIT:
raise FileTooLargeError("SDMF is limited to one segment, and "
"%d > %d" % (len(initial_contents),
kgsdir = self.getdir("key_generator")
fileutil.make_dirs(kgsdir)
- self.key_generator_svc = KeyGeneratorService(kgsdir, display_furl=False)
+ self.key_generator_svc = KeyGeneratorService(kgsdir,
+ display_furl=False,
+ default_key_size=522)
self.key_generator_svc.key_generator.pool_size = 4
self.key_generator_svc.key_generator.pool_refresh_delay = 60
self.add_service(self.key_generator_svc)
# will have registered the helper furl).
c = self.add_service(client.Client(basedir=basedirs[0]))
self.clients.append(c)
+ c.DEFAULT_MUTABLE_KEYSIZE = 522
d = c.when_tub_ready()
def _ready(res):
f = open(os.path.join(basedirs[0],"private","helper.furl"), "r")
for i in range(1, self.numclients):
c = self.add_service(client.Client(basedir=basedirs[i]))
self.clients.append(c)
+ c.DEFAULT_MUTABLE_KEYSIZE = 522
log.msg("STARTING")
return self.wait_for_connections()
d.addCallback(_ready)
def _stopped(res):
new_c = client.Client(basedir=self.getdir("client%d" % num))
self.clients[num] = new_c
+ new_c.DEFAULT_MUTABLE_KEYSIZE = 522
self.add_service(new_c)
return new_c.when_tub_ready()
d.addCallback(_stopped)
c = client.Client(basedir=basedir)
self.clients.append(c)
+ c.DEFAULT_MUTABLE_KEYSIZE = 522
self.numclients += 1
if add_to_sparent:
c.setServiceParent(self.sparent)
return None
class NoNetworkClient(Client):
+ DEFAULT_MUTABLE_KEYSIZE = 522
def create_tub(self):
pass
# broken the server on which the first share wants to be stored.
n = FastMutableFileNode(self.client)
d = defer.succeed(None)
- d.addCallback(n._generate_pubprivkeys)
+ d.addCallback(n._generate_pubprivkeys, keysize=522)
d.addCallback(n._generated)
def _break_peer0(res):
si = n.get_storage_index()
from base64 import b32encode
-import os, sys, time, re, simplejson
+import os, sys, time, simplejson
from cStringIO import StringIO
from zope.interface import implements
from twisted.trial import unittest
peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid)
self.failUnless(" WE for nodeid: %s\n" % peerid in output)
self.failUnless(" num_extra_leases: 0\n" in output)
- # the pubkey size can vary by a byte, so the container might
- # be a bit larger on some runs.
- m = re.search(r'^ container_size: (\d+)$', output, re.M)
- self.failUnless(m)
- container_size = int(m.group(1))
- self.failUnless(2037 <= container_size <= 2049, container_size)
- m = re.search(r'^ data_length: (\d+)$', output, re.M)
- self.failUnless(m)
- data_length = int(m.group(1))
- self.failUnless(2037 <= data_length <= 2049, data_length)
self.failUnless(" secrets are for nodeid: %s\n" % peerid
in output)
self.failUnless(" SDMF contents:\n" in output)