from zope.interface import implements
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
-from allmydata.util import base32, consumer, fileutil
+from allmydata.util import base32, consumer, fileutil, mathutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
ssk_pubkey_fingerprint_hash
from allmydata.util.deferredutil import gatherResults
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir
+from allmydata.scripts import debug
from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
from allmydata.mutable.common import ResponseCache, \
from allmydata.mutable.repairer import MustForceRepairError
import allmydata.test.common_util as testutil
+from allmydata.test.common import TEST_RSA_KEY_SIZE
+
# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
storage_broker = make_storagebroker(s, num_peers)
sh = client.SecretHolder("lease secret", "convergence secret")
keygen = client.KeyGenerator()
- keygen.set_default_keysize(522)
+ keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
nodemaker = NodeMaker(storage_broker, sh, None,
None, None,
{"k": 3, "n": 10}, keygen)
self.failUnlessEqual(len(shnums), 1)
d.addCallback(_created)
return d
- test_create.timeout = 15
def test_create_mdmf(self):
def _created(n):
self.failUnless(isinstance(n, MutableFileNode))
cap = n.get_cap()
- self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
+ self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
rcap = n.get_readcap()
self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
vcap = n.get_verify_cap()
d.addCallback(lambda ignored:
self.failUnlessEqual(self.c.data, "contents1" * 100000))
return d
- test_retrieve_pause.timeout = 25
def test_download_from_mdmf_cap(self):
return d
d.addCallback(_created)
return d
- test_create_with_initial_contents.timeout = 15
def test_create_mdmf_with_initial_contents(self):
return d
d.addCallback(_created)
return d
- test_create_mdmf_with_initial_contents.timeout = 20
def test_response_cache_memory_leak(self):
return d
d.addCallback(_created)
return d
- test_modify.timeout = 15
def test_modify_backoffer(self):
self.failIf(servermap.all_peers())
d.addCallback(_check_servermap)
return d
- test_no_servers.timeout = 15
def test_no_servers_download(self):
sb2 = make_storagebroker(num_peers=0)
d.addCallback(_restore)
d.addCallback(_retrieved)
return d
- test_no_servers_download.timeout = 15
def _test_corrupt_all(self, offset, substring,
d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_good, "test_verify_good")
return d
- test_verify_good.timeout = 15
def test_verify_all_bad_sig(self):
d = corrupt(None, self._storage, 1) # bad sig
return d
d.addCallback(_created)
return d
- test_unexpected_shares.timeout = 15
def test_bad_server(self):
# Break one server, then create the file: the initial publish should
# use #467 static-server-selection to disable permutation and force
# the choice of server for share[0].
- d = nm.key_generator.generate(522)
+ d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
def _got_key( (pubkey, privkey) ):
nm.key_generator = SameKeyGenerator(pubkey, privkey)
pubkey_s = pubkey.serialize()
"Ran out of non-bad servers",
nm.create_mutable_file, MutableData("contents"))
return d
- test_publish_no_servers.timeout = 30
def test_privkey_query_error(self):
self.nm = self.c.nodemaker
self.data = "test data" * 100000 # about 900 KiB; MDMF
self.small_data = "test data" * 10 # about 90 B; SDMF
- return self.do_upload()
-
- def do_upload(self):
- d1 = self.nm.create_mutable_file(MutableData(self.data),
- version=MDMF_VERSION)
- d2 = self.nm.create_mutable_file(MutableData(self.small_data))
- dl = gatherResults([d1, d2])
- def _then((n1, n2)):
- assert isinstance(n1, MutableFileNode)
- assert isinstance(n2, MutableFileNode)
- self.mdmf_node = n1
- self.sdmf_node = n2
- dl.addCallback(_then)
- return dl
+ # Create an MDMF mutable file holding self.data, stash the resulting
+ # MutableFileNode on self.mdmf_node, and fire the Deferred with it.
+ def do_upload_mdmf(self):
+ d = self.nm.create_mutable_file(MutableData(self.data),
+ version=MDMF_VERSION)
+ def _then(n):
+ # sanity-check that the nodemaker honored the requested version
+ assert isinstance(n, MutableFileNode)
+ assert n._protocol_version == MDMF_VERSION
+ self.mdmf_node = n
+ return n
+ d.addCallback(_then)
+ return d
+ # Create an SDMF mutable file holding self.small_data (SDMF is the
+ # nodemaker default), stash it on self.sdmf_node, and fire with it.
+ def do_upload_sdmf(self):
+ d = self.nm.create_mutable_file(MutableData(self.small_data))
+ def _then(n):
+ assert isinstance(n, MutableFileNode)
+ assert n._protocol_version == SDMF_VERSION
+ self.sdmf_node = n
+ return n
+ d.addCallback(_then)
+ return d
- def test_get_readonly_mutable_version(self):
- # Attempting to get a mutable version of a mutable file from a
- # filenode initialized with a readcap should return a readonly
- # version of that same node.
- ro = self.mdmf_node.get_readonly()
- d = ro.get_best_mutable_version()
- d.addCallback(lambda version:
- self.failUnless(version.is_readonly()))
- d.addCallback(lambda ignored:
- self.sdmf_node.get_readonly())
- d.addCallback(lambda version:
- self.failUnless(version.is_readonly()))
+ # Create a zero-length SDMF mutable file, stash it on
+ # self.sdmf_zero_length_node, and fire the Deferred with the node.
+ def do_upload_empty_sdmf(self):
+ d = self.nm.create_mutable_file(MutableData(""))
+ def _then(n):
+ assert isinstance(n, MutableFileNode)
+ self.sdmf_zero_length_node = n
+ assert n._protocol_version == SDMF_VERSION
+ return n
+ d.addCallback(_then)
+ return d
+ # Convenience: upload both the MDMF and SDMF fixture files in
+ # sequence (populates self.mdmf_node and self.sdmf_node).
+ def do_upload(self):
+ d = self.do_upload_mdmf()
+ d.addCallback(lambda ign: self.do_upload_sdmf())
+ return d
+
+ def test_debug(self):
+ d = self.do_upload_mdmf()
+ def _debug(n):
+ fso = debug.FindSharesOptions()
+ storage_index = base32.b2a(n.get_storage_index())
+ fso.si_s = storage_index
+ fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
+ for (i,ss,storedir)
+ in self.iterate_servers()]
+ fso.stdout = StringIO()
+ fso.stderr = StringIO()
+ debug.find_shares(fso)
+ sharefiles = fso.stdout.getvalue().splitlines()
+ expected = self.nm.default_encoding_parameters["n"]
+ self.failUnlessEqual(len(sharefiles), expected)
+
+ do = debug.DumpOptions()
+ do["filename"] = sharefiles[0]
+ do.stdout = StringIO()
+ debug.dump_share(do)
+ output = do.stdout.getvalue()
+ lines = set(output.splitlines())
+ self.failUnless("Mutable slot found:" in lines, output)
+ self.failUnless(" share_type: MDMF" in lines, output)
+ self.failUnless(" num_extra_leases: 0" in lines, output)
+ self.failUnless(" MDMF contents:" in lines, output)
+ self.failUnless(" seqnum: 1" in lines, output)
+ self.failUnless(" required_shares: 3" in lines, output)
+ self.failUnless(" total_shares: 10" in lines, output)
+ self.failUnless(" segsize: 131073" in lines, output)
+ self.failUnless(" datalen: %d" % len(self.data) in lines, output)
+ vcap = n.get_verify_cap().to_string()
+ self.failUnless(" verify-cap: %s" % vcap in lines, output)
+
+ cso = debug.CatalogSharesOptions()
+ cso.nodedirs = fso.nodedirs
+ cso.stdout = StringIO()
+ cso.stderr = StringIO()
+ debug.catalog_shares(cso)
+ shares = cso.stdout.getvalue().splitlines()
+ oneshare = shares[0] # all shares should be MDMF
+ self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
+ self.failUnless(oneshare.startswith("MDMF"), oneshare)
+ fields = oneshare.split()
+ self.failUnlessEqual(fields[0], "MDMF")
+ self.failUnlessEqual(fields[1], storage_index)
+ self.failUnlessEqual(fields[2], "3/10")
+ self.failUnlessEqual(fields[3], "%d" % len(self.data))
+ self.failUnless(fields[4].startswith("#1:"), fields[3])
+ # the rest of fields[4] is the roothash, which depends upon
+ # encryption salts and is not constant. fields[5] is the
+ # remaining time on the longest lease, which is timing dependent.
+ # The rest of the line is the quoted pathname to the share.
+ d.addCallback(_debug)
+ return d
def test_get_sequence_number(self):
- d = self.mdmf_node.get_best_readable_version()
+ d = self.do_upload()
+ d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.failUnlessEqual(bv.get_sequence_number(), 1))
d.addCallback(lambda ignored:
# We need to define an API by which an uploader can set the
# extension parameters, and by which a downloader can retrieve
# extensions.
- d = self.mdmf_node.get_best_mutable_version()
+ d = self.do_upload_mdmf()
+ d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
def _got_version(version):
hints = version.get_downloader_hints()
# Should be empty at this point.
# If we initialize a mutable file with a cap that has extension
# parameters in it and then grab the extension parameters using
# our API, we should see that they're set correctly.
- mdmf_uri = self.mdmf_node.get_uri()
- new_node = self.nm.create_from_cap(mdmf_uri)
- d = new_node.get_best_mutable_version()
+ d = self.do_upload_mdmf()
+ def _then(ign):
+ mdmf_uri = self.mdmf_node.get_uri()
+ new_node = self.nm.create_from_cap(mdmf_uri)
+ return new_node.get_best_mutable_version()
+ d.addCallback(_then)
def _got_version(version):
hints = version.get_downloader_hints()
self.failUnlessIn("k", hints)
# it's an MDMF file, we should get an MDMF cap back from that
# file and should be able to use that.
# That's essentially what MDMF node is, so just check that.
- mdmf_uri = self.mdmf_node.get_uri()
- cap = uri.from_string(mdmf_uri)
- self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
- readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
- cap = uri.from_string(readonly_mdmf_uri)
- self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
-
-
- def test_get_writekey(self):
- d = self.mdmf_node.get_best_mutable_version()
- d.addCallback(lambda bv:
- self.failUnlessEqual(bv.get_writekey(),
- self.mdmf_node.get_writekey()))
- d.addCallback(lambda ignored:
- self.sdmf_node.get_best_mutable_version())
- d.addCallback(lambda bv:
- self.failUnlessEqual(bv.get_writekey(),
- self.sdmf_node.get_writekey()))
+ d = self.do_upload_mdmf()
+ def _then(ign):
+ mdmf_uri = self.mdmf_node.get_uri()
+ cap = uri.from_string(mdmf_uri)
+ self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
+ readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
+ cap = uri.from_string(readonly_mdmf_uri)
+ self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
+ d.addCallback(_then)
return d
-
- def test_get_storage_index(self):
- d = self.mdmf_node.get_best_mutable_version()
- d.addCallback(lambda bv:
- self.failUnlessEqual(bv.get_storage_index(),
- self.mdmf_node.get_storage_index()))
- d.addCallback(lambda ignored:
- self.sdmf_node.get_best_mutable_version())
- d.addCallback(lambda bv:
- self.failUnlessEqual(bv.get_storage_index(),
- self.sdmf_node.get_storage_index()))
+ # The IMutableVersion returned by get_best_mutable_version() must
+ # report the same writekey/storage-index as its parent filenode,
+ # and must not claim to be read-only; checked for MDMF and SDMF.
+ def test_mutable_version(self):
+ # assert that getting parameters from the IMutableVersion object
+ # gives us the same data as getting them from the filenode itself
+ d = self.do_upload()
+ d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
+ def _check_mdmf(bv):
+ n = self.mdmf_node
+ self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
+ self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
+ self.failIf(bv.is_readonly())
+ d.addCallback(_check_mdmf)
+ d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
+ def _check_sdmf(bv):
+ n = self.sdmf_node
+ self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
+ self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
+ self.failIf(bv.is_readonly())
+ d.addCallback(_check_sdmf)
return d
    def test_get_readonly_version(self):
-        d = self.mdmf_node.get_best_readable_version()
-        d.addCallback(lambda bv:
-            self.failUnless(bv.is_readonly()))
-        d.addCallback(lambda ignored:
-            self.sdmf_node.get_best_readable_version())
-        d.addCallback(lambda bv:
-            self.failUnless(bv.is_readonly()))
-        return d
+ # best-readable versions of both file types must be read-only
+ d = self.do_upload()
+ d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
+ d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
+ # Attempting to get a mutable version of a mutable file from a
+ # filenode initialized with a readcap should return a readonly
+ # version of that same node.
+ d.addCallback(lambda ign: self.mdmf_node.get_readonly())
+ d.addCallback(lambda ro: ro.get_best_mutable_version())
+ d.addCallback(lambda v: self.failUnless(v.is_readonly()))
-    def test_get_mutable_version(self):
-        d = self.mdmf_node.get_best_mutable_version()
-        d.addCallback(lambda bv:
-            self.failIf(bv.is_readonly()))
-        d.addCallback(lambda ignored:
-            self.sdmf_node.get_best_mutable_version())
-        d.addCallback(lambda bv:
-            self.failIf(bv.is_readonly()))
+ # same two checks for the SDMF node
+ d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
+ d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
+
+ d.addCallback(lambda ign: self.sdmf_node.get_readonly())
+ d.addCallback(lambda ro: ro.get_best_mutable_version())
+ d.addCallback(lambda v: self.failUnless(v.is_readonly()))
        return d
def test_toplevel_overwrite(self):
new_data = MutableData("foo bar baz" * 100000)
new_small_data = MutableData("foo bar baz" * 10)
- d = self.mdmf_node.overwrite(new_data)
+ d = self.do_upload()
+ d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
def test_toplevel_modify(self):
+ d = self.do_upload()
def modifier(old_contents, servermap, first_time):
return old_contents + "modified"
- d = self.mdmf_node.modify(modifier)
+ d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
# TODO: When we can publish multiple versions, alter this test
# to modify a version other than the best usable version, then
# test to see that the best recoverable version is that.
+ d = self.do_upload()
def modifier(old_contents, servermap, first_time):
return old_contents + "modified"
- d = self.mdmf_node.modify(modifier)
+ d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
def test_download_nonexistent_version(self):
- d = self.mdmf_node.get_servermap(mode=MODE_WRITE)
+ d = self.do_upload_mdmf()
+ d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
def _set_servermap(servermap):
self.servermap = servermap
d.addCallback(_set_servermap)
def test_partial_read(self):
# read only a few bytes at a time, and see that the results are
# what we expect.
- d = self.mdmf_node.get_best_readable_version()
+ d = self.do_upload_mdmf()
+ d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
return d
- def test_read(self):
- d = self.mdmf_node.get_best_readable_version()
+ # Shared driver: upload the MDMF fixture, read [offset, offset+length)
+ # through version.read(), and compare against a slice of self.data,
+ # printing head/tail excerpts on mismatch to keep failures readable.
+ def _test_partial_read(self, offset, length):
+ d = self.do_upload_mdmf()
+ d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
+ c = consumer.MemoryConsumer()
+ d.addCallback(lambda version:
+ version.read(c, offset, length))
+ expected = self.data[offset:offset+length]
+ d.addCallback(lambda ignored: "".join(c.chunks))
+ def _check(results):
+ if results != expected:
+ # avoid dumping ~1MB of repeating data into the failure
+ print
+ print "got: %s ... %s" % (results[:20], results[-20:])
+ print "exp: %s ... %s" % (expected[:20], expected[-20:])
+ self.fail("results != expected")
+ d.addCallback(_check)
+ return d
+
+ # Boundary cases around the 128KiB segment size, rounded up to a
+ # multiple of k=3 via mathutil.next_multiple.
+ def test_partial_read_starting_on_segment_boundary(self):
+ return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 50)
+
+ def test_partial_read_ending_one_byte_after_segment_boundary(self):
+ return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3)-50, 51)
+
+ def test_partial_read_zero_length_at_start(self):
+ return self._test_partial_read(0, 0)
+
+ def test_partial_read_zero_length_in_middle(self):
+ return self._test_partial_read(50, 0)
+
+ def test_partial_read_zero_length_at_segment_boundary(self):
+ return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 0)
+
+ # XXX factor these into a single upload after they pass
+ # trial's .todo attribute: these are known-broken, expected failures
+ _broken = "zero-length reads of mutable files don't work"
+ test_partial_read_zero_length_at_start.todo = _broken
+ test_partial_read_zero_length_in_middle.todo = _broken
+ test_partial_read_zero_length_at_segment_boundary.todo = _broken
+
+ # Shared driver: fetch node's contents via both version.read() into a
+ # MemoryConsumer and download_best_version(), asserting each equals
+ # the expected string.
+ def _test_read_and_download(self, node, expected):
+ d = node.get_best_readable_version()
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
d2.addCallback(lambda ignored: version.read(c))
d2.addCallback(lambda ignored:
-                          self.failUnlessEqual("".join(c.chunks), self.data))
+                          self.failUnlessEqual(expected, "".join(c.chunks)))
return d2
d.addCallback(_read_data)
+ d.addCallback(lambda ignored: node.download_best_version())
+ d.addCallback(lambda data: self.failUnlessEqual(expected, data))
return d
+ # MDMF round-trip: upload self.data, then read/download it back.
+ def test_read_and_download_mdmf(self):
+ d = self.do_upload_mdmf()
+ d.addCallback(self._test_read_and_download, self.data)
+ return d
- def test_download_best_version(self):
- d = self.mdmf_node.download_best_version()
- d.addCallback(lambda data:
- self.failUnlessEqual(data, self.data))
- d.addCallback(lambda ignored:
- self.sdmf_node.download_best_version())
- d.addCallback(lambda data:
- self.failUnlessEqual(data, self.small_data))
+ # SDMF round-trips: the small fixture, and the zero-length edge case.
+ def test_read_and_download_sdmf(self):
+ d = self.do_upload_sdmf()
+ d.addCallback(self._test_read_and_download, self.small_data)
+ return d
+
+ def test_read_and_download_sdmf_zero_length(self):
+ d = self.do_upload_empty_sdmf()
+ d.addCallback(self._test_read_and_download, "")
return d
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
+ timeout = 400 # these tests are too big, 120s is not enough on slow
+ # platforms
def setUp(self):
GridTestMixin.setUp(self)
self.basedir = self.mktemp()
dl.addCallback(_stash)
return dl
- def test_append(self):
- # We should be able to append data to the middle of a mutable
- # file and get what we expect.
- new_data = self.data + "appended"
- for node in (self.mdmf_node, self.mdmf_max_shares_node):
- d = node.get_best_mutable_version()
- d.addCallback(lambda mv:
- mv.update(MutableData("appended"), len(self.data)))
- d.addCallback(lambda ignored, node=node:
- node.download_best_version())
- d.addCallback(lambda results:
- self.failUnlessEqual(results, new_data))
- return d
-
- def test_replace(self):
- # We should be able to replace data in the middle of a mutable
- # file and get what we expect back.
- new_data = self.data[:100]
- new_data += "appended"
- new_data += self.data[108:]
- for node in (self.mdmf_node, self.mdmf_max_shares_node):
- d = node.get_best_mutable_version()
- d.addCallback(lambda mv:
- mv.update(MutableData("appended"), 100))
- d.addCallback(lambda ignored, node=node:
- node.download_best_version())
- d.addCallback(lambda results:
- self.failUnlessEqual(results, new_data))
- return d
-
- def test_replace_beginning(self):
- # We should be able to replace data at the beginning of the file
- # without truncating the file
- B = "beginning"
- new_data = B + self.data[len(B):]
- for node in (self.mdmf_node, self.mdmf_max_shares_node):
- d = node.get_best_mutable_version()
- d.addCallback(lambda mv: mv.update(MutableData(B), 0))
- d.addCallback(lambda ignored, node=node:
- node.download_best_version())
- d.addCallback(lambda results: self.failUnlessEqual(results, new_data))
- return d
- def test_replace_segstart1(self):
- offset = 128*1024+1
- new_data = "NNNN"
- expected = self.data[:offset]+new_data+self.data[offset+4:]
+ def _test_replace(self, offset, new_data):
+ expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
for node in (self.mdmf_node, self.mdmf_max_shares_node):
d = node.get_best_mutable_version()
d.addCallback(lambda mv:
d.addCallback(_check)
return d
+ # All of these delegate to _test_replace(offset, new_data), which
+ # splices new_data into self.data at offset via mv.update() and
+ # verifies the downloaded result.
+ def test_append(self):
+ # We should be able to append data to a mutable file and get
+ # what we expect.
+ return self._test_replace(len(self.data), "appended")
+
+ def test_replace_middle(self):
+ # We should be able to replace data in the middle of a mutable
+ # file and get what we expect back.
+ return self._test_replace(100, "replaced")
+
+ def test_replace_beginning(self):
+ # We should be able to replace data at the beginning of the file
+ # without truncating the file
+ return self._test_replace(0, "beginning")
+
+ def test_replace_segstart1(self):
+ # one byte past the first 128KiB segment boundary
+ return self._test_replace(128*1024+1, "NNNN")
+
+ def test_replace_zero_length_beginning(self):
+ return self._test_replace(0, "")
+
+ def test_replace_zero_length_middle(self):
+ return self._test_replace(50, "")
+
+ def test_replace_zero_length_segstart1(self):
+ return self._test_replace(128*1024+1, "")
+
+ def test_replace_and_extend(self):
+ # We should be able to replace data in the middle of a mutable
+ # file and extend that mutable file and get what we expect.
+ return self._test_replace(100, "modified " * 100000)
+
+
def _check_differences(self, got, expected):
# displaying arbitrary file corruption is tricky for a
# 1MB file of repeating data,, so look for likely places
d.addCallback(self._check_differences, expected)
return d
- def test_replace_and_extend(self):
- # We should be able to replace data in the middle of a mutable
- # file and extend that mutable file and get what we expect.
- new_data = self.data[:100]
- new_data += "modified " * 100000
- for node in (self.mdmf_node, self.mdmf_max_shares_node):
- d = node.get_best_mutable_version()
- d.addCallback(lambda mv:
- mv.update(MutableData("modified " * 100000), 100))
- d.addCallback(lambda ignored, node=node:
- node.download_best_version())
- d.addCallback(lambda results:
- self.failUnlessEqual(results, new_data))
- return d
-
def test_append_power_of_two(self):
# If we attempt to extend a mutable file so that its segment
d.addCallback(lambda results:
self.failUnlessEqual(results, new_data))
return d
- test_append_power_of_two.timeout = 15
def test_update_sdmf(self):
d = n.download_best_version()
d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
return d
+
+# Regression coverage for ticket #1510: re-creating a node for an
+# existing mutable file under different default encoding parameters.
+class DifferentEncoding(unittest.TestCase):
+ def setUp(self):
+ self._storage = s = FakeStorage()
+ self.nodemaker = make_nodemaker(s)
+
+ def test_filenode(self):
+ # create a file with 3-of-20, then modify it with a client configured
+ # to do 3-of-10. #1510 tracks a failure here
+ self.nodemaker.default_encoding_parameters["n"] = 20
+ d = self.nodemaker.create_mutable_file("old contents")
+ def _created(n):
+ filecap = n.get_cap().to_string()
+ del n # we want a new object, not the cached one
+ self.nodemaker.default_encoding_parameters["n"] = 10
+ n2 = self.nodemaker.create_from_cap(filecap)
+ return n2
+ d.addCallback(_created)
+ def modifier(old_contents, servermap, first_time):
+ return "new contents"
+ d.addCallback(lambda n: n.modify(modifier))
+ return d