import os, random, struct
from zope.interface import implements
from twisted.internet import defer
-from twisted.internet.interfaces import IConsumer
+from twisted.internet.interfaces import IPullProducer
from twisted.python import failure
from twisted.application import service
from twisted.web.error import Error as WebError
from foolscap.api import flushEventualQueue, fireEventually
-from allmydata import uri, dirnode, client
+from allmydata import uri, client
from allmydata.introducer.server import IntroducerNode
-from allmydata.interfaces import IURI, IMutableFileNode, IFileNode, \
- FileTooLargeError, NotEnoughSharesError, ICheckable
+from allmydata.interfaces import IMutableFileNode, IImmutableFileNode,\
+ NotEnoughSharesError, ICheckable, \
+ IMutableUploadable, SDMF_VERSION, \
+ MDMF_VERSION
from allmydata.check_results import CheckResults, CheckAndRepairResults, \
DeepCheckResults, DeepCheckAndRepairResults
-from allmydata.mutable.common import CorruptShareError
+from allmydata.storage_client import StubServer
from allmydata.mutable.layout import unpack_header
-from allmydata.storage.server import storage_index_to_dir
+from allmydata.mutable.publish import MutableData
from allmydata.storage.mutable import MutableShareFile
from allmydata.util import hashutil, log, fileutil, pollmixin
from allmydata.util.assertutil import precondition
+from allmydata.util.consumer import download_to_data
from allmydata.stats import StatsGathererService
from allmydata.key_generator import KeyGeneratorService
-import common_util as testutil
+import allmydata.test.common_util as testutil
from allmydata import immutable
+TEST_RSA_KEY_SIZE = 522
def flush_but_dont_ignore(res):
d = flushEventualQueue()
d.addCallback(_done)
return d
+class DummyProducer:
+ implements(IPullProducer)
+ def resumeProducing(self):
+ pass
+
class FakeCHKFileNode:
- """I provide IFileNode, but all of my data is stored in a class-level
- dictionary."""
- implements(IFileNode)
- all_contents = {}
- bad_shares = {}
-
- def __init__(self, u, thisclient):
- precondition(IURI.providedBy(u), u)
- self.client = thisclient
- self.my_uri = u
- self.storage_index = u.storage_index
+ """I provide IImmutableFileNode, but all of my data is stored in a
+ class-level dictionary."""
+ implements(IImmutableFileNode)
+
+ def __init__(self, filecap, all_contents):
+ precondition(isinstance(filecap, (uri.CHKFileURI, uri.LiteralFileURI)), filecap)
+ self.all_contents = all_contents
+ self.my_uri = filecap
+ self.storage_index = self.my_uri.get_storage_index()
def get_uri(self):
return self.my_uri.to_string()
+ def get_write_uri(self):
+ return None
def get_readonly_uri(self):
return self.my_uri.to_string()
+ def get_cap(self):
+ return self.my_uri
def get_verify_cap(self):
return self.my_uri.get_verify_cap()
def get_repair_cap(self):
return self.storage_index
def check(self, monitor, verify=False, add_lease=False):
- r = CheckResults(self.my_uri, self.storage_index)
- is_bad = self.bad_shares.get(self.storage_index, None)
- data = {}
- data["count-shares-needed"] = 3
- data["count-shares-expected"] = 10
- data["count-good-share-hosts"] = 10
- data["count-wrong-shares"] = 0
- nodeid = "\x00"*20
- data["list-corrupt-shares"] = []
- data["sharemap"] = {1: [nodeid]}
- data["servers-responding"] = [nodeid]
- data["count-recoverable-versions"] = 1
- data["count-unrecoverable-versions"] = 0
- if is_bad:
- r.set_healthy(False)
- r.set_recoverable(True)
- data["count-shares-good"] = 9
- data["list-corrupt-shares"] = [(nodeid, self.storage_index, 0)]
- r.problems = failure.Failure(CorruptShareError(is_bad))
- else:
- r.set_healthy(True)
- r.set_recoverable(True)
- data["count-shares-good"] = 10
- r.problems = []
- r.set_data(data)
- r.set_needs_rebalancing(False)
+ s = StubServer("\x00"*20)
+ r = CheckResults(self.my_uri, self.storage_index,
+ healthy=True, recoverable=True,
+ needs_rebalancing=False,
+ count_shares_needed=3,
+ count_shares_expected=10,
+ count_shares_good=10,
+ count_good_share_hosts=10,
+ count_recoverable_versions=1,
+ count_unrecoverable_versions=0,
+ servers_responding=[s],
+ sharemap={1: [s]},
+ count_wrong_shares=0,
+ list_corrupt_shares=[],
+ count_corrupt_shares=0,
+ list_incompatible_shares=[],
+ count_incompatible_shares=0,
+ summary="",
+ report=[],
+ share_problems=[],
+ servermap=None)
return defer.succeed(r)
def check_and_repair(self, monitor, verify=False, add_lease=False):
d = self.check(verify)
return False
def is_readonly(self):
return True
+ def is_unknown(self):
+ return False
+ def is_allowed_in_immutable_directory(self):
+ return True
+ def raise_error(self):
+ pass
- def download(self, target):
- if self.my_uri.to_string() not in self.all_contents:
- f = failure.Failure(NotEnoughSharesError(None, 0, 3))
- target.fail(f)
- return defer.fail(f)
- data = self.all_contents[self.my_uri.to_string()]
- target.open(len(data))
- target.write(data)
- target.close()
- return defer.maybeDeferred(target.finish)
- def download_to_data(self):
- if self.my_uri.to_string() not in self.all_contents:
- return defer.fail(NotEnoughSharesError(None, 0, 3))
- data = self.all_contents[self.my_uri.to_string()]
- return defer.succeed(data)
def get_size(self):
+ if isinstance(self.my_uri, uri.LiteralFileURI):
+ return self.my_uri.get_size()
try:
data = self.all_contents[self.my_uri.to_string()]
except KeyError, le:
raise NotEnoughSharesError(le, 0, 3)
return len(data)
+ def get_current_size(self):
+ return defer.succeed(self.get_size())
+
def read(self, consumer, offset=0, size=None):
- d = self.download_to_data()
- def _got(data):
- start = offset
- if size is not None:
- end = offset + size
- else:
- end = len(data)
- consumer.write(data[start:end])
- return consumer
- d.addCallback(_got)
+ # we don't bother to call registerProducer/unregisterProducer,
+ # because it's a hassle to write a dummy Producer that does the right
+ # thing (we have to make sure that DummyProducer.resumeProducing
+ # writes the data into the consumer immediately, otherwise it will
+ # loop forever).
+
+ d = defer.succeed(None)
+ d.addCallback(self._read, consumer, offset, size)
return d
-def make_chk_file_uri(size):
+ def _read(self, ignored, consumer, offset, size):
+ if isinstance(self.my_uri, uri.LiteralFileURI):
+ data = self.my_uri.data
+ else:
+ if self.my_uri.to_string() not in self.all_contents:
+ raise NotEnoughSharesError(None, 0, 3)
+ data = self.all_contents[self.my_uri.to_string()]
+ start = offset
+ if size is not None:
+ end = offset + size
+ else:
+ end = len(data)
+ consumer.write(data[start:end])
+ return consumer
+
+
+ def get_best_readable_version(self):
+ return defer.succeed(self)
+
+
+ def download_to_data(self):
+ return download_to_data(self)
+
+
+ download_best_version = download_to_data
+
+
+ def get_size_of_best_version(self):
+ return defer.succeed(self.get_size())
+
+
+def make_chk_file_cap(size):
return uri.CHKFileURI(key=os.urandom(16),
uri_extension_hash=os.urandom(32),
needed_shares=3,
total_shares=10,
size=size)
+def make_chk_file_uri(size):
+ return make_chk_file_cap(size).to_string()
-def create_chk_filenode(thisclient, contents):
- u = make_chk_file_uri(len(contents))
- n = FakeCHKFileNode(u, thisclient)
- FakeCHKFileNode.all_contents[u.to_string()] = contents
+def create_chk_filenode(contents, all_contents):
+ filecap = make_chk_file_cap(len(contents))
+ n = FakeCHKFileNode(filecap, all_contents)
+ all_contents[filecap.to_string()] = contents
return n
implements(IMutableFileNode, ICheckable)
MUTABLE_SIZELIMIT = 10000
- all_contents = {}
- bad_shares = {}
-
- def __init__(self, thisclient):
- self.client = thisclient
- self.my_uri = make_mutable_file_uri()
- self.storage_index = self.my_uri.storage_index
- def create(self, initial_contents, key_generator=None):
- if len(initial_contents) > self.MUTABLE_SIZELIMIT:
- raise FileTooLargeError("SDMF is limited to one segment, and "
- "%d > %d" % (len(initial_contents),
- self.MUTABLE_SIZELIMIT))
- self.all_contents[self.storage_index] = initial_contents
+
+ def __init__(self, storage_broker, secret_holder,
+ default_encoding_parameters, history, all_contents):
+ self.all_contents = all_contents
+ self.file_types = {} # storage index => MDMF_VERSION or SDMF_VERSION
+ self.init_from_cap(make_mutable_file_cap())
+ self._k = default_encoding_parameters['k']
+ self._segsize = default_encoding_parameters['max_segment_size']
+ def create(self, contents, key_generator=None, keysize=None,
+ version=SDMF_VERSION):
+ if version == MDMF_VERSION and \
+ isinstance(self.my_uri, (uri.ReadonlySSKFileURI,
+ uri.WriteableSSKFileURI)):
+ self.init_from_cap(make_mdmf_mutable_file_cap())
+ self.file_types[self.storage_index] = version
+ initial_contents = self._get_initial_contents(contents)
+ data = initial_contents.read(initial_contents.get_size())
+ data = "".join(data)
+ self.all_contents[self.storage_index] = data
return defer.succeed(self)
- def init_from_uri(self, myuri):
- self.my_uri = IURI(myuri)
- self.storage_index = self.my_uri.storage_index
+ def _get_initial_contents(self, contents):
+ if contents is None:
+ return MutableData("")
+
+ if IMutableUploadable.providedBy(contents):
+ return contents
+
+ assert callable(contents), "%s should be callable, not %s" % \
+ (contents, type(contents))
+ return contents(self)
+ def init_from_cap(self, filecap):
+ assert isinstance(filecap, (uri.WriteableSSKFileURI,
+ uri.ReadonlySSKFileURI,
+ uri.WriteableMDMFFileURI,
+ uri.ReadonlyMDMFFileURI))
+ self.my_uri = filecap
+ self.storage_index = self.my_uri.get_storage_index()
+ if isinstance(filecap, (uri.WriteableMDMFFileURI,
+ uri.ReadonlyMDMFFileURI)):
+ self.file_types[self.storage_index] = MDMF_VERSION
+
+ else:
+ self.file_types[self.storage_index] = SDMF_VERSION
+
return self
+ def get_cap(self):
+ return self.my_uri
+ def get_readcap(self):
+ return self.my_uri.get_readonly()
def get_uri(self):
return self.my_uri.to_string()
+ def get_write_uri(self):
+ if self.is_readonly():
+ return None
+ return self.my_uri.to_string()
def get_readonly(self):
return self.my_uri.get_readonly()
def get_readonly_uri(self):
return self.my_uri.get_readonly().to_string()
def get_verify_cap(self):
return self.my_uri.get_verify_cap()
+ def get_repair_cap(self):
+ if self.my_uri.is_readonly():
+ return None
+ return self.my_uri
def is_readonly(self):
return self.my_uri.is_readonly()
def is_mutable(self):
return self.my_uri.is_mutable()
+ def is_unknown(self):
+ return False
+ def is_allowed_in_immutable_directory(self):
+ return not self.my_uri.is_mutable()
+ def raise_error(self):
+ pass
def get_writekey(self):
return "\x00"*16
def get_size(self):
- return "?" # TODO: see mutable.MutableFileNode.get_size
+ return len(self.all_contents[self.storage_index])
+ def get_current_size(self):
+ return self.get_size_of_best_version()
def get_size_of_best_version(self):
return defer.succeed(len(self.all_contents[self.storage_index]))
def get_storage_index(self):
return self.storage_index
+ def get_servermap(self, mode):
+ return defer.succeed(None)
+
+ def get_version(self):
+ assert self.storage_index in self.file_types
+ return self.file_types[self.storage_index]
+
def check(self, monitor, verify=False, add_lease=False):
- r = CheckResults(self.my_uri, self.storage_index)
- is_bad = self.bad_shares.get(self.storage_index, None)
- data = {}
- data["count-shares-needed"] = 3
- data["count-shares-expected"] = 10
- data["count-good-share-hosts"] = 10
- data["count-wrong-shares"] = 0
- data["list-corrupt-shares"] = []
- nodeid = "\x00"*20
- data["sharemap"] = {"seq1-abcd-sh0": [nodeid]}
- data["servers-responding"] = [nodeid]
- data["count-recoverable-versions"] = 1
- data["count-unrecoverable-versions"] = 0
- if is_bad:
- r.set_healthy(False)
- r.set_recoverable(True)
- data["count-shares-good"] = 9
- r.problems = failure.Failure(CorruptShareError("peerid",
- 0, # shnum
- is_bad))
- else:
- r.set_healthy(True)
- r.set_recoverable(True)
- data["count-shares-good"] = 10
- r.problems = []
- r.set_data(data)
- r.set_needs_rebalancing(False)
+ s = StubServer("\x00"*20)
+ r = CheckResults(self.my_uri, self.storage_index,
+ healthy=True, recoverable=True,
+ needs_rebalancing=False,
+ count_shares_needed=3,
+ count_shares_expected=10,
+ count_shares_good=10,
+ count_good_share_hosts=10,
+ count_recoverable_versions=1,
+ count_unrecoverable_versions=0,
+ servers_responding=[s],
+ sharemap={"seq1-abcd-sh0": [s]},
+ count_wrong_shares=0,
+ list_corrupt_shares=[],
+ count_corrupt_shares=0,
+ list_incompatible_shares=[],
+ count_incompatible_shares=0,
+ summary="",
+ report=[],
+ share_problems=[],
+ servermap=None)
return defer.succeed(r)
def check_and_repair(self, monitor, verify=False, add_lease=False):
return d
def download_best_version(self):
- return defer.succeed(self.all_contents[self.storage_index])
+ return defer.succeed(self._download_best_version())
+
+
+ def _download_best_version(self, ignored=None):
+ if isinstance(self.my_uri, uri.LiteralFileURI):
+ return self.my_uri.data
+ if self.storage_index not in self.all_contents:
+ raise NotEnoughSharesError(None, 0, 3)
+ return self.all_contents[self.storage_index]
+
+
def overwrite(self, new_contents):
- if len(new_contents) > self.MUTABLE_SIZELIMIT:
- raise FileTooLargeError("SDMF is limited to one segment, and "
- "%d > %d" % (len(new_contents),
- self.MUTABLE_SIZELIMIT))
assert not self.is_readonly()
- self.all_contents[self.storage_index] = new_contents
+ new_data = new_contents.read(new_contents.get_size())
+ new_data = "".join(new_data)
+ self.all_contents[self.storage_index] = new_data
return defer.succeed(None)
def modify(self, modifier):
# this does not implement FileTooLargeError, but the real one does
def _modify(self, modifier):
assert not self.is_readonly()
old_contents = self.all_contents[self.storage_index]
- self.all_contents[self.storage_index] = modifier(old_contents, None, True)
+ new_data = modifier(old_contents, None, True)
+ self.all_contents[self.storage_index] = new_data
return None
- def download(self, target):
- if self.storage_index not in self.all_contents:
- f = failure.Failure(NotEnoughSharesError(None, 0, 3))
- target.fail(f)
- return defer.fail(f)
- data = self.all_contents[self.storage_index]
- target.open(len(data))
- target.write(data)
- target.close()
- return defer.maybeDeferred(target.finish)
- def download_to_data(self):
- if self.storage_index not in self.all_contents:
- return defer.fail(NotEnoughSharesError(None, 0, 3))
- data = self.all_contents[self.storage_index]
- return defer.succeed(data)
+ # As actually implemented, MutableFileNode and MutableFileVersion
+ # are distinct. However, nothing in the webapi uses (yet) that
+ # distinction -- it just uses the unified download interface
+ # provided by get_best_readable_version and read. When we start
+ # doing cooler things like LDMF, we will want to revise this code to
+ # be less simplistic.
+ def get_best_readable_version(self):
+ return defer.succeed(self)
+
-def make_mutable_file_uri():
+ def get_best_mutable_version(self):
+ return defer.succeed(self)
+
+ # Ditto for this, which is an implementation of IWriteable.
+ # XXX: Declare that the same is implemented.
+ def update(self, data, offset):
+ assert not self.is_readonly()
+ def modifier(old, servermap, first_time):
+ new = old[:offset] + "".join(data.read(data.get_size()))
+ new += old[len(new):]
+ return new
+ return self.modify(modifier)
+
+
+ def read(self, consumer, offset=0, size=None):
+ data = self._download_best_version()
+ if size:
+ data = data[offset:offset+size]
+ consumer.write(data)
+ return defer.succeed(consumer)
+
+
+def make_mutable_file_cap():
return uri.WriteableSSKFileURI(writekey=os.urandom(16),
fingerprint=os.urandom(32))
+
+def make_mdmf_mutable_file_cap():
+ return uri.WriteableMDMFFileURI(writekey=os.urandom(16),
+ fingerprint=os.urandom(32))
+
+def make_mutable_file_uri(mdmf=False):
+ if mdmf:
+ uri = make_mdmf_mutable_file_cap()
+ else:
+ uri = make_mutable_file_cap()
+
+ return uri.to_string()
+
def make_verifier_uri():
return uri.SSKVerifierURI(storage_index=os.urandom(16),
- fingerprint=os.urandom(32))
+ fingerprint=os.urandom(32)).to_string()
+
+def create_mutable_filenode(contents, mdmf=False, all_contents=None):
+ # XXX: All of these arguments are kind of stupid.
+ if mdmf:
+ cap = make_mdmf_mutable_file_cap()
+ else:
+ cap = make_mutable_file_cap()
+
+ encoding_params = {}
+ encoding_params['k'] = 3
+ encoding_params['max_segment_size'] = 128*1024
+
+ filenode = FakeMutableFileNode(None, None, encoding_params, None,
+ all_contents)
+ filenode.init_from_cap(cap)
+ if mdmf:
+ filenode.create(MutableData(contents), version=MDMF_VERSION)
+ else:
+ filenode.create(MutableData(contents), version=SDMF_VERSION)
+ return filenode
-class FakeDirectoryNode(dirnode.NewDirectoryNode):
- """This offers IDirectoryNode, but uses a FakeMutableFileNode for the
- backing store, so it doesn't go to the grid. The child data is still
- encrypted and serialized, so this isn't useful for tests that want to
- look inside the dirnodes and check their contents.
- """
- filenode_class = FakeMutableFileNode
class LoggingServiceParent(service.MultiService):
def log(self, *args, **kwargs):
return log.msg(*args, **kwargs)
-
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
# SystemTestMixin tests tend to be a lot of work, and we have a few
iv_dir = self.getdir("introducer")
if not os.path.isdir(iv_dir):
fileutil.make_dirs(iv_dir)
- f = open(os.path.join(iv_dir, "webport"), "w")
- f.write("tcp:0:interface=127.0.0.1\n")
- f.close()
+ fileutil.write(os.path.join(iv_dir, 'tahoe.cfg'),
+ "[node]\n" +
+ u"nickname = introducer \u263A\n".encode('utf-8') +
+ "web.port = tcp:0:interface=127.0.0.1\n")
if SYSTEM_TEST_CERTS:
os.mkdir(os.path.join(iv_dir, "private"))
f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
kgsdir = self.getdir("key_generator")
fileutil.make_dirs(kgsdir)
- self.key_generator_svc = KeyGeneratorService(kgsdir, display_furl=False)
+ self.key_generator_svc = KeyGeneratorService(kgsdir,
+ display_furl=False,
+ default_key_size=TEST_RSA_KEY_SIZE)
self.key_generator_svc.key_generator.pool_size = 4
self.key_generator_svc.key_generator.pool_refresh_delay = 60
self.add_service(self.key_generator_svc)
f.write(SYSTEM_TEST_CERTS[i+1])
f.close()
- def write(name, value):
- open(os.path.join(basedir, name), "w").write(value+"\n")
+ config = "[client]\n"
+ config += "introducer.furl = %s\n" % self.introducer_furl
+ if self.stats_gatherer_furl:
+ config += "stats_gatherer.furl = %s\n" % self.stats_gatherer_furl
+
+ nodeconfig = "[node]\n"
+ nodeconfig += (u"nickname = client %d \u263A\n" % (i,)).encode('utf-8')
+
if i == 0:
# clients[0] runs a webserver and a helper, no key_generator
- write("webport", "tcp:0:interface=127.0.0.1")
- write("run_helper", "yes")
- write("keepalive_timeout", "600")
- if i == 3:
+ config += nodeconfig
+ config += "web.port = tcp:0:interface=127.0.0.1\n"
+ config += "timeout.keepalive = 600\n"
+ config += "[helper]\n"
+ config += "enabled = True\n"
+ elif i == 3:
# clients[3] runs a webserver and uses a helper, uses
# key_generator
- write("webport", "tcp:0:interface=127.0.0.1")
- write("disconnect_timeout", "1800")
if self.key_generator_furl:
- kgf = "%s\n" % (self.key_generator_furl,)
- write("key_generator.furl", kgf)
- write("introducer.furl", self.introducer_furl)
- if self.stats_gatherer_furl:
- write("stats_gatherer.furl", self.stats_gatherer_furl)
+ config += "key_generator.furl = %s\n" % self.key_generator_furl
+ config += nodeconfig
+ config += "web.port = tcp:0:interface=127.0.0.1\n"
+ config += "timeout.disconnect = 1800\n"
+ else:
+ config += nodeconfig
- # give subclasses a chance to append liens to the node's tahoe.cfg
+ fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
+
+ # give subclasses a chance to append lines to the node's tahoe.cfg
# files before they are launched.
self._set_up_nodes_extra_config()
# will have registered the helper furl).
c = self.add_service(client.Client(basedir=basedirs[0]))
self.clients.append(c)
+ c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
d = c.when_tub_ready()
def _ready(res):
f = open(os.path.join(basedirs[0],"private","helper.furl"), "r")
f.close()
self.helper_furl = helper_furl
if self.numclients >= 4:
- f = open(os.path.join(basedirs[3],"helper.furl"), "w")
- f.write(helper_furl)
+ f = open(os.path.join(basedirs[3], 'tahoe.cfg'), 'ab+')
+ f.write(
+ "[client]\n"
+ "helper.furl = %s\n" % helper_furl)
f.close()
# this starts the rest of the clients
for i in range(1, self.numclients):
c = self.add_service(client.Client(basedir=basedirs[i]))
self.clients.append(c)
+ c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
log.msg("STARTING")
return self.wait_for_connections()
d.addCallback(_ready)
def _connected(res):
log.msg("CONNECTED")
# now find out where the web port was
- l = self.clients[0].getServiceNamed("webish").listener
- port = l._port.getHost().port
- self.webish_url = "http://localhost:%d/" % port
+ self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
if self.numclients >=4:
# and the helper-using webport
- l = self.clients[3].getServiceNamed("webish").listener
- port = l._port.getHost().port
- self.helper_webish_url = "http://localhost:%d/" % port
+ self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
d.addCallback(_connected)
return d
def _stopped(res):
new_c = client.Client(basedir=self.getdir("client%d" % num))
self.clients[num] = new_c
+ new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
self.add_service(new_c)
return new_c.when_tub_ready()
d.addCallback(_stopped)
def _maybe_get_webport(res):
if num == 0:
# now find out where the web port was
- l = self.clients[0].getServiceNamed("webish").listener
- port = l._port.getHost().port
- self.webish_url = "http://localhost:%d/" % port
+ self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
d.addCallback(_maybe_get_webport)
return d
basedir = self.getdir("client%d" % client_num)
if not os.path.isdir(basedir):
fileutil.make_dirs(basedir)
- open(os.path.join(basedir, "introducer.furl"), "w").write(self.introducer_furl)
+ config = "[client]\n"
+ config += "introducer.furl = %s\n" % self.introducer_furl
if helper_furl:
- f = open(os.path.join(basedir, "helper.furl") ,"w")
- f.write(helper_furl+"\n")
- f.close()
+ config += "helper.furl = %s\n" % helper_furl
+ fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
c = client.Client(basedir=basedir)
self.clients.append(c)
+ c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
self.numclients += 1
if add_to_sparent:
c.setServiceParent(self.sparent)
def _check_connections(self):
for c in self.clients:
- ic = c.introducer_client
- if not ic.connected_to_introducer():
+ if not c.connected_to_introducer():
return False
- if len(ic.get_all_peerids()) != self.numclients:
+ sb = c.get_storage_broker()
+ if len(sb.get_connected_servers()) != self.numclients:
+ return False
+ up = c.getServiceNamed("uploader")
+ if up._helper_furl and not up._helper:
return False
return True
def wait_for_connections(self, ignored=None):
- # TODO: replace this with something that takes a list of peerids and
- # fires when they've all been heard from, instead of using a count
- # and a threshold
return self.poll(self._check_connections, timeout=200)
TEST_DATA="\x02"*(immutable.upload.Uploader.URI_LIT_SIZE_THRESHOLD+1)
-class ShareManglingMixin(SystemTestMixin):
-
- def setUp(self):
- # Set self.basedir to a temp dir which has the name of the current
- # test method in its name.
- self.basedir = self.mktemp()
-
- d = defer.maybeDeferred(SystemTestMixin.setUp, self)
- d.addCallback(lambda x: self.set_up_nodes())
-
- def _upload_a_file(ignored):
- cl0 = self.clients[0]
- # We need multiple segments to test crypttext hash trees that are
- # non-trivial (i.e. they have more than just one hash in them).
- cl0.DEFAULT_ENCODING_PARAMETERS['max_segment_size'] = 12
- d2 = cl0.upload(immutable.upload.Data(TEST_DATA, convergence=""))
- def _after_upload(u):
- self.uri = IURI(u.uri)
- return cl0.create_node_from_uri(self.uri)
- d2.addCallback(_after_upload)
- return d2
- d.addCallback(_upload_a_file)
-
- def _stash_it(filenode):
- self.filenode = filenode
- d.addCallback(_stash_it)
- return d
-
- def find_shares(self, unused=None):
- """Locate shares on disk. Returns a dict that maps
- (clientnum,sharenum) to a string that contains the share container
- (copied directly from the disk, containing leases etc). You can
- modify this dict and then call replace_shares() to modify the shares.
- """
- shares = {} # k: (i, sharenum), v: data
-
- for i, c in enumerate(self.clients):
- sharedir = c.getServiceNamed("storage").sharedir
- for (dirp, dirns, fns) in os.walk(sharedir):
- for fn in fns:
- try:
- sharenum = int(fn)
- except TypeError:
- # Whoops, I guess that's not a share file then.
- pass
- else:
- data = open(os.path.join(sharedir, dirp, fn), "rb").read()
- shares[(i, sharenum)] = data
-
- return shares
-
- def replace_shares(self, newshares, storage_index):
- """Replace shares on disk. Takes a dictionary in the same form
- as find_shares() returns."""
-
- for i, c in enumerate(self.clients):
- sharedir = c.getServiceNamed("storage").sharedir
- for (dirp, dirns, fns) in os.walk(sharedir):
- for fn in fns:
- try:
- sharenum = int(fn)
- except TypeError:
- # Whoops, I guess that's not a share file then.
- pass
- else:
- pathtosharefile = os.path.join(sharedir, dirp, fn)
- os.unlink(pathtosharefile)
- for ((clientnum, sharenum), newdata) in newshares.iteritems():
- if clientnum == i:
- fullsharedirp=os.path.join(sharedir, storage_index_to_dir(storage_index))
- fileutil.make_dirs(fullsharedirp)
- wf = open(os.path.join(fullsharedirp, str(sharenum)), "wb")
- wf.write(newdata)
- wf.close()
-
- def _delete_a_share(self, unused=None, sharenum=None):
- """ Delete one share. """
-
- shares = self.find_shares()
- ks = shares.keys()
- if sharenum is not None:
- k = [ key for key in shares.keys() if key[1] == sharenum ][0]
- else:
- k = random.choice(ks)
- del shares[k]
- self.replace_shares(shares, storage_index=self.uri.storage_index)
-
- return unused
-
- def _corrupt_a_share(self, unused, corruptor_func, sharenum):
- shares = self.find_shares()
- ks = [ key for key in shares.keys() if key[1] == sharenum ]
- assert ks, (shares.keys(), sharenum)
- k = ks[0]
- shares[k] = corruptor_func(shares[k])
- self.replace_shares(shares, storage_index=self.uri.storage_index)
- return corruptor_func
-
- def _corrupt_all_shares(self, unused, corruptor_func):
- """ All shares on disk will be corrupted by corruptor_func. """
- shares = self.find_shares()
- for k in shares.keys():
- self._corrupt_a_share(unused, corruptor_func, k[1])
- return corruptor_func
-
- def _corrupt_a_random_share(self, unused, corruptor_func):
- """ Exactly one share on disk will be corrupted by corruptor_func. """
- shares = self.find_shares()
- ks = shares.keys()
- k = random.choice(ks)
- self._corrupt_a_share(unused, corruptor_func, k[1])
- return k[1]
-
- def _count_reads(self):
- sum_of_read_counts = 0
- for thisclient in self.clients:
- counters = thisclient.stats_provider.get_stats()['counters']
- sum_of_read_counts += counters.get('storage_server.read', 0)
- return sum_of_read_counts
-
- def _count_allocates(self):
- sum_of_allocate_counts = 0
- for thisclient in self.clients:
- counters = thisclient.stats_provider.get_stats()['counters']
- sum_of_allocate_counts += counters.get('storage_server.allocate', 0)
- return sum_of_allocate_counts
-
- def _count_writes(self):
- sum_of_write_counts = 0
- for thisclient in self.clients:
- counters = thisclient.stats_provider.get_stats()['counters']
- sum_of_write_counts += counters.get('storage_server.write', 0)
- return sum_of_write_counts
-
- def _download_and_check_plaintext(self, unused=None):
- self.downloader = self.clients[1].getServiceNamed("downloader")
- d = self.downloader.download_to_data(self.uri)
-
- def _after_download(result):
- self.failUnlessEqual(result, TEST_DATA)
- d.addCallback(_after_download)
- return d
-
class ShouldFailMixin:
def shouldFail(self, expected_failure, which, substring,
callable, *args, **kwargs):
error message, if any, because Deferred chains frequently make it
difficult to tell which assertion was tripped.
- The substring= argument, if not None, must appear inside the
- stringified Failure, or the test will fail.
+ The substring= argument, if not None, must appear in the 'repr'
+ of the message wrapped by this Failure, or the test will fail.
"""
assert substring is None or isinstance(substring, str)
if isinstance(res, failure.Failure):
res.trap(expected_failure)
if substring:
- self.failUnless(substring in str(res),
- "substring '%s' not in '%s'"
- % (substring, str(res)))
+ message = repr(res.value.args[0])
+ self.failUnless(substring in message,
+ "%s: substring '%s' not in '%s'"
+ % (which, substring, message))
else:
self.fail("%s was supposed to raise %s, not get '%s'" %
(which, expected_failure, res))
assert callable
def _validate(f):
if code is not None:
- self.failUnlessEqual(f.value.status, str(code))
+ self.failUnlessEqual(f.value.status, str(code), which)
if substring:
code_string = str(f)
self.failUnless(substring in code_string,
- "substring '%s' not in '%s'"
- % (substring, code_string))
+ "%s: substring '%s' not in '%s'"
+ % (which, substring, code_string))
response_body = f.value.response
if response_substring:
self.failUnless(response_substring in response_body,
- "response substring '%s' not in '%s'"
- % (response_substring, response_body))
+ "%s: response substring '%s' not in '%s'"
+ % (which, response_substring, response_body))
return response_body
d = defer.maybeDeferred(callable, *args, **kwargs)
d.addBoth(self._shouldHTTPError, which, _validate)
print "First Error:", f.value.subFailure
return f
-class MemoryConsumer:
- implements(IConsumer)
- def __init__(self):
- self.chunks = []
- self.done = False
- def registerProducer(self, p, streaming):
- if streaming:
- # call resumeProducing once to start things off
- p.resumeProducing()
- else:
- while not self.done:
- p.resumeProducing()
- def write(self, data):
- self.chunks.append(data)
- def unregisterProducer(self):
- self.done = True
-
-def download_to_data(n, offset=0, size=None):
- d = n.read(MemoryConsumer(), offset, size)
- d.addCallback(lambda mc: "".join(mc.chunks))
- return d
-
def corrupt_field(data, offset, size, debug=False):
if random.random() < 0.5:
newdata = testutil.flip_one_bit(data, offset, size)
log.msg("testing: corrupting offset %d, size %d randomizing field, orig: %r, newval: %r" % (offset, size, data[offset:offset+size], newval))
return data[:offset]+newval+data[offset+size:]
-def _corrupt_nothing(data):
- """ Leave the data pristine. """
+def _corrupt_nothing(data, debug=False):
+ """Leave the data pristine. """
return data
def _corrupt_file_version_number(data, debug=False):
    """Corrupt the share-file (container) version number.

    The 4-byte field at offset 0x00 -- the very first field of the
    server-side share file -- has one bit flipped or is replaced with a
    random value.
    """
    return corrupt_field(data, 0x00, 4)
def _corrupt_size_of_file_data(data, debug=False):
    """Scramble the file data -- the 4-byte field at offset 0x04, which
    records the size of the share data stored within the file, will have
    one bit flipped or else will be replaced with a random value.

    NOTE(review): the previous docstring claimed the size was "set to one
    smaller", but corrupt_field flips a bit or randomizes the field; the
    docstring is corrected here to match the actual behavior.
    """
    return corrupt_field(data, 0x04, 4)
def _corrupt_sharedata_version_number(data, debug=False):
    """Scramble the file data -- the 4-byte share data version number
    (the first client-visible field, located just past the 0x0c-byte
    server-side container header) will have one bit flipped or else will
    be replaced with a random value.
    """
    # The statements that previously followed this return (re-unpacking the
    # version and rebuilding the field "but not 1 or 2") were unreachable
    # dead code left behind when the early return was introduced, and have
    # been removed.
    return corrupt_field(data, 0x0c, 4)
-def _corrupt_sharedata_version_number_to_plausible_version(data):
- """ Scramble the file data -- the share data version number will
- be changed to 2 if it is 1 or else to 1 if it is 2."""
+def _corrupt_sharedata_version_number_to_plausible_version(data, debug=False):
+ """Scramble the file data -- the share data version number will be
+ changed to 2 if it is 1 or else to 1 if it is 2."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
newsharevernumbytes = struct.pack(">L", newsharevernum)
return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]
-def _corrupt_segment_size(data):
- """ Scramble the file data -- the field showing the size of the segment will have one
- bit flipped or else be changed to a random value. """
+def _corrupt_segment_size(data, debug=False):
+ """Scramble the file data -- the field showing the size of the segment
+ will have one bit flipped or else be changed to a random value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
else:
return corrupt_field(data, 0x0c+0x04, 8, debug=False)
-def _corrupt_size_of_sharedata(data):
- """ Scramble the file data -- the field showing the size of the data within the share
- data will have one bit flipped or else will be changed to a random value. """
+def _corrupt_size_of_sharedata(data, debug=False):
+ """Scramble the file data -- the field showing the size of the data
+ within the share data will have one bit flipped or else will be changed
+ to a random value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
else:
return corrupt_field(data, 0x0c+0x0c, 8)
-def _corrupt_offset_of_sharedata(data):
- """ Scramble the file data -- the field showing the offset of the data within the share
- data will have one bit flipped or else be changed to a random value. """
+def _corrupt_offset_of_sharedata(data, debug=False):
+ """Scramble the file data -- the field showing the offset of the data
+ within the share data will have one bit flipped or else be changed to a
+ random value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
else:
return corrupt_field(data, 0x0c+0x14, 8)
-def _corrupt_offset_of_ciphertext_hash_tree(data):
- """ Scramble the file data -- the field showing the offset of the ciphertext hash tree
- within the share data will have one bit flipped or else be changed to a random value.
+def _corrupt_offset_of_ciphertext_hash_tree(data, debug=False):
+ """Scramble the file data -- the field showing the offset of the
+ ciphertext hash tree within the share data will have one bit flipped or
+ else be changed to a random value.
"""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
else:
return corrupt_field(data, 0x0c+0x24, 8, debug=False)
-def _corrupt_offset_of_block_hashes(data):
- """ Scramble the file data -- the field showing the offset of the block hash tree within
- the share data will have one bit flipped or else will be changed to a random value. """
+def _corrupt_offset_of_block_hashes(data, debug=False):
+ """Scramble the file data -- the field showing the offset of the block
+ hash tree within the share data will have one bit flipped or else will be
+ changed to a random value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
else:
return corrupt_field(data, 0x0c+0x2c, 8)
-def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data):
- """ Scramble the file data -- the field showing the offset of the block hash tree within the
- share data will have a multiple of hash size subtracted from it, thus causing the downloader
- to download an incomplete crypttext hash tree."""
+def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data, debug=False):
+ """Scramble the file data -- the field showing the offset of the block
+ hash tree within the share data will have a multiple of hash size
+ subtracted from it, thus causing the downloader to download an incomplete
+ crypttext hash tree."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
newvalstr = struct.pack(">Q", newval)
return data[:0x0c+0x2c]+newvalstr+data[0x0c+0x2c+8:]
-def _corrupt_offset_of_share_hashes(data):
- """ Scramble the file data -- the field showing the offset of the share hash tree within
- the share data will have one bit flipped or else will be changed to a random value. """
+def _corrupt_offset_of_share_hashes(data, debug=False):
+ """Scramble the file data -- the field showing the offset of the share
+ hash tree within the share data will have one bit flipped or else will be
+ changed to a random value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
else:
return corrupt_field(data, 0x0c+0x34, 8)
-def _corrupt_offset_of_uri_extension(data):
- """ Scramble the file data -- the field showing the offset of the uri extension will
- have one bit flipped or else will be changed to a random value. """
+def _corrupt_offset_of_uri_extension(data, debug=False):
+ """Scramble the file data -- the field showing the offset of the uri
+ extension will have one bit flipped or else will be changed to a random
+ value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
return corrupt_field(data, 0x0c+0x3c, 8)
def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False):
- """ Scramble the file data -- the field showing the offset of the uri extension will be set
- to the size of the file minus 3. This means when the client tries to read the length field
- from that location it will get a short read -- the result string will be only 3 bytes long,
- not the 4 or 8 bytes necessary to do a successful struct.unpack."""
+ """Scramble the file data -- the field showing the offset of the uri
+ extension will be set to the size of the file minus 3. This means when
+ the client tries to read the length field from that location it will get
+ a short read -- the result string will be only 3 bytes long, not the 4 or
+ 8 bytes necessary to do a successful struct.unpack."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
- # The "-0x0c" in here is to skip the server-side header in the share file, which the client doesn't see when seeking and reading.
+ # The "-0x0c" in here is to skip the server-side header in the share
+ # file, which the client doesn't see when seeking and reading.
if sharevernum == 1:
if debug:
log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x2c, 4, struct.unpack(">L", data[0x2c:0x2c+4])[0], len(data)-0x0c-3, len(data)))
log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x48, 8, struct.unpack(">Q", data[0x48:0x48+8])[0], len(data)-0x0c-3, len(data)))
return data[:0x48] + struct.pack(">Q", len(data)-0x0c-3) + data[0x48+8:]
-def _corrupt_mutable_share_data(data):
+def _corrupt_mutable_share_data(data, debug=False):
prefix = data[:32]
assert prefix == MutableShareFile.MAGIC, "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC)
data_offset = MutableShareFile.DATA_OFFSET
length = data_offset + offsets["enc_privkey"] - start
return corrupt_field(data, start, length)
-def _corrupt_share_data(data):
- """ Scramble the file data -- the field containing the share data itself will have one
- bit flipped or else will be changed to a random value. """
+def _corrupt_share_data(data, debug=False):
+ """Scramble the file data -- the field containing the share data itself
+ will have one bit flipped or else will be changed to a random value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum
if sharevernum == 1:
return corrupt_field(data, 0x0c+0x44, sharedatasize)
-def _corrupt_crypttext_hash_tree(data):
- """ Scramble the file data -- the field containing the crypttext hash tree will have one
- bit flipped or else will be changed to a random value.
def _corrupt_share_data_last_byte(data, debug=False):
    """Scramble the file data -- flip all bits of the last byte of the
    share data."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum
    if sharevernum == 1:
        # v1 layout: 4-byte sharedata-size field at 0x08, share data
        # begins at 0x24 (both relative to the 0x0c-byte server header).
        sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]
        offset = 0x0c+0x24+sharedatasize-1
    else:
        # v2 layout: 8-byte sharedata-size field at 0x0c (the same field
        # _corrupt_size_of_sharedata targets), share data begins at 0x44.
        # The slice previously started at 0x0c+0x08 and spanned 12 bytes;
        # struct.unpack(">Q", ...) requires exactly 8 bytes and would have
        # raised struct.error before any corruption happened.
        sharedatasize = struct.unpack(">Q", data[0x0c+0x0c:0x0c+0x0c+8])[0]
        offset = 0x0c+0x44+sharedatasize-1

    newdata = data[:offset] + chr(ord(data[offset])^0xFF) + data[offset+1:]
    if debug:
        log.msg("testing: flipping all bits of byte at offset %d: %r, newdata: %r" % (offset, data[offset], newdata[offset]))
    return newdata
+def _corrupt_crypttext_hash_tree(data, debug=False):
+ """Scramble the file data -- the field containing the crypttext hash tree
+ will have one bit flipped or else will be changed to a random value.
"""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
- return corrupt_field(data, crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset)
+ return corrupt_field(data, 0x0c+crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset, debug=debug)
+
def _corrupt_crypttext_hash_tree_byte_x221(data, debug=False):
    """Scramble the file data -- the byte at offset 0x221 will have its 7th
    (b1) bit flipped.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if debug:
        log.msg("original data: %r" % (data,))
    # The tail slice previously resumed at 0x0c+0x2210+1 -- a typo for
    # 0x221 -- which silently dropped every byte between 0x0c+0x222 and
    # 0x0c+0x2211 (truncating the share) instead of flipping a single bit
    # as documented. The 0x0c skips the server-side container header.
    return data[:0x0c+0x221] + chr(ord(data[0x0c+0x221])^0x02) + data[0x0c+0x221+1:]
-def _corrupt_block_hashes(data):
- """ Scramble the file data -- the field containing the block hash tree will have one bit
- flipped or else will be changed to a random value.
+def _corrupt_block_hashes(data, debug=False):
+ """Scramble the file data -- the field containing the block hash tree
+ will have one bit flipped or else will be changed to a random value.
"""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
- return corrupt_field(data, blockhashesoffset, sharehashesoffset-blockhashesoffset)
+ return corrupt_field(data, 0x0c+blockhashesoffset, sharehashesoffset-blockhashesoffset)
-def _corrupt_share_hashes(data):
- """ Scramble the file data -- the field containing the share hash chain will have one
- bit flipped or else will be changed to a random value.
+def _corrupt_share_hashes(data, debug=False):
+ """Scramble the file data -- the field containing the share hash chain
+ will have one bit flipped or else will be changed to a random value.
"""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
- return corrupt_field(data, sharehashesoffset, uriextoffset-sharehashesoffset)
+ return corrupt_field(data, 0x0c+sharehashesoffset, uriextoffset-sharehashesoffset)
-def _corrupt_length_of_uri_extension(data):
- """ Scramble the file data -- the field showing the length of the uri extension will
- have one bit flipped or else will be changed to a random value. """
+def _corrupt_length_of_uri_extension(data, debug=False):
+ """Scramble the file data -- the field showing the length of the uri
+ extension will have one bit flipped or else will be changed to a random
+ value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
return corrupt_field(data, uriextoffset, 4)
else:
uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
- return corrupt_field(data, uriextoffset, 8)
+ return corrupt_field(data, 0x0c+uriextoffset, 8)
-def _corrupt_uri_extension(data):
- """ Scramble the file data -- the field containing the uri extension will have one bit
- flipped or else will be changed to a random value. """
+def _corrupt_uri_extension(data, debug=False):
+ """Scramble the file data -- the field containing the uri extension will
+ have one bit flipped or else will be changed to a random value."""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
if sharevernum == 1:
uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
uriextlen = struct.unpack(">Q", data[0x0c+uriextoffset:0x0c+uriextoffset+8])[0]
- return corrupt_field(data, uriextoffset, uriextlen)
+ return corrupt_field(data, 0x0c+uriextoffset, uriextlen)