def set_children(self, entries, overwrite=True):
# this takes URIs
a = Adder(self, overwrite=overwrite)
- node_entries = []
for (name, e) in entries.iteritems():
assert isinstance(name, unicode)
if len(e) == 2:
self._log_id = log_id
def _bad(self, f, validatedthingproxy):
- failtype = f.trap(RemoteException, DeadReferenceError,
- IntegrityCheckReject, layout.LayoutInvalid,
- layout.ShareVersionIncompatible)
+ f.trap(RemoteException, DeadReferenceError,
+ IntegrityCheckReject, layout.LayoutInvalid,
+ layout.ShareVersionIncompatible)
level = log.WEIRD
if f.check(DeadReferenceError):
level = log.UNUSUAL
self.parent.hold_block(self.blocknum, data)
def _got_block_error(self, f):
- failtype = f.trap(RemoteException, DeadReferenceError,
- IntegrityCheckReject,
- layout.LayoutInvalid, layout.ShareVersionIncompatible)
+ f.trap(RemoteException, DeadReferenceError,
+ IntegrityCheckReject, layout.LayoutInvalid,
+ layout.ShareVersionIncompatible)
if f.check(RemoteException, DeadReferenceError):
level = log.UNUSUAL
else:
for shnum in self._encoder.get_shares_placed():
peer_tracker = self._peer_trackers[shnum]
peerid = peer_tracker.peerid
- peerid_s = idlib.shortnodeid_b2a(peerid)
r.sharemap.add(shnum, peerid)
r.servermap.add(peerid, shnum)
r.pushed_shares = len(self._encoder.get_shares_placed())
# we have a fragment that contains the whole request
index = (verinfo, shnum)
- end = offset+length
for entry in self.cache.get(index, set()):
(e_start, e_data, e_timestamp) = entry
if self._inside(offset, length, e_start, len(e_data)):
o3 = offsets['block_hash_tree'] = o2 + share_hash_chain_length
o4 = offsets['share_data'] = o3 + block_hash_tree_length
o5 = offsets['enc_privkey'] = o4 + share_data_length
- o6 = offsets['EOF'] = o5 + encprivkey_length
+ offsets['EOF'] = o5 + encprivkey_length
return struct.pack(">LLLLQQ",
offsets['signature'],
str(self._first_write_error),
self._first_write_error)
- new_assignments = []
# we then index this peerlist with an integer, because we may have to
# wrap. We update the goal as we go.
i = 0
# build the block hash tree. SDMF has only one leaf.
leaves = [hashutil.block_hash(share_data)]
t = hashtree.HashTree(leaves)
- block_hash_trees[shnum] = block_hash_tree = list(t)
+ block_hash_trees[shnum] = list(t)
share_hash_leaves[shnum] = t[0]
for leaf in share_hash_leaves:
assert leaf is not None
self._status.set_status("Sending %d initial queries" % len(peerlist))
self._queries_outstanding = set()
self._sharemap = DictOfSets() # shnum -> [(peerid, seqnum, R)..]
- dl = []
for (peerid, ss) in peerlist.items():
self._queries_outstanding.add(peerid)
self._do_query(ss, peerid, self._storage_index, self._read_size)
i_select = T.select(name=name)
for (count, description) in options:
count = astype(count)
- selected = False
if ((current_value is not None and count == current_value) or
(current_value is None and count == default)):
o = T.option(value=str(count), selected="true")[description]
add_output("Users",
T.div["Average file size: ", number(file_size)])
total_files = num_users * files_per_user / sharing_ratio
- user_file_check_interval = file_check_interval / files_per_user
add_output("Grid",
T.div["Total number of files in grid: ",
from allmydata import reliability
# we import this just to test whether the page is available
_hush_pyflakes = reliability
+ del _hush_pyflakes
f = [T.div[T.a(href="../reliability")["Reliability Math"]], f]
except ImportError:
pass
#print "DIFF:", (old_post_repair - decay * repair)
START = array([0]*N + [1])
- ALIVE = array([0]*k + [1]*(1+N-k))
DEAD = array([1]*k + [0]*(1+N-k))
REPAIRp = array([0]*k + [1]*(R-k) + [0]*(1+N-R))
REPAIR_newshares = array([0]*k +
[0]*(1+N-R))
assert REPAIR_newshares.shape[0] == N+1
#print "START", START
- #print "ALIVE", ALIVE
#print "REPAIRp", REPAIRp
#print "REPAIR_newshares", REPAIR_newshares
# mutable share
m = MutableShareFile(abs_sharefile)
WE, nodeid = m._read_write_enabler_and_nodeid(f)
- num_extra_leases = m._read_num_extra_leases(f)
data_length = m._read_data_length(f)
- extra_lease_offset = m._read_extra_lease_offset(f)
- container_size = extra_lease_offset - m.DATA_OFFSET
expiration_time = min( [lease.expiration_time
for (i,lease) in m._enumerate_leases(f)] )
expiration = max(0, expiration_time - now)
"""
def create_stats_gatherer(config):
- out = config.stdout
err = config.stderr
basedir = config['basedir']
if not basedir:
def run(self):
options = self.options
nodeurl = options['node-url']
- from_dir = options.from_dir
- to_dir = options.to_dir
self.verbosity = 1
if options['quiet']:
self.verbosity = 0
if options['verbose']:
self.verbosity = 2
- stdin = options.stdin
stdout = options.stdout
stderr = options.stderr
to_url += "/"
archives_url = to_url + "Archives/"
- latest_url = to_url + "Latest"
# first step: make sure the target directory exists, as well as the
# Archives/ subdirectory.
print >>stderr, "Unable to create target directory: %s %s %s" % \
(resp.status, resp.reason, resp.read())
return 1
- archives_dir = {}
- else:
- jdata = simplejson.load(resp)
- (otype, attrs) = jdata
- archives_dir = attrs["children"]
# second step: process the tree
new_backup_dircap = self.process(options.from_dir)
url = self.nodeurl + "uri/%s" % urllib.quote(rootcap)
if path:
url += "/" + escape_path(path)
- last_slash = path.rfind("/")
resp = do_http("GET", url + "?t=json")
if resp.status == 404:
def write_results(self, data):
stdout = self.options.stdout
- stderr = self.options.stderr
keys = ("count-immutable-files",
"count-mutable-files",
"count-literal-files",
def rm(options):
"""
- @param verbosity: 0, 1, or 2, meaning quiet, verbose, or very verbose
-
@return: a Deferred which eventually fires with the exit code
"""
nodeurl = options['node-url']
aliases = options.aliases
where = options.where
- if options['quiet']:
- verbosity = 0
- else:
- verbosity = 2
stdout = options.stdout
stderr = options.stderr
self.current_sleep_time = None
self.next_wake_time = None
try:
- s = self.last_complete_prefix_index
self.start_current_prefix(start_slice)
finished_cycle = True
except TimeSliceExceeded:
self._clean_incomplete()
fileutil.make_dirs(self.incomingdir)
self._active_writers = weakref.WeakKeyDictionary()
- lp = log.msg("StorageServer created", facility="tahoe.storage")
+ log.msg("StorageServer created", facility="tahoe.storage")
if reserved_space:
if self.get_available_space() is None:
start = time.time()
self.count("writev")
si_s = si_b2a(storage_index)
- lp = log.msg("storage: slot_writev %s" % si_s)
+ log.msg("storage: slot_writev %s" % si_s)
si_dir = storage_index_to_dir(storage_index)
(write_enabler, renew_secret, cancel_secret) = secrets
# shares exist if there is a file for them
def do_test(self):
print "doing test"
- rr = self.client_rref
d = defer.succeed(None)
d.addCallback(self.one_test, "startup", 1, 1000, False) #ignore this one
d.addCallback(self.measure_rtt)
return corrupt_field(data, 0x0c+crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset, debug=debug)
def _corrupt_crypttext_hash_tree_byte_x221(data, debug=False):
- """Scramble the file data -- the field containing the crypttext hash tree
- will have the 7th bit of the 9th byte flipped.
+ """Scramble the file data -- the byte at offset 0x221 will have its 7th
+ (b1) bit flipped.
"""
sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
- if sharevernum == 1:
- crypttexthashtreeoffset = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0]
- blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
- else:
- crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
- blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
-
if debug:
log.msg("original data: %r" % (data,))
return data[:0x0c+0x221] + chr(ord(data[0x0c+0x221])^0x02) + data[0x0c+0x221+1:]
basedir = "test_client.Basic.test_loadable"
os.mkdir(basedir)
open(os.path.join(basedir, "introducer.furl"), "w").write("")
- c = client.Client(basedir)
+ client.Client(basedir)
def test_loadable_old_config_bits(self):
basedir = "test_client.Basic.test_loadable_old_config_bits"
dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
open(os.path.join(basedir, "introducer.furl"), "w").write(dummy)
open(os.path.join(basedir, "suicide_prevention_hotline"), "w")
- c = client.Client(basedir)
+ client.Client(basedir)
def test_reloadable(self):
basedir = "test_client.Run.test_reloadable"
ss = StorageServer(self.basedir, serverid)
ss.setServiceParent(self.s)
- sis = [self.write(i, ss, serverid) for i in range(10)]
+ for i in range(10):
+ self.write(i, ss, serverid)
statefile = os.path.join(self.basedir, "statefile")
c = ConsumingCrawler(ss, statefile)
ss = StorageServer(self.basedir, serverid)
ss.setServiceParent(self.s)
- sis = [self.write(i, ss, serverid) for i in range(10)]
+ for i in range(10):
+ self.write(i, ss, serverid)
statefile = os.path.join(self.basedir, "statefile")
c = ShareCrawler(ss, statefile)
ss = StorageServer(self.basedir, serverid)
ss.setServiceParent(self.s)
- sis = [self.write(i, ss, serverid) for i in range(30)]
+ for i in range(30):
+ self.write(i, ss, serverid)
statefile = os.path.join(self.basedir, "statefile")
c = OneShotCrawler(ss, statefile)
def _created(node):
self.node = node
self.fileurl = "uri/" + urllib.quote(node.get_uri())
- si = self.node.get_storage_index()
d.addCallback(_created)
# now make sure the webapi verifier sees no problems
d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=true",
self.check_stats_good(stats)
def do_web_stream_check(self, ignored):
+ # TODO
return
d = self.web(self.root, t="stream-deep-check")
def _check(res):
units = list(self.parse_streamed_json(res))
- files = [u for u in units if u["type"] in ("file", "directory")]
+ #files = [u for u in units if u["type"] in ("file", "directory")]
assert units[-1]["type"] == "stats"
- stats = units[-1]["stats"]
+ #stats = units[-1]["stats"]
# ...
d.addCallback(_check)
return d
return d
def _mark_file_bad(self, rootnode):
- si = rootnode.get_storage_index()
self.delete_shares_numbered(rootnode.get_uri(), [0])
return rootnode
self.failUnlessIn("lit", packed)
kids = self._make_kids(nm, ["imm", "lit", "write"])
- e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
- dirnode.pack_children,
- fn, kids, deep_immutable=True)
+ self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+ dirnode.pack_children,
+ fn, kids, deep_immutable=True)
# read-only is not enough: all children must be immutable
kids = self._make_kids(nm, ["imm", "lit", "read"])
- e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
- dirnode.pack_children,
- fn, kids, deep_immutable=True)
+ self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+ dirnode.pack_children,
+ fn, kids, deep_immutable=True)
kids = self._make_kids(nm, ["imm", "lit", "dirwrite"])
- e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
- dirnode.pack_children,
- fn, kids, deep_immutable=True)
+ self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+ dirnode.pack_children,
+ fn, kids, deep_immutable=True)
kids = self._make_kids(nm, ["imm", "lit", "dirread"])
- e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
- dirnode.pack_children,
- fn, kids, deep_immutable=True)
+ self.failUnlessRaises(dirnode.MustBeDeepImmutable,
+ dirnode.pack_children,
+ fn, kids, deep_immutable=True)
class FakeMutableFile:
implements(IMutableFileNode)
def _ready(res):
k,happy,n = e.get_param("share_counts")
assert n == NUM_SHARES # else we'll be completely confused
- all_peers = []
for shnum in range(NUM_SHARES):
mode = bucket_modes.get(shnum, "good")
peer = FakeBucketReaderWriterProxy(mode)
needed_shares=3,
total_shares=10,
size=1000)
- c = FakeClient()
cf = cachedir.CacheFile("none")
fn1 = ImmutableFileNode(u, None, None, None, None, cf)
fn2 = ImmutableFileNode(u, None, None, None, None, cf)
def test_literal_filenode(self):
DATA = "I am a short file."
u = uri.LiteralFileURI(data=DATA)
- c = None
fn1 = LiteralFileNode(u)
fn2 = LiteralFileNode(u)
self.failUnlessEqual(fn1, fn2)
def test_mutable_filenode(self):
client = FakeClient()
wk = "\x00"*16
- fp = "\x00"*32
rk = hashutil.ssk_readkey_hash(wk)
si = hashutil.ssk_storage_index_hash(rk)
self.failUnlessEqual(ht.needed_hashes(5, True), set([12, 11, 6, 1]))
def test_depth_of(self):
- ht = hashtree.IncompleteHashTree(8)
+ hashtree.IncompleteHashTree(8)
self.failUnlessEqual(hashtree.depth_of(0), 0)
for i in [1,2]:
self.failUnlessEqual(hashtree.depth_of(i), 1, "i=%d"%i)
random.shuffle(shnums)
for i in shnums[:7]:
self._corrupt_a_share(None, common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, i)
- before_download_reads = self._count_reads()
+ #before_download_reads = self._count_reads()
d.addCallback(_then_corrupt_7)
d.addCallback(self._download_and_check_plaintext)
return d
log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i,
"version", "oldest")
- received_announcements[c] = ra = {}
+ received_announcements[c] = {}
def got(serverid, ann_d, announcements):
announcements[serverid] = ann_d
c.subscribe_to("storage", got, received_announcements[c])
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32
-from allmydata.util.idlib import shortnodeid_b2a
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
ssk_pubkey_fingerprint_hash
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
self._pending_timer = None
pending = self._pending
self._pending = {}
- extra = []
for peerid in self._sequence:
if peerid in pending:
d, shares = pending.pop(peerid)
# publish a file and create shares, which can then be manipulated
# later.
self.CONTENTS = "New contents go here" * 1000
- num_peers = 20
self._storage = FakeStorage()
self._nodemaker = make_nodemaker(self._storage)
self._storage_broker = self._nodemaker.storage_broker
"Contents 3a",
"Contents 3b"]
self._copied_shares = {}
- num_peers = 20
self._storage = FakeStorage()
self._nodemaker = make_nodemaker(self._storage)
d = self._nodemaker.create_mutable_file(self.CONTENTS[0]) # seqnum=1
def test_mark_bad(self):
d = defer.succeed(None)
ms = self.make_servermap
- us = self.update_servermap
d.addCallback(lambda res: ms(mode=MODE_READ))
d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
self.failUnlessEqual(len(smap.recoverable_versions()), 1)
self.failIf(smap.unrecoverable_versions())
# now, which should have won?
- roothash_s4a = self.get_roothash_for(3)
expected_contents = self.CONTENTS[3]
new_versionid = smap.best_recoverable_version()
self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
sb = self._storage_broker
for peerid in sorted(sb.get_all_serverids()):
- peerid_s = shortnodeid_b2a(peerid)
for shnum in self._shares1.get(peerid, {}):
if shnum < len(places):
which = places[shnum]
in_1 = shnum in self._shares1[peerid]
in_2 = shnum in self._shares2.get(peerid, {})
in_3 = shnum in self._shares3.get(peerid, {})
- #print peerid_s, shnum, which, in_1, in_2, in_3
if which == 1:
if in_1:
peers[shnum] = self._shares1[peerid][shnum]
return ss
def test_create(self):
- ss = self.create("test_create")
+ self.create("test_create")
def allocate(self, ss, storage_index, sharenums, size, canary=None):
renew_secret = hashutil.tagged_hash("blah", "%d" % self._lease_secret.next())
self.failUnlessEqual(ss.remote_get_buckets("allocate"), {})
- canary = FakeCanary()
already,writers = self.allocate(ss, "allocate", [0,1,2], 75)
self.failUnlessEqual(already, set())
self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
f.write(struct.pack(">L", 0)) # this is invalid: minimum used is v1
f.close()
- b = ss.remote_get_buckets("allocate")
+ ss.remote_get_buckets("allocate")
e = self.failUnlessRaises(UnknownImmutableContainerVersionError,
ss.remote_get_buckets, "si1")
ss = StorageServer(workdir, "\x00" * 20, discard_storage=True)
ss.setServiceParent(self.sparent)
- canary = FakeCanary()
already,writers = self.allocate(ss, "vid", [0,1,2], 75)
self.failUnlessEqual(already, set())
self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
return ss
def test_create(self):
- ss = self.create("test_create")
+ self.create("test_create")
def write_enabler(self, we_tag):
return hashutil.tagged_hash("we_blah", we_tag)
def _done(res):
log.msg("DONE: %s" % (res,))
self._mutable_node_1 = res
- uri = res.get_uri()
d1.addCallback(_done)
return d1
d.addCallback(_create_mutable)
# network calls)
private_uri = self._private_node.get_uri()
- some_uri = self._root_directory_uri
client0_basedir = self.getdir("client0")
nodeargs = [
"--node-directory", client0_basedir,
]
- TESTDATA = "I will not write the same thing over and over.\n" * 100
d = defer.succeed(None)
u0 = uri.LiteralFileURI("data")
u1 = uri.LiteralDirectoryURI(u0)
self.failUnless(str(u1))
- u1s = u1.to_string()
self.failUnlessEqual(u1.to_string(), "URI:DIR2-LIT:mrqxiyi")
self.failUnless(u1.is_readonly())
self.failIf(u1.is_mutable())
try:
func(*args, **kwargs)
self.fail(msg)
- except AssertionError, e:
+ except AssertionError:
pass
def failUnlessListEqual(self, a, b, msg = None):
self.fail("assert was not caught")
def should_not_assert(self, func, *args, **kwargs):
- if "re" in kwargs:
- regexp = kwargs["re"]
- del kwargs["re"]
try:
func(*args, **kwargs)
except AssertionError, e:
try:
from allmydata import reliability
_hush_pyflakes = reliability
+ del _hush_pyflakes
except:
raise unittest.SkipTest("reliability tool requires NumPy")
def _check_json(res):
data = simplejson.loads(res)
self.failUnless(isinstance(data, dict))
- active = data["active"]
+ #active = data["active"]
# TODO: test more. We need a way to fake an active operation
# here.
d.addCallback(_check_json)
def is_uri(s):
try:
- uri = from_string(s)
+ from_string(s)
return True
except (TypeError, AssertionError):
return False
octets = []
pos = 2048
num = qs[0] * pos
- readybits = 5
i = 1
while len(octets) < numoctets:
while pos > 256:
x = 0
else:
x = int(math.log(n, k) + 0.5)
- r = k**x
if k**x < n:
return k**(x+1)
else:
def _render_si_link(self, ctx, storage_index):
si_s = base32.b2a(storage_index)
- root = get_root(ctx)
req = inevow.IRequest(ctx)
ophandle = req.prepath[-1]
target = "%s/operations/%s/%s" % (get_root(ctx), ophandle, si_s)
self.name = name
def childFactory(self, ctx, name):
- req = IRequest(ctx)
name = name.decode("utf-8")
if not name:
raise EmptyPathnameComponentError()
if encoding:
req.setHeader("content-encoding", encoding)
- save_to_filename = None
if boolean_of_arg(get_arg(req, "save", "False")):
# tell the browser to save the file rather than display it. We don't
# try to encode the filename; instead we echo back the exact same
def render_size(self, ctx, data):
node = self.original
- si = node.get_storage_index()
d = node.get_current_size()
def _no_size(size):
if size is None:
-from nevow import rend, inevow, tags as T
+from nevow import rend, tags as T
reliability = None # might not be usable
try:
from allmydata import reliability # requires NumPy
return "%d" % s
def get_parameters(self, ctx):
- req = inevow.IRequest(ctx)
parameters = {}
for (name,default,argtype,description) in self.DEFAULT_PARAMETERS:
v = get_arg(ctx, name, default)
ecr = ec["space-recovered"]
p = T.ul()
- pieces = []
def add(*pieces):
p[T.li[pieces]]
]
p = T.ul()
- pieces = []
def add(*pieces):
p[T.li[pieces]]