import simplejson
import os.path, shutil
from twisted.trial import unittest
from twisted.internet import defer
from allmydata import check_results, uri
from allmydata import uri as tahoe_uri
from allmydata.util import base32
from allmydata.web import check_results as web_check_results
from allmydata.storage_client import StorageFarmBroker, NativeStorageServer
from allmydata.storage.server import storage_index_to_dir
from allmydata.monitor import Monitor
from allmydata.test.no_network import GridTestMixin
from allmydata.immutable.upload import Data
from allmydata.test.common_web import WebRenderingMixin
from allmydata.mutable.publish import MutableData

class FakeClient:
    # A minimal stand-in for the real client object: the web renderers only
    # need something that can hand back a StorageFarmBroker.
    def get_storage_broker(self):
        return self.storage_broker

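# The tests below render the checker-results web pages directly, against
# hand-built result objects and the FakeClient above, so no real grid is
# involved.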
class WebResultsRendering(unittest.TestCase, WebRenderingMixin):

    def create_fake_client(self):
        sb = StorageFarmBroker(None, True)
        # s.get_name() (the "short description") will be "v0-00000000".
        # s.get_longname() will include the -long suffix.
        # s.get_peerid() (i.e. tubid) will be "aaa.." or "777.." or "ceir.."
        servers = [("v0-00000000-long", "\x00"*20, "peer-0"),
                   ("v0-ffffffff-long", "\xff"*20, "peer-f"),
                   ("v0-11111111-long", "\x11"*20, "peer-11")]
        for (key_s, peerid, nickname) in servers:
            tubid_b32 = base32.b2a(peerid)
            furl = "pb://%s@nowhere/fake" % tubid_b32
            ann = { "service-name": "storage",
                    "anonymous-storage-FURL": furl,
                    "permutation-seed-base32": "",
                    "nickname": unicode(nickname),
                    "app-versions": {}, # need #466 and v2 introducer
                    "oldest-supported": "oldest",
                    }
            s = NativeStorageServer(key_s, ann)
            sb.test_add_server(peerid, s) # XXX: maybe use key_s?
        c = FakeClient()
        c.storage_broker = sb
        return c

    def render_json(self, page):
        d = self.render1(page, args={"output": ["json"]})
        return d

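    # A LiteralCheckResultsRenderer has no servers or shares to report on,
    # so both the HTML and JSON renderings should always describe the file
    # as healthy.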
    def test_literal(self):
        c = self.create_fake_client()
        lcr = web_check_results.LiteralCheckResultsRenderer(c)

        d = self.render1(lcr)
        def _check(html):
            s = self.remove_tags(html)
            self.failUnlessIn("Literal files are always healthy", s)
        d.addCallback(_check)
        d.addCallback(lambda ignored:
                      self.render1(lcr, args={"return_to": ["FOOURL"]}))
        def _check_return_to(html):
            s = self.remove_tags(html)
            self.failUnlessIn("Literal files are always healthy", s)
            self.failUnlessIn('<a href="FOOURL">Return to file.</a>',
                              html)
        d.addCallback(_check_return_to)
        d.addCallback(lambda ignored: self.render_json(lcr))
        def _check_json(json):
            j = simplejson.loads(json)
            self.failUnlessEqual(j["storage-index"], "")
            self.failUnlessEqual(j["results"]["healthy"], True)
        d.addCallback(_check_json)
        return d

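    # test_check builds a CheckResults object by hand (no real shares are
    # involved) and walks it through healthy, unhealthy-but-recoverable,
    # and unrecoverable states, checking the HTML and JSON renderings at
    # each step.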
    def test_check(self):
        c = self.create_fake_client()
        serverid_1 = "\x00"*20
        serverid_f = "\xff"*20
        u = uri.CHKFileURI("\x00"*16, "\x00"*32, 3, 10, 1234)
        cr = check_results.CheckResults(u, u.get_storage_index())
        cr.set_healthy(True)
        cr.set_needs_rebalancing(False)
        cr.set_summary("groovy")
        data = { "count_shares_needed": 3,
                 "count_shares_expected": 9,
                 "count_shares_good": 10,
                 "count_good_share_hosts": 11,
                 "count_recoverable_versions": 1,
                 "count_unrecoverable_versions": 0,
                 "servers_responding": [],
                 "sharemap": {"shareid1": [serverid_1, serverid_f]},
                 "count_wrong_shares": 0,
                 "list_corrupt_shares": [],
                 "count_corrupt_shares": 0,
                 "list_incompatible_shares": [],
                 "count_incompatible_shares": 0,
                 }
        cr.set_data(**data)

        w = web_check_results.CheckResultsRenderer(c, cr)
        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Healthy : groovy", s)
        self.failUnlessIn("Share Counts: need 3-of-9, have 10", s)
        self.failUnlessIn("Hosts with good shares: 11", s)
        self.failUnlessIn("Corrupt shares: none", s)
        self.failUnlessIn("Wrong Shares: 0", s)
        self.failUnlessIn("Recoverable Versions: 1", s)
        self.failUnlessIn("Unrecoverable Versions: 0", s)

        cr.set_healthy(False)
        cr.set_recoverable(True)
        cr.set_summary("ungroovy")
        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Not Healthy! : ungroovy", s)

        cr.set_healthy(False)
        cr.set_recoverable(False)
        cr.set_summary("rather dead")
        data["count_corrupt_shares"] = 1
        data["list_corrupt_shares"] = [(serverid_1, u.get_storage_index(), 2)]
        cr.set_data(**data)
        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Not Recoverable! : rather dead", s)
        self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", s)

        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Not Recoverable! : rather dead", s)

        html = self.render2(w, args={"return_to": ["FOOURL"]})
        self.failUnlessIn('<a href="FOOURL">Return to file/directory.</a>',
                          html)

        d = self.render_json(w)
        def _check_json(jdata):
            j = simplejson.loads(jdata)
            self.failUnlessEqual(j["summary"], "rather dead")
            self.failUnlessEqual(j["storage-index"],
                                 "2k6avpjga3dho3zsjo6nnkt7n4")
            expected = {'needs-rebalancing': False,
                        'count-shares-expected': 9,
                        'healthy': False,
                        'count-unrecoverable-versions': 0,
                        'count-shares-needed': 3,
                        'sharemap': {"shareid1":
                                     ["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                                      "77777777777777777777777777777777"]},
                        'count-recoverable-versions': 1,
                        'list-corrupt-shares':
                        [["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                          "2k6avpjga3dho3zsjo6nnkt7n4", 2]],
                        'count-good-share-hosts': 11,
                        'count-wrong-shares': 0,
                        'count-shares-good': 10,
                        'count-corrupt-shares': 1,
                        'servers-responding': [],
                        'recoverable': False,
                        }
            self.failUnlessEqual(j["results"], expected)
        d.addCallback(_check_json)
        d.addCallback(lambda ignored: self.render1(w))
        def _check(html):
            s = self.remove_tags(html)
            self.failUnlessIn("File Check Results for SI=2k6avp", s)
            self.failUnlessIn("Not Recoverable! : rather dead", s)
        d.addCallback(_check)
        return d

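    # test_check_and_repair does the same for a CheckAndRepairResults
    # object, exercising the "no repair necessary", "repair successful",
    # and "repair unsuccessful" renderings, plus the JSON form.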
    def test_check_and_repair(self):
        c = self.create_fake_client()
        serverid_1 = "\x00"*20
        serverid_f = "\xff"*20
        u = uri.CHKFileURI("\x00"*16, "\x00"*32, 3, 10, 1234)

        pre_cr = check_results.CheckResults(u, u.get_storage_index())
        pre_cr.set_healthy(False)
        pre_cr.set_recoverable(True)
        pre_cr.set_needs_rebalancing(False)
        pre_cr.set_summary("illing")
        data = { "count_shares_needed": 3,
                 "count_shares_expected": 10,
                 "count_shares_good": 6,
                 "count_good_share_hosts": 7,
                 "count_recoverable_versions": 1,
                 "count_unrecoverable_versions": 0,
                 "servers_responding": [],
                 "sharemap": {"shareid1": [serverid_1, serverid_f]},
                 "count_wrong_shares": 0,
                 "list_corrupt_shares": [],
                 "count_corrupt_shares": 0,
                 "list_incompatible_shares": [],
                 "count_incompatible_shares": 0,
                 }
        pre_cr.set_data(**data)

        post_cr = check_results.CheckResults(u, u.get_storage_index())
        post_cr.set_healthy(True)
        post_cr.set_recoverable(True)
        post_cr.set_needs_rebalancing(False)
        post_cr.set_summary("groovy")
        data = { "count_shares_needed": 3,
                 "count_shares_expected": 10,
                 "count_shares_good": 10,
                 "count_good_share_hosts": 11,
                 "count_recoverable_versions": 1,
                 "count_unrecoverable_versions": 0,
                 "servers_responding": [],
                 "sharemap": {"shareid1": [serverid_1, serverid_f]},
                 "count_wrong_shares": 0,
                 "count_corrupt_shares": 0,
                 "list_corrupt_shares": [],
                 "list_incompatible_shares": [],
                 "count_incompatible_shares": 0,
                 }
        post_cr.set_data(**data)

        crr = check_results.CheckAndRepairResults(u.get_storage_index())
        crr.pre_repair_results = pre_cr
        crr.post_repair_results = post_cr
        crr.repair_attempted = False

        w = web_check_results.CheckAndRepairResultsRenderer(c, crr)
        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Healthy : groovy", s)
        self.failUnlessIn("No repair necessary", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)
        self.failUnlessIn("Share Counts: need 3-of-10, have 10", s)

        crr.repair_attempted = True
        crr.repair_successful = True
        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Healthy : groovy", s)
        self.failUnlessIn("Repair successful", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)

        crr.repair_attempted = True
        crr.repair_successful = False
        post_cr.set_healthy(False)
        post_cr.set_summary("better")
        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Not Healthy! : better", s)
        self.failUnlessIn("Repair unsuccessful", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)

        crr.repair_attempted = True
        crr.repair_successful = False
        post_cr.set_healthy(False)
        post_cr.set_recoverable(False)
        post_cr.set_summary("worse")
        html = self.render2(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Not Recoverable! : worse", s)
        self.failUnlessIn("Repair unsuccessful", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)

        d = self.render_json(w)
        def _got_json(data):
            j = simplejson.loads(data)
            self.failUnlessEqual(j["repair-attempted"], True)
            self.failUnlessEqual(j["storage-index"],
                                 "2k6avpjga3dho3zsjo6nnkt7n4")
            self.failUnlessEqual(j["pre-repair-results"]["summary"], "illing")
            self.failUnlessEqual(j["post-repair-results"]["summary"], "worse")
        d.addCallback(_got_json)

        w2 = web_check_results.CheckAndRepairResultsRenderer(c, None)
        d.addCallback(lambda ignored: self.render_json(w2))
        def _got_lit_results(data):
            j = simplejson.loads(data)
            self.failUnlessEqual(j["repair-attempted"], False)
            self.failUnlessEqual(j["storage-index"], "")
        d.addCallback(_got_lit_results)
        return d

class BalancingAct(GridTestMixin, unittest.TestCase):
    # test for #1115 regarding the 'count-good-share-hosts' metric

    def add_server(self, server_number, readonly=False):
        assert self.g, "I tried to find a grid at self.g, but failed"
        ss = self.g.make_server(server_number, readonly)
        #log.msg("just created a server, number: %s => %s" % (server_number, ss,))
        self.g.add_server(server_number, ss)

    def add_server_with_share(self, server_number, uri, share_number=None,
                              readonly=False):
        self.add_server(server_number, readonly)
        if share_number is not None:
            self.copy_share_to_server(uri, share_number, server_number)

    def copy_share_to_server(self, uri, share_number, server_number):
        ss = self.g.servers_by_number[server_number]
        # Copy share i from the directory associated with the first
        # storage server to the directory associated with this one.
        assert self.g, "I tried to find a grid at self.g, but failed"
        assert self.shares, "I tried to find shares at self.shares, but failed"
        old_share_location = self.shares[share_number][2]
        new_share_location = os.path.join(ss.storedir, "shares")
        si = tahoe_uri.from_string(self.uri).get_storage_index()
        new_share_location = os.path.join(new_share_location,
                                          storage_index_to_dir(si))
        if not os.path.exists(new_share_location):
            os.makedirs(new_share_location)
        new_share_location = os.path.join(new_share_location,
                                          str(share_number))
        if old_share_location != new_share_location:
            shutil.copy(old_share_location, new_share_location)
        shares = self.find_uri_shares(uri)
        # Make sure that the storage server has the share.
        self.failUnless((share_number, ss.my_nodeid, new_share_location)
                        in shares)

    def _pretty_shares_chart(self, uri):
        # Servers are labeled A-Z, shares are labeled 0-9
        letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        assert len(self.g.servers_by_number) < len(letters), \
            "This little printing function is only meant for < 26 servers"
        shares_chart = {}
        names = dict(zip([ss.my_nodeid
                          for _,ss in self.g.servers_by_number.iteritems()],
                         letters))
        for shnum, serverid, _ in self.find_uri_shares(uri):
            shares_chart.setdefault(shnum, []).append(names[serverid])
        return shares_chart

    def test_good_share_hosts(self):
        self.basedir = "checker/BalancingAct/1115"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
        c0.DEFAULT_ENCODING_PARAMETERS['n'] = 4
        c0.DEFAULT_ENCODING_PARAMETERS['k'] = 3

        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))
        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())
            self.uri = self.imm.get_uri()
        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign:
                      self.find_uri_shares(self.uri))
        def _store_shares(shares):
            self.shares = shares
        d.addCallback(_store_shares)

        def add_three(_, i):
            # Add a new server with just share 3
            self.add_server_with_share(i, self.uri, 3)
            #print self._pretty_shares_chart(self.uri)
        for i in range(1,5):
            d.addCallback(add_three, i)

        def _check_and_repair(_):
            return self.imm.check_and_repair(Monitor())
        def _check_counts(crr, shares_good, good_share_hosts):
            p_crr = crr.get_post_repair_results().get_data()
            #print self._pretty_shares_chart(self.uri)
            self.failUnlessEqual(p_crr['count-shares-good'], shares_good)
            self.failUnlessEqual(p_crr['count-good-share-hosts'],
                                 good_share_hosts)

        """
        Initial sharemap:
            0:[A] 1:[A] 2:[A] 3:[A,B,C,D,E]
        4 good shares, but 5 good hosts
        After deleting all instances of share #3 and repairing:
            0:[A,B], 1:[A,C], 2:[A,D], 3:[E]
        Still 4 good shares and 5 good hosts
        """
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 4, 5)
        d.addCallback(lambda _: self.delete_shares_numbered(self.uri, [3]))
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 4, 5)
        d.addCallback(lambda _: [self.g.break_server(sid)
                                 for sid in self.g.get_all_serverids()])
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 0, 0)
        return d

class AddLease(GridTestMixin, unittest.TestCase):
    # test for #875, in which failures in the add-lease call cause
    # false-negatives in the checker

    def test_875(self):
        self.basedir = "checker/AddLease/875"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1

        DATA = "data" * 100
        d = c0.upload(Data(DATA, convergence=""))
        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())
        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign:
                      c0.create_mutable_file(MutableData("contents")))
        def _stash_mutable(node):
            self.mut = node
        d.addCallback(_stash_mutable)

        def _check_cr(cr, which):
            self.failUnless(cr.is_healthy(), which)

        # these two should work normally
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-normal")
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-normal")

        really_did_break = []
        # now break the server's remote_add_lease call
        def _break_add_lease(ign):
            def broken_add_lease(*args, **kwargs):
                really_did_break.append(1)
                raise KeyError("intentional failure, should be ignored")
            assert self.g.servers_by_number[0].remote_add_lease
            self.g.servers_by_number[0].remote_add_lease = broken_add_lease
        d.addCallback(_break_add_lease)

        # and confirm that the files still look healthy
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-broken")
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-broken")

        d.addCallback(lambda ign: self.failUnless(really_did_break))
        return d

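# CounterHolder and MockVRBP instrument ValidatedReadBucketProxy so the
# TooParallel test below can measure how many block fetches are in flight
# at the same time.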
class CounterHolder(object):
    def __init__(self):
        self._num_active_block_fetches = 0
        self._max_active_block_fetches = 0

from allmydata.immutable.checker import ValidatedReadBucketProxy
class MockVRBP(ValidatedReadBucketProxy):
    def __init__(self, sharenum, bucket, share_hash_tree, num_blocks,
                 block_size, share_size, counterholder):
        ValidatedReadBucketProxy.__init__(self, sharenum, bucket,
                                          share_hash_tree, num_blocks,
                                          block_size, share_size)
        self.counterholder = counterholder

    def get_block(self, blocknum):
        self.counterholder._num_active_block_fetches += 1
        if self.counterholder._num_active_block_fetches > self.counterholder._max_active_block_fetches:
            self.counterholder._max_active_block_fetches = self.counterholder._num_active_block_fetches
        d = ValidatedReadBucketProxy.get_block(self, blocknum)
        def _mark_no_longer_active(res):
            self.counterholder._num_active_block_fetches -= 1
            return res
        d.addBoth(_mark_no_longer_active)
        return d

class TooParallel(GridTestMixin, unittest.TestCase):
    # bug #1395: immutable verifier was aggressively parallelized, checking
    # all blocks of all shares at the same time, blowing our memory budget
    # and crashing with MemoryErrors on >1GB files.

    def test_immutable(self):
        import allmydata.immutable.checker
        origVRBP = allmydata.immutable.checker.ValidatedReadBucketProxy

        self.basedir = "checker/TooParallel/immutable"

        # If any code asks to instantiate a ValidatedReadBucketProxy,
        # we give them a MockVRBP which is configured to use our
        # counterholder.
        counterholder = CounterHolder()
        def make_mock_VRBP(*args, **kwargs):
            return MockVRBP(counterholder=counterholder, *args, **kwargs)
        allmydata.immutable.checker.ValidatedReadBucketProxy = make_mock_VRBP

        d = defer.succeed(None)
        def _start(ign):
            self.set_up_grid(num_servers=4)
            self.c0 = self.g.clients[0]
            self.c0.DEFAULT_ENCODING_PARAMETERS = { "k": 1,
                                                    "happy": 4,
                                                    "n": 4,
                                                    "max_segment_size": 5,
                                                    }
            DATA = "data" * 100 # 400/5 = 80 blocks
            return self.c0.upload(Data(DATA, convergence=""))
        d.addCallback(_start)
        def _do_check(ur):
            n = self.c0.create_node_from_uri(ur.get_uri())
            return n.check(Monitor(), verify=True)
        d.addCallback(_do_check)
        def _check(ign):
            # the verifier works on all 4 shares in parallel, but only
            # fetches one block from each share at a time, so we expect to
            # see 4 parallel fetches
            self.failUnlessEqual(counterholder._max_active_block_fetches, 4)
        d.addCallback(_check)
        def _clean_up(res):
            allmydata.immutable.checker.ValidatedReadBucketProxy = origVRBP
            return res
        d.addBoth(_clean_up)
        return d
    test_immutable.timeout = 80