3 from twisted.trial import unittest
4 from twisted.internet import defer
5 from allmydata import check_results, uri
6 from allmydata.web import check_results as web_check_results
7 from allmydata.storage_client import StorageFarmBroker, NativeStorageServer
8 from allmydata.monitor import Monitor
9 from allmydata.test.no_network import GridTestMixin
10 from allmydata.immutable.upload import Data
11 from allmydata.test.common_web import WebRenderingMixin
def get_storage_broker(self):
    """Return the StorageFarmBroker previously stashed on this fake client.

    The web renderers under test only need a client object that can hand
    back a storage broker; ``storage_broker`` is assigned externally
    (see ``create_fake_client``) before this accessor is used.
    """
    broker = self.storage_broker
    return broker
# Tests for the HTML/JSON renderers in allmydata.web.check_results.
# NOTE(review): every line in this chunk carries a stray leading line number
# and several interior lines were dropped -- recover the pristine text from
# version control before running.
17 class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
# Build a fake client whose StorageFarmBroker is pre-populated with three
# NativeStorageServers (peerids \x00*20, \xff*20, \x11*20, nicknames
# peer-0 / peer-f / peer-11) so the renderers have serverids and nicknames
# to display.
19 def create_fake_client(self):
20 sb = StorageFarmBroker(None, True)
21 for (peerid, nickname) in [("\x00"*20, "peer-0"),
22 ("\xff"*20, "peer-f"),
23 ("\x11"*20, "peer-11")] :
# Minimal v0 storage announcement for each fake server.
24 ann_d = { "version": 0,
25 "service-name": "storage",
27 "nickname": unicode(nickname),
28 "app-versions": {}, # need #466 and v2 introducer
30 "oldest-supported": "oldest",
# NOTE(review): the announcement dict is never closed above, and nothing is
# returned below -- the closing brace and the lines that wrap the broker in
# a fake client and return it (orig. 26, 29, 31, 34-36) are missing here.
32 s = NativeStorageServer(peerid, ann_d)
33 sb.test_add_server(peerid, s)
def render_json(self, page):
    """Render *page* with ``output=json`` and return the resulting Deferred.

    Thin helper over ``self.render1`` (from WebRenderingMixin) that asks
    for the JSON representation of a check-results page.

    :param page: the web-resource object to render.
    :returns: the Deferred produced by ``render1``, which fires with the
        rendered JSON body.

    Fix: the original chunk dropped the ``return`` line, so every caller
    chaining callbacks onto the result would have received ``None``
    instead of the Deferred.
    """
    d = self.render1(page, args={"output": ["json"]})
    return d
# Verify rendering of LiteralCheckResults: the HTML view, the return_to
# link variant, and the JSON view (empty storage-index, healthy=True).
# NOTE(review): interior lines are missing -- the initial render that binds
# `d`/`html` (orig. 45-47), a closing paren for the failUnlessIn at orig. 56
# (orig. 57), and the trailing `return d` (orig. 65). Recover from VCS.
42 def test_literal(self):
43 c = self.create_fake_client()
44 lcr = web_check_results.LiteralCheckResults(c)
48 s = self.remove_tags(html)
49 self.failUnlessIn("Literal files are always healthy", s)
51 d.addCallback(lambda ignored:
52 self.render1(lcr, args={"return_to": ["FOOURL"]}))
53 def _check_return_to(html):
54 s = self.remove_tags(html)
55 self.failUnlessIn("Literal files are always healthy", s)
56 self.failUnlessIn('<a href="FOOURL">Return to file.</a>',
58 d.addCallback(_check_return_to)
# JSON view: literal files have an empty storage-index and report healthy.
59 d.addCallback(lambda ignored: self.render_json(lcr))
60 def _check_json(json):
61 j = simplejson.loads(json)
62 self.failUnlessEqual(j["storage-index"], "")
63 self.failUnlessEqual(j["results"]["healthy"], True)
64 d.addCallback(_check_json)
# Exercise web_check_results.CheckResults rendering for a CHK file through
# three states (healthy, unhealthy-but-recoverable, unrecoverable with a
# corrupt share), then the JSON view, then a plain render1 pass.
# NOTE(review): the `def test_check(self):` header itself (orig. 67) is
# missing from this chunk, along with the set_healthy/set_recoverable setup
# calls (orig. 73), the dict closing + cr.set_data(data) lines (orig. 86-88),
# the set_data refreshes between renders (orig. 113, 119), the dict close at
# orig. 153, and the `def _check(html):` header before orig. 158.
68 c = self.create_fake_client()
69 serverid_1 = "\x00"*20
70 serverid_f = "\xff"*20
# CHK URI with k=3, N=10, size=1234; SI abbreviates to "2k6avp".
71 u = uri.CHKFileURI("\x00"*16, "\x00"*32, 3, 10, 1234)
72 cr = check_results.CheckResults(u, u.get_storage_index())
74 cr.set_needs_rebalancing(False)
75 cr.set_summary("groovy")
76 data = { "count-shares-needed": 3,
77 "count-shares-expected": 9,
78 "count-shares-good": 10,
79 "count-good-share-hosts": 11,
80 "list-corrupt-shares": [],
81 "count-wrong-shares": 0,
82 "sharemap": {"shareid1": [serverid_1, serverid_f]},
83 "count-recoverable-versions": 1,
84 "count-unrecoverable-versions": 0,
85 "servers-responding": [],
# --- healthy rendering ---
89 w = web_check_results.CheckResults(c, cr)
90 html = self.render2(w)
91 s = self.remove_tags(html)
92 self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
93 self.failUnlessIn("Healthy : groovy", s)
94 self.failUnlessIn("Share Counts: need 3-of-9, have 10", s)
95 self.failUnlessIn("Hosts with good shares: 11", s)
96 self.failUnlessIn("Corrupt shares: none", s)
97 self.failUnlessIn("Wrong Shares: 0", s)
98 self.failUnlessIn("Recoverable Versions: 1", s)
99 self.failUnlessIn("Unrecoverable Versions: 0", s)
# --- unhealthy but recoverable ---
101 cr.set_healthy(False)
102 cr.set_recoverable(True)
103 cr.set_summary("ungroovy")
104 html = self.render2(w)
105 s = self.remove_tags(html)
106 self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
107 self.failUnlessIn("Not Healthy! : ungroovy", s)
# --- unrecoverable, with one corrupt share on serverid_1 ---
109 cr.set_healthy(False)
110 cr.set_recoverable(False)
111 cr.set_summary("rather dead")
112 data["list-corrupt-shares"] = [(serverid_1, u.get_storage_index(), 2)]
114 html = self.render2(w)
115 s = self.remove_tags(html)
116 self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
117 self.failUnlessIn("Not Recoverable! : rather dead", s)
118 self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", s)
120 html = self.render2(w)
121 s = self.remove_tags(html)
122 self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
123 self.failUnlessIn("Not Recoverable! : rather dead", s)
# return_to link should be rendered verbatim in the HTML.
125 html = self.render2(w, args={"return_to": ["FOOURL"]})
126 self.failUnlessIn('<a href="FOOURL">Return to file/directory.</a>',
# --- JSON view: serverids appear base32-encoded ---
129 d = self.render_json(w)
130 def _check_json(jdata):
131 j = simplejson.loads(jdata)
132 self.failUnlessEqual(j["summary"], "rather dead")
133 self.failUnlessEqual(j["storage-index"],
134 "2k6avpjga3dho3zsjo6nnkt7n4")
135 expected = {'needs-rebalancing': False,
136 'count-shares-expected': 9,
138 'count-unrecoverable-versions': 0,
139 'count-shares-needed': 3,
140 'sharemap': {"shareid1":
141 ["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
142 "77777777777777777777777777777777"]},
143 'count-recoverable-versions': 1,
144 'list-corrupt-shares':
145 [["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
146 "2k6avpjga3dho3zsjo6nnkt7n4", 2]],
147 'count-good-share-hosts': 11,
148 'count-wrong-shares': 0,
149 'count-shares-good': 10,
150 'count-corrupt-shares': 0,
151 'servers-responding': [],
152 'recoverable': False,
154 self.failUnlessEqual(j["results"], expected)
155 d.addCallback(_check_json)
# NOTE(review): `def _check(html):` (orig. 157) is missing before the lines
# below, which belong to that callback.
156 d.addCallback(lambda ignored: self.render1(w))
158 s = self.remove_tags(html)
159 self.failUnlessIn("File Check Results for SI=2k6avp", s)
160 self.failUnlessIn("Not Recoverable! : rather dead", s)
161 d.addCallback(_check)
# Exercise web_check_results.CheckAndRepairResults rendering across the
# four repair outcomes (not attempted / successful / unsuccessful-but-
# recoverable / unsuccessful-and-unrecoverable), then the JSON view, and
# finally the literal-file case (crr=None).
# NOTE(review): several interior lines are missing -- the dict closings +
# blank lines (orig. 186, 188, 204, 206), the `def _got_json(data):` header
# (orig. 258) that the block starting at orig. 259 belongs to, and the
# trailing `return d` (orig. 274). Recover from VCS.
165 def test_check_and_repair(self):
166 c = self.create_fake_client()
167 serverid_1 = "\x00"*20
168 serverid_f = "\xff"*20
169 u = uri.CHKFileURI("\x00"*16, "\x00"*32, 3, 10, 1234)
# Pre-repair state: unhealthy but recoverable, 6-of-10 shares present.
171 pre_cr = check_results.CheckResults(u, u.get_storage_index())
172 pre_cr.set_healthy(False)
173 pre_cr.set_recoverable(True)
174 pre_cr.set_needs_rebalancing(False)
175 pre_cr.set_summary("illing")
176 data = { "count-shares-needed": 3,
177 "count-shares-expected": 10,
178 "count-shares-good": 6,
179 "count-good-share-hosts": 7,
180 "list-corrupt-shares": [],
181 "count-wrong-shares": 0,
182 "sharemap": {"shareid1": [serverid_1, serverid_f]},
183 "count-recoverable-versions": 1,
184 "count-unrecoverable-versions": 0,
185 "servers-responding": [],
187 pre_cr.set_data(data)
# Post-repair state: fully healthy, all 10 shares present.
189 post_cr = check_results.CheckResults(u, u.get_storage_index())
190 post_cr.set_healthy(True)
191 post_cr.set_recoverable(True)
192 post_cr.set_needs_rebalancing(False)
193 post_cr.set_summary("groovy")
194 data = { "count-shares-needed": 3,
195 "count-shares-expected": 10,
196 "count-shares-good": 10,
197 "count-good-share-hosts": 11,
198 "list-corrupt-shares": [],
199 "count-wrong-shares": 0,
200 "sharemap": {"shareid1": [serverid_1, serverid_f]},
201 "count-recoverable-versions": 1,
202 "count-unrecoverable-versions": 0,
203 "servers-responding": [],
205 post_cr.set_data(data)
207 crr = check_results.CheckAndRepairResults(u.get_storage_index())
208 crr.pre_repair_results = pre_cr
209 crr.post_repair_results = post_cr
# Case 1: no repair attempted.
210 crr.repair_attempted = False
212 w = web_check_results.CheckAndRepairResults(c, crr)
213 html = self.render2(w)
214 s = self.remove_tags(html)
216 self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
217 self.failUnlessIn("Healthy : groovy", s)
218 self.failUnlessIn("No repair necessary", s)
219 self.failUnlessIn("Post-Repair Checker Results:", s)
220 self.failUnlessIn("Share Counts: need 3-of-10, have 10", s)
# Case 2: repair attempted and successful.
222 crr.repair_attempted = True
223 crr.repair_successful = True
224 html = self.render2(w)
225 s = self.remove_tags(html)
227 self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
228 self.failUnlessIn("Healthy : groovy", s)
229 self.failUnlessIn("Repair successful", s)
230 self.failUnlessIn("Post-Repair Checker Results:", s)
# Case 3: repair failed, file still unhealthy (but improved).
232 crr.repair_attempted = True
233 crr.repair_successful = False
234 post_cr.set_healthy(False)
235 post_cr.set_summary("better")
236 html = self.render2(w)
237 s = self.remove_tags(html)
239 self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
240 self.failUnlessIn("Not Healthy! : better", s)
241 self.failUnlessIn("Repair unsuccessful", s)
242 self.failUnlessIn("Post-Repair Checker Results:", s)
# Case 4: repair failed and the file is now unrecoverable.
244 crr.repair_attempted = True
245 crr.repair_successful = False
246 post_cr.set_healthy(False)
247 post_cr.set_recoverable(False)
248 post_cr.set_summary("worse")
249 html = self.render2(w)
250 s = self.remove_tags(html)
252 self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
253 self.failUnlessIn("Not Recoverable! : worse", s)
254 self.failUnlessIn("Repair unsuccessful", s)
255 self.failUnlessIn("Post-Repair Checker Results:", s)
# JSON view of the combined results.
257 d = self.render_json(w)
# NOTE(review): the `def _got_json(data):` header (orig. 258) is missing;
# the following lines are that callback's body.
259 j = simplejson.loads(data)
260 self.failUnlessEqual(j["repair-attempted"], True)
261 self.failUnlessEqual(j["storage-index"],
262 "2k6avpjga3dho3zsjo6nnkt7n4")
263 self.failUnlessEqual(j["pre-repair-results"]["summary"], "illing")
264 self.failUnlessEqual(j["post-repair-results"]["summary"], "worse")
265 d.addCallback(_got_json)
# Literal files pass crr=None: JSON should report no repair and empty SI.
267 w2 = web_check_results.CheckAndRepairResults(c, None)
268 d.addCallback(lambda ignored: self.render_json(w2))
269 def _got_lit_results(data):
270 j = simplejson.loads(data)
271 self.failUnlessEqual(j["repair-attempted"], False)
272 self.failUnlessEqual(j["storage-index"], "")
273 d.addCallback(_got_lit_results)
276 class AddLease(GridTestMixin, unittest.TestCase):
277 # test for #875, in which failures in the add-lease call cause
278 # false-negatives in the checker
# NOTE(review): the test-method header (`def test_875(self):`, orig. 280)
# is missing, as are the DATA constant definition (orig. ~286), the body of
# _stash_mutable (orig. 293, presumably `self.mut = node`), and the
# trailing `return d` (orig. ~322). Recover from VCS.
281 self.basedir = "checker/AddLease/875"
282 self.set_up_grid(num_servers=1)
283 c0 = self.g.clients[0]
# happy=1 so a single-server grid can satisfy the upload.
284 c0.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
# Upload one immutable file and create one mutable file to check against.
287 d = c0.upload(Data(DATA, convergence=""))
288 def _stash_immutable(ur):
289 self.imm = c0.create_node_from_uri(ur.uri)
290 d.addCallback(_stash_immutable)
291 d.addCallback(lambda ign: c0.create_mutable_file("contents"))
292 def _stash_mutable(node):
294 d.addCallback(_stash_mutable)
# Assert a check result is healthy; `which` labels the failing case.
296 def _check_cr(cr, which):
297 self.failUnless(cr.is_healthy(), which)
299 # these two should work normally
300 d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
301 d.addCallback(_check_cr, "immutable-normal")
302 d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
303 d.addCallback(_check_cr, "mutable-normal")
305 really_did_break = []
306 # now break the server's remote_add_lease call
307 def _break_add_lease(ign):
308 def broken_add_lease(*args, **kwargs):
309 really_did_break.append(1)
310 raise KeyError("intentional failure, should be ignored")
311 assert self.g.servers_by_number[0].remote_add_lease
312 self.g.servers_by_number[0].remote_add_lease = broken_add_lease
313 d.addCallback(_break_add_lease)
315 # and confirm that the files still look healthy
316 d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
317 d.addCallback(_check_cr, "mutable-broken")
318 d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
319 d.addCallback(_check_cr, "immutable-broken")
# Guard against a silent no-op: the monkeypatch must actually have fired.
321 d.addCallback(lambda ign: self.failUnless(really_did_break))
class CounterHolder(object):
    """Mutable counter pair shared with MockVRBP.

    Tracks how many block fetches are in flight at once:
    ``_num_active_block_fetches`` is the current count and
    ``_max_active_block_fetches`` is the observed high-water mark, which
    the TooParallel test asserts against.

    Fix: the original chunk dropped the ``def __init__(self):`` header,
    leaving two ``self.`` assignments at class-body level (a SyntaxError
    at class-definition time); restore the constructor.
    """
    def __init__(self):
        # current number of outstanding get_block() calls
        self._num_active_block_fetches = 0
        # peak number of simultaneous get_block() calls seen so far
        self._max_active_block_fetches = 0
329 from allmydata.immutable.checker import ValidatedReadBucketProxy
class MockVRBP(ValidatedReadBucketProxy):
    """A ValidatedReadBucketProxy that records block-fetch concurrency.

    Each ``get_block`` call increments the shared CounterHolder's active
    count (updating its high-water mark) and decrements it when the fetch
    completes, so a test can observe how many fetches ran in parallel.

    Fixes over the chunk as given: ``get_block`` dropped both its
    ``return d`` (callers would get ``None`` instead of the Deferred) and
    the ``return res`` inside the addBoth callback (which would have
    swallowed the block data / failure).
    """
    def __init__(self, sharenum, bucket, share_hash_tree, num_blocks,
                 block_size, share_size, counterholder):
        ValidatedReadBucketProxy.__init__(self, sharenum, bucket,
                                          share_hash_tree, num_blocks,
                                          block_size, share_size)
        # shared CounterHolder instance; mutated by every get_block() call
        self.counterholder = counterholder

    def get_block(self, blocknum):
        # One more fetch in flight; record the high-water mark.
        self.counterholder._num_active_block_fetches += 1
        if (self.counterholder._num_active_block_fetches >
                self.counterholder._max_active_block_fetches):
            self.counterholder._max_active_block_fetches = \
                self.counterholder._num_active_block_fetches
        d = ValidatedReadBucketProxy.get_block(self, blocknum)
        def _mark_no_longer_active(res):
            self.counterholder._num_active_block_fetches -= 1
            # addBoth: pass the result (or Failure) through unchanged
            return res
        d.addBoth(_mark_no_longer_active)
        return d
348 class TooParallel(GridTestMixin, unittest.TestCase):
349 # bug #1395: immutable verifier was aggressively parallized, checking all
350 # blocks of all shares at the same time, blowing our memory budget and
351 # crashing with MemoryErrors on >1GB files.
# NOTE(review): interior lines are missing from this chunk -- the
# `def _start(ign):` header before orig. 369, the `def _do_check(ur):`
# header before orig. 381, the `def _check(cr):` header before orig. 388,
# the remaining DEFAULT_ENCODING_PARAMETERS entries and dict close
# (orig. 372-376), and the addBoth-based cleanup that restores origVRBP
# (orig. ~390-395). Recover from VCS.
353 def test_immutable(self):
354 import allmydata.immutable.checker
# Remember the real class so it can be restored after the test.
355 origVRBP = allmydata.immutable.checker.ValidatedReadBucketProxy
357 self.basedir = "checker/TooParallel/immutable"
359 # If any code asks to instantiate a ValidatedReadBucketProxy,
360 # we give them a MockVRBP which is configured to use our
# ...shared CounterHolder, so concurrency is observable.
362 counterholder = CounterHolder()
363 def make_mock_VRBP(*args, **kwargs):
364 return MockVRBP(counterholder=counterholder, *args, **kwargs)
# Monkeypatch the checker module for the duration of the test.
365 allmydata.immutable.checker.ValidatedReadBucketProxy = make_mock_VRBP
367 d = defer.succeed(None)
369 self.set_up_grid(num_servers=4)
370 self.c0 = self.g.clients[0]
371 self.c0.DEFAULT_ENCODING_PARAMETERS = { "k": 1,
374 "max_segment_size": 5,
377 DATA = "data" * 100 # 400/5 = 80 blocks
378 return self.c0.upload(Data(DATA, convergence=""))
379 d.addCallback(_start)
# Verify the uploaded file; verification fetches every block.
381 n = self.c0.create_node_from_uri(ur.uri)
382 return n.check(Monitor(), verify=True)
383 d.addCallback(_do_check)
385 # the verifier works on all 4 shares in parallel, but only
386 # fetches one block from each share at a time, so we expect to
387 # see 4 parallel fetches
388 self.failUnlessEqual(counterholder._max_active_block_fetches, 4)
389 d.addCallback(_check)
391 allmydata.immutable.checker.ValidatedReadBucketProxy = origVRBP
# Generous timeout: the verify pass touches 80 blocks across 4 shares.
396 test_immutable.timeout = 40