from allmydata.test import common
from allmydata.monitor import Monitor
from allmydata import check_results
from allmydata.interfaces import NotEnoughSharesError
from twisted.internet import defer
from twisted.trial import unittest
import random
READ_LEEWAY = 18 # We'll allow you to pass this test even if you trigger eighteen times as many disk reads and block fetches as would be optimal.
DELTA_READS = 10 * READ_LEEWAY # N = 10
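# For example: a verify pass over this file optimally touches each of the
# N = 10 shares roughly once, so with READ_LEEWAY = 18 the assertions below
# tolerate up to DELTA_READS = 10 * 18 = 180 reads/block-fetches before failing.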

class Verifier(common.ShareManglingMixin, unittest.TestCase):
    def test_check_without_verify(self):
        """ Check says the file is healthy when none of the shares have been touched. It says
        that the file is unhealthy when all of them have been removed. It doesn't use any reads.
        """
        d = defer.succeed(self.filenode)
        def _check1(filenode):
            before_check_reads = self._count_reads()

            d2 = filenode.check(Monitor(), verify=False)
            def _after_check(checkresults):
                after_check_reads = self._count_reads()
                self.failIf(after_check_reads - before_check_reads > 0, after_check_reads - before_check_reads)
                self.failUnless(checkresults.is_healthy())

            d2.addCallback(_after_check)
            return d2
        d.addCallback(_check1)

        d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
        def _check2(ignored):
            before_check_reads = self._count_reads()
            d2 = self.filenode.check(Monitor(), verify=False)

            def _after_check(checkresults):
                after_check_reads = self._count_reads()
                self.failIf(after_check_reads - before_check_reads > 0, after_check_reads - before_check_reads)
                self.failIf(checkresults.is_healthy())

            d2.addCallback(_after_check)
            return d2
        d.addCallback(_check2)

        return d

    def _help_test_verify(self, corruptor_funcs, judgement_func):
        d = defer.succeed(None)

        d.addCallback(self.find_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)
        def _put_it_all_back(ignored):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            return ignored

        def _verify_after_corruption(corruptor_func):
            before_check_reads = self._count_reads()
            d2 = self.filenode.check(Monitor(), verify=True)
            def _after_check(checkresults):
                after_check_reads = self._count_reads()
                self.failIf(after_check_reads - before_check_reads > DELTA_READS, (after_check_reads, before_check_reads))
                try:
                    return judgement_func(checkresults)
                except Exception, le:
                    le.args = tuple(le.args + ("corruptor_func: " + corruptor_func.__name__,))
                    raise
            d2.addCallback(_after_check)
            return d2

        for corruptor_func in corruptor_funcs:
            d.addCallback(self._corrupt_a_random_share, corruptor_func)
            d.addCallback(_verify_after_corruption)
            d.addCallback(_put_it_all_back)
        return d

    def test_verify_no_problem(self):
        """ Verify says the file is healthy when none of the shares have been touched in a way
        that matters. It doesn't use more than READ_LEEWAY times as many reads as it needs."""
        def judge(checkresults):
            self.failUnless(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 10, data)
            self.failUnless(len(data['sharemap']) == 10, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['list-corrupt-shares']) == 0, data)
        return self._help_test_verify([
            common._corrupt_nothing,
            common._corrupt_size_of_file_data,
            common._corrupt_size_of_sharedata,
            common._corrupt_segment_size, ], judge)

    def test_verify_server_visible_corruption(self):
        """ Corruption which is detected by the server means that the server will send you back
        a Failure in response to get_bucket instead of giving you the share data. Test that
        the verifier handles these answers correctly. It doesn't use more than READ_LEEWAY
        times as many reads as it needs."""
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            # The server might fail to serve up its other share as well as the corrupted
            # one, so count-shares-good could be 8 or 9.
            self.failUnless(data['count-shares-good'] in (8, 9), data)
            self.failUnless(len(data['sharemap']) in (8, 9,), data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            # The server may have served up the non-corrupted share, or it may not have, so
            # the checker could have detected either 4 or 5 good servers.
            self.failUnless(data['count-good-share-hosts'] in (4, 5), data)
            self.failUnless(len(data['servers-responding']) in (4, 5), data)
            # If the server served up the other share, then the checker should consider it
            # good, else it should not.
            self.failUnless((data['count-shares-good'] == 9) == (data['count-good-share-hosts'] == 5), data)
            self.failUnless(len(data['list-corrupt-shares']) == 0, data)
        return self._help_test_verify([
            common._corrupt_file_version_number,
            ], judge)

    def test_verify_share_incompatibility(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(len(data['sharemap']) == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
        return self._help_test_verify([
            common._corrupt_sharedata_version_number,
            ], judge)

    def test_verify_server_invisible_corruption(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_sharedata,
            common._corrupt_offset_of_uri_extension,
            common._corrupt_offset_of_uri_extension_to_force_short_read,
            common._corrupt_share_data,
            common._corrupt_length_of_uri_extension,
            common._corrupt_uri_extension,
            ], judge)

    def test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_block_hashes,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_sharedata_plausible_version(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_sharedata_version_number_to_plausible_version,
            ], judge)

    def test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_share_hashes,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_ciphertext_hash_tree,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_cryptext_hash_tree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_crypttext_hash_tree,
            ], judge)
    test_verify_server_invisible_corruption_cryptext_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_block_hash_tree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_block_hashes,
            ], judge)
    test_verify_server_invisible_corruption_block_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_share_hash_tree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_share_hashes,
            ], judge)
    test_verify_server_invisible_corruption_share_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

WRITE_LEEWAY = 10 # We'll allow you to pass this test even if you trigger ten times as many block sends and disk writes as would be optimal.
DELTA_WRITES_PER_SHARE = 1 * WRITE_LEEWAY # Optimally, you could repair one of these (small) files in a single write.
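# For example: the tests below allow a repair of one missing share to issue up
# to DELTA_WRITES_PER_SHARE = 10 writes, and a repair of seven missing shares
# up to 7 * DELTA_WRITES_PER_SHARE = 70 writes, before their assertions fail.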

class Repairer(common.ShareManglingMixin, unittest.TestCase):
    def test_test_code(self):
        # The following process of stashing the shares, running
        # replace_shares, and asserting that the new set of shares equals the
        # old is more to test this test code than to test the Tahoe code...
        d = defer.succeed(None)
        d.addCallback(self.find_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)
        d.addCallback(self.replace_shares, storage_index=self.uri.storage_index)

        def _compare(res):
            oldshares = stash[0]
            self.failUnless(isinstance(oldshares, dict), oldshares)
            self.failUnlessEqual(oldshares, res)

        d.addCallback(self.find_shares)
        d.addCallback(_compare)

        d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
        d.addCallback(self.find_shares)
        d.addCallback(lambda x: self.failUnlessEqual(x, {}))

        # The following process of deleting 8 of the shares and asserting that you can't
        # download it is more to test this test code than to test the Tahoe code...
        def _then_delete_8(unused=None):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        def _then_download(unused=None):
            self.downloader = self.clients[1].getServiceNamed("downloader")
            d = self.downloader.download_to_data(self.uri)

            def _after_download_callb(result):
                self.fail() # should have gotten an errback instead
                return result
            def _after_download_errb(failure):
                failure.trap(NotEnoughSharesError)
                return None # success!
            d.addCallbacks(_after_download_callb, _after_download_errb)
            return d
        d.addCallback(_then_download)

        # The following process of deleting 8 of the shares and asserting that you can't repair
        # it is more to test this test code than to test the Tahoe code...
        def _then_delete_8(unused=None):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        def _then_repair(unused=None):
            d2 = self.filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair_callb(result):
                self.fail() # should have gotten an errback instead
                return result
            def _after_repair_errb(f):
                f.trap(NotEnoughSharesError)
                return None # success!
            d2.addCallbacks(_after_repair_callb, _after_repair_errb)
            return d2
        d.addCallback(_then_repair)

        return d

    def test_repair_from_deletion_of_1(self):
        """ Repair replaces a share that got deleted. """
        d = defer.succeed(None)
        d.addCallback(self._delete_a_share, sharenum=2)

        def _repair_from_deletion_of_1(unused):
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_writes()

            d2 = self.filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair(checkandrepairresults):
                assert isinstance(checkandrepairresults, check_results.CheckAndRepairResults), checkandrepairresults
                prerepairres = checkandrepairresults.get_pre_repair_results()
                assert isinstance(prerepairres, check_results.CheckResults), prerepairres
                postrepairres = checkandrepairresults.get_post_repair_results()
                assert isinstance(postrepairres, check_results.CheckResults), postrepairres
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
                self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
                self.failIf(after_repair_allocates - before_repair_allocates > DELTA_WRITES_PER_SHARE, (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy())
                self.failUnless(postrepairres.is_healthy())

                # Now we inspect the filesystem to make sure that it has 10 shares.
                shares = self.find_shares()
                self.failIf(len(shares) < 10)

                # Now we delete seven of the other shares, then try to download the file and
                # assert that it succeeds at downloading and has the right contents. This can't
                # work unless it has already repaired the previously-deleted share #2.
                for sharenum in range(3, 10):
                    self._delete_a_share(sharenum=sharenum)

                return self._download_and_check_plaintext()

            d2.addCallback(_after_repair)
            return d2
        d.addCallback(_repair_from_deletion_of_1)
        return d

    def test_repair_from_deletion_of_7(self):
        """ Repair replaces seven shares that got deleted. """
        shares = self.find_shares()
        self.failIf(len(shares) != 10)
        d = defer.succeed(None)

        def _delete_7(unused=None):
            shnums = range(10)
            random.shuffle(shnums)
            for sharenum in shnums[:7]:
                self._delete_a_share(sharenum=sharenum)
        d.addCallback(_delete_7)

        def _repair_from_deletion_of_7(unused):
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_writes()

            d2 = self.filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair(checkandrepairresults):
                assert isinstance(checkandrepairresults, check_results.CheckAndRepairResults), checkandrepairresults
                prerepairres = checkandrepairresults.get_pre_repair_results()
                assert isinstance(prerepairres, check_results.CheckResults), prerepairres
                postrepairres = checkandrepairresults.get_post_repair_results()
                assert isinstance(postrepairres, check_results.CheckResults), postrepairres
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
                self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
                self.failIf(after_repair_allocates - before_repair_allocates > (DELTA_WRITES_PER_SHARE * 7), (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy())
                self.failUnless(postrepairres.is_healthy(), postrepairres.data)

                # Now we inspect the filesystem to make sure that it has 10 shares.
                shares = self.find_shares()
                self.failIf(len(shares) < 10)

                # Now we delete seven random shares, then try to download the file and
                # assert that it succeeds at downloading and has the right contents.
                for i in range(7):
                    self._delete_a_share()

                return self._download_and_check_plaintext()

            d2.addCallback(_after_repair)
            return d2
        d.addCallback(_repair_from_deletion_of_7)
        return d

    def test_repair_from_corruption_of_1(self):
        d = defer.succeed(None)

        def _repair_from_corruption(unused, corruptor_func):
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_writes()

            d2 = self.filenode.check_and_repair(Monitor(), verify=True)
            def _after_repair(checkandrepairresults):
                prerepairres = checkandrepairresults.get_pre_repair_results()
                postrepairres = checkandrepairresults.get_post_repair_results()
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # The "* 2" in reads is because you might read a whole share before figuring
                # out that it is corrupted. It might be possible to make this delta reads
                # number a little tighter.
                self.failIf(after_repair_reads - before_repair_reads > (DELTA_READS * 2), (after_repair_reads, before_repair_reads))
                # The "* 2" in writes is because each server has two shares, and it is
                # reasonable for the repairer to conclude that there are two shares that it
                # should upload, if the server fails to serve the first share.
                self.failIf(after_repair_allocates - before_repair_allocates > (DELTA_WRITES_PER_SHARE * 2), (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy(), (prerepairres.data, corruptor_func))
                self.failUnless(postrepairres.is_healthy(), (postrepairres.data, corruptor_func))

                return self._download_and_check_plaintext()

            d2.addCallback(_after_repair)
            return d2

        for corruptor_func in (
            common._corrupt_file_version_number,
            common._corrupt_sharedata_version_number,
            common._corrupt_offset_of_sharedata,
            common._corrupt_offset_of_uri_extension,
            common._corrupt_offset_of_uri_extension_to_force_short_read,
            common._corrupt_share_data,
            common._corrupt_length_of_uri_extension,
            common._corrupt_uri_extension,
            ):
            # Now we corrupt a share...
            d.addCallback(self._corrupt_a_random_share, corruptor_func)
            # ... and repair it.
            d.addCallback(_repair_from_corruption, corruptor_func)
        return d
    test_repair_from_corruption_of_1.todo = "Repairer doesn't properly replace corrupted shares yet."

    # XXX extend these tests to show that the checker detects which specific share on
    # which specific server is broken -- this is necessary so that the checker results
    # can be passed to the repairer and the repairer can go ahead and upload fixes
    # without first doing what is effectively a check (/verify) run

    # XXX extend these tests to show bad behavior of various kinds from servers:
    # raising exception from each remove_foo() method, for example

    # XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit

    # XXX test corruption that truncates other hash trees than just the crypttext hash tree

    # XXX test the notify-someone-about-corruption feature (also implement that feature)

    # XXX test whether repairer (downloader) correctly downloads a file even if to do so
    # it has to acquire shares from a server that has already tried to serve it a
    # corrupted share. (I don't think the current downloader would pass this test,
    # depending on the kind of corruption.)
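
    # A minimal sketch of the hash-tree-truncation idea above, reusing the
    # _help_test_verify() harness from the Verifier class. The corruptor named
    # here (_corrupt_offset_of_share_hashes_to_truncate_block_hashes) is
    # hypothetical -- it does not exist in common yet and would have to be
    # written first, along the lines of
    # _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes:
    #
    # def test_verify_truncated_block_hash_tree_TODO(self):
    #     def judge(checkresults):
    #         self.failIf(checkresults.is_healthy(), checkresults.get_data())
    #         data = checkresults.get_data()
    #         self.failUnless(data['count-corrupt-shares'] == 1, data)
    #         self.failUnless(len(data['list-corrupt-shares']) == 1, data)
    #     return self._help_test_verify([
    #         common._corrupt_offset_of_share_hashes_to_truncate_block_hashes,
    #         ], judge)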