from allmydata.test import common
from allmydata.monitor import Monitor
from allmydata import check_results
from allmydata.interfaces import IURI, NotEnoughSharesError
from allmydata.immutable import upload
from allmydata.util import hashutil, log
from twisted.internet import defer
from twisted.trial import unittest
import random
import common_util as testutil

READ_LEEWAY = 18 # We'll allow you to pass this test even if you trigger eighteen times as many disk reads and block fetches as would be optimal.
DELTA_READS = 10 * READ_LEEWAY # N = 10
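
# The judge functions below expect the default 3-of-10 encoding
# (count-shares-needed == 3, count-shares-expected == 10) with the ten shares
# spread across five storage servers, two shares per server
# (count-good-share-hosts == 5).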

class Verifier(common.ShareManglingMixin, unittest.TestCase):
    def test_check_without_verify(self):
        """ Check says the file is healthy when none of the shares have been touched. It says
        that the file is unhealthy when all of them have been removed. It doesn't use any reads.
        """
        d = defer.succeed(self.filenode)
        def _check1(filenode):
            before_check_reads = self._count_reads()

            d2 = filenode.check(Monitor(), verify=False)
            def _after_check(checkresults):
                after_check_reads = self._count_reads()
                self.failIf(after_check_reads - before_check_reads > 0, after_check_reads - before_check_reads)
                self.failUnless(checkresults.is_healthy())

            d2.addCallback(_after_check)
            return d2
        d.addCallback(_check1)

        d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
        def _check2(ignored):
            before_check_reads = self._count_reads()
            d2 = self.filenode.check(Monitor(), verify=False)

            def _after_check(checkresults):
                after_check_reads = self._count_reads()
                self.failIf(after_check_reads - before_check_reads > 0, after_check_reads - before_check_reads)
                self.failIf(checkresults.is_healthy())

            d2.addCallback(_after_check)
            return d2
        d.addCallback(_check2)

        return d
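
    # _help_test_verify drives each of the verification tests below: it
    # stashes the pristine shares, and then for each corruptor function it
    # corrupts one randomly chosen share, runs a verifying check (whose read
    # count must stay within DELTA_READS), passes the results to the test's
    # judgement function, and finally puts the original shares back so the
    # next corruptor starts from a clean slate.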
    def _help_test_verify(self, corruptor_funcs, judgement_func):
        d = defer.succeed(None)

        d.addCallback(self.find_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)
        def _put_it_all_back(ignored):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            return ignored

        def _verify_after_corruption(corruptor_func):
            before_check_reads = self._count_reads()
            d2 = self.filenode.check(Monitor(), verify=True)
            def _after_check(checkresults):
                after_check_reads = self._count_reads()
                self.failIf(after_check_reads - before_check_reads > DELTA_READS, (after_check_reads, before_check_reads))
                try:
                    return judgement_func(checkresults)
                except Exception, le:
                    le.args = tuple(le.args + ("corruptor_func: " + corruptor_func.__name__,))
                    raise

            d2.addCallback(_after_check)
            return d2

        for corruptor_func in corruptor_funcs:
            d.addCallback(self._corrupt_a_random_share, corruptor_func)
            d.addCallback(_verify_after_corruption)
            d.addCallback(_put_it_all_back)

        return d

    def test_verify_no_problem(self):
        """ Verify says the file is healthy when none of the shares have been touched in a way
        that matters. It doesn't use more than READ_LEEWAY times as many reads as it needs."""
        def judge(checkresults):
            self.failUnless(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 10, data)
            self.failUnless(len(data['sharemap']) == 10, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['list-corrupt-shares']) == 0, data)
        return self._help_test_verify([
            common._corrupt_nothing,
            common._corrupt_size_of_file_data,
            common._corrupt_size_of_sharedata,
            common._corrupt_segment_size, ], judge)
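
    # The three size corruptors above are the "touched in a way that doesn't
    # matter" cases: those fields are evidently not relied upon during
    # verification, so the verifier is still expected to call the file healthy.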

    def test_verify_server_visible_corruption(self):
        """ Corruption which is detected by the server means that the server will send you back
        a Failure in response to get_bucket instead of giving you the share data. Test that
        verifier handles these answers correctly. It doesn't use more than READ_LEEWAY times as
        many reads as it needs."""
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            # The server might fail to serve up its other share as well as the corrupted
            # one, so count-shares-good could be 8 or 9.
            self.failUnless(data['count-shares-good'] in (8, 9), data)
            self.failUnless(len(data['sharemap']) in (8, 9,), data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            # The server may have served up the non-corrupted share, or it may not have, so
            # the checker could have detected either 4 or 5 good servers.
            self.failUnless(data['count-good-share-hosts'] in (4, 5), data)
            self.failUnless(len(data['servers-responding']) in (4, 5), data)
            # If the server served up the other share, then the checker should consider it good, else it should
            # not: nine good shares goes with five good hosts, eight with four.
            self.failUnless((data['count-shares-good'] == 9) == (data['count-good-share-hosts'] == 5), data)
            self.failUnless(len(data['list-corrupt-shares']) == 0, data)
        return self._help_test_verify([
            common._corrupt_file_version_number,
            ], judge)

    def test_verify_share_incompatibility(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(len(data['sharemap']) == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
        return self._help_test_verify([
            common._corrupt_sharedata_version_number,
            ], judge)

    def test_verify_server_invisible_corruption(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_sharedata,
            common._corrupt_offset_of_uri_extension,
            common._corrupt_offset_of_uri_extension_to_force_short_read,
            common._corrupt_share_data,
            common._corrupt_length_of_uri_extension,
            common._corrupt_uri_extension,
            ], judge)

    def test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_block_hashtree_to_truncate_crypttext_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."
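    # Setting the .todo attribute tells Twisted trial to treat a failure of
    # this test as expected rather than as an error; the same marker is used
    # on the other not-yet-detected corruption cases below.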

    def test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_block_hashes,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_block_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_sharedata_plausible_version(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_sharedata_version_number_to_plausible_version,
            ], judge)

    def test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_share_hashes,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_share_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_offset_of_ciphertext_hash_tree,
            ], judge)
    test_verify_server_invisible_corruption_offset_of_ciphertext_hashtree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_cryptext_hash_tree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_crypttext_hash_tree,
            ], judge)
    test_verify_server_invisible_corruption_cryptext_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_block_hash_tree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_block_hashes,
            ], judge)
    test_verify_server_invisible_corruption_block_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

    def test_verify_server_invisible_corruption_share_hash_tree_TODO(self):
        def judge(checkresults):
            self.failIf(checkresults.is_healthy(), (checkresults, checkresults.is_healthy(), checkresults.get_data()))
            data = checkresults.get_data()
            self.failUnless(data['count-shares-good'] == 9, data)
            self.failUnless(data['count-shares-needed'] == 3, data)
            self.failUnless(data['count-shares-expected'] == 10, data)
            self.failUnless(data['count-good-share-hosts'] == 5, data)
            self.failUnless(data['count-corrupt-shares'] == 1, (data,))
            self.failUnless(len(data['list-corrupt-shares']) == 1, data)
            self.failUnless(len(data['list-corrupt-shares']) == data['count-corrupt-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == data['count-incompatible-shares'], data)
            self.failUnless(len(data['list-incompatible-shares']) == 0, data)
            self.failUnless(len(data['servers-responding']) == 5, data)
            self.failUnless(len(data['sharemap']) == 9, data)
        return self._help_test_verify([
            common._corrupt_share_hashes,
            ], judge)
    test_verify_server_invisible_corruption_share_hash_tree_TODO.todo = "Verifier doesn't yet properly detect this kind of corruption."

WRITE_LEEWAY = 10 # We'll allow you to pass this test even if you trigger ten times as many block sends and disk writes as would be optimal.
DELTA_WRITES_PER_SHARE = 1 * WRITE_LEEWAY # Optimally, you could repair one of these (small) files in a single write.
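# Note that _count_reads and _count_writes come from the ShareManglingMixin
# test harness and presumably count operations across all servers in the test
# grid, so these deltas bound the total amount of work, not per-server work.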

class Repairer(common.ShareManglingMixin, unittest.TestCase):
    def test_test_code(self):
        # The following process of stashing the shares, running
        # replace_shares, and asserting that the new set of shares equals the
        # old is more to test this test code than to test the Tahoe code...
        d = defer.succeed(None)
        d.addCallback(self.find_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)
        d.addCallback(self.replace_shares, storage_index=self.uri.storage_index)

        def _compare(res):
            oldshares = stash[0]
            self.failUnless(isinstance(oldshares, dict), oldshares)
            self.failUnlessEqual(oldshares, res)

        d.addCallback(self.find_shares)
        d.addCallback(_compare)

        d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
        d.addCallback(self.find_shares)
        d.addCallback(lambda x: self.failUnlessEqual(x, {}))

        # The following process of deleting 8 of the shares and asserting that you can't
        # download it is more to test this test code than to test the Tahoe code...
        def _then_delete_8(unused=None):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        def _then_download(unused=None):
            self.downloader = self.clients[1].getServiceNamed("downloader")
            d = self.downloader.download_to_data(self.uri)

            def _after_download_callb(result):
                self.fail() # should have gotten an errback instead

            def _after_download_errb(failure):
                failure.trap(NotEnoughSharesError)
                return None # success!
            d.addCallbacks(_after_download_callb, _after_download_errb)
            return d # wait for the download attempt to resolve before the next step
        d.addCallback(_then_download)

        # The following process of deleting 8 of the shares and asserting that you can't repair
        # it is more to test this test code than to test the Tahoe code...
        def _then_delete_8(unused=None):
            self.replace_shares(stash[0], storage_index=self.uri.storage_index)
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        def _then_repair(unused=None):
            d2 = self.filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair_callb(result):
                self.fail() # should have gotten an errback instead

            def _after_repair_errb(f):
                f.trap(NotEnoughSharesError)
                return None # success!
            d2.addCallbacks(_after_repair_callb, _after_repair_errb)
            return d2
        d.addCallback(_then_repair)

        return d

    def test_repair_from_deletion_of_1(self):
        """ Repair replaces a share that got deleted. """
        d = defer.succeed(None)
        d.addCallback(self._delete_a_share, sharenum=2)

        def _repair_from_deletion_of_1(unused):
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_writes()

            d2 = self.filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair(checkandrepairresults):
                assert isinstance(checkandrepairresults, check_results.CheckAndRepairResults), checkandrepairresults
                prerepairres = checkandrepairresults.get_pre_repair_results()
                assert isinstance(prerepairres, check_results.CheckResults), prerepairres
                postrepairres = checkandrepairresults.get_post_repair_results()
                assert isinstance(postrepairres, check_results.CheckResults), postrepairres
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
                self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
                self.failIf(after_repair_allocates - before_repair_allocates > DELTA_WRITES_PER_SHARE, (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy())
                self.failUnless(postrepairres.is_healthy())

                # Now we inspect the filesystem to make sure that it has 10 shares.
                shares = self.find_shares()
                self.failIf(len(shares) < 10)

                # Now we delete seven of the other shares, then try to download the file and
                # assert that it succeeds at downloading and has the right contents. This can't
                # work unless it has already repaired the previously-deleted share #2.
                for sharenum in range(3, 10):
                    self._delete_a_share(sharenum=sharenum)

                return self._download_and_check_plaintext()

            d2.addCallback(_after_repair)
            return d2
        d.addCallback(_repair_from_deletion_of_1)

        return d

    def test_repair_from_deletion_of_7(self):
        """ Repair replaces seven shares that got deleted. """
        shares = self.find_shares()
        self.failIf(len(shares) != 10)
        d = defer.succeed(None)

        def _delete_7(unused=None):
            shnums = range(10)
            random.shuffle(shnums)
            for sharenum in shnums[:7]:
                self._delete_a_share(sharenum=sharenum)
        d.addCallback(_delete_7)

        def _repair_from_deletion_of_7(unused):
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_writes()

            d2 = self.filenode.check_and_repair(Monitor(), verify=False)
            def _after_repair(checkandrepairresults):
                assert isinstance(checkandrepairresults, check_results.CheckAndRepairResults), checkandrepairresults
                prerepairres = checkandrepairresults.get_pre_repair_results()
                assert isinstance(prerepairres, check_results.CheckResults), prerepairres
                postrepairres = checkandrepairresults.get_post_repair_results()
                assert isinstance(postrepairres, check_results.CheckResults), postrepairres
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # print "delta was ", after_repair_reads - before_repair_reads, after_repair_allocates - before_repair_allocates
                self.failIf(after_repair_reads - before_repair_reads > DELTA_READS)
                self.failIf(after_repair_allocates - before_repair_allocates > (DELTA_WRITES_PER_SHARE * 7), (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy())
                self.failUnless(postrepairres.is_healthy(), postrepairres.data)

                # Now we inspect the filesystem to make sure that it has 10 shares.
                shares = self.find_shares()
                self.failIf(len(shares) < 10)

                # Now we delete seven random shares, then try to download the file and
                # assert that it succeeds at downloading and has the right contents.
                for i in range(7):
                    self._delete_a_share()

                return self._download_and_check_plaintext()

            d2.addCallback(_after_repair)
            return d2
        d.addCallback(_repair_from_deletion_of_7)

        return d

    def test_repair_from_corruption_of_1(self):
        d = defer.succeed(None)

        def _repair_from_corruption(unused, corruptor_func):
            before_repair_reads = self._count_reads()
            before_repair_allocates = self._count_writes()

            d2 = self.filenode.check_and_repair(Monitor(), verify=True)
            def _after_repair(checkandrepairresults):
                prerepairres = checkandrepairresults.get_pre_repair_results()
                postrepairres = checkandrepairresults.get_post_repair_results()
                after_repair_reads = self._count_reads()
                after_repair_allocates = self._count_writes()

                # The "* 2" in reads is because you might read a whole share before figuring
                # out that it is corrupted. It might be possible to make this delta reads
                # number a little tighter.
                self.failIf(after_repair_reads - before_repair_reads > (DELTA_READS * 2), (after_repair_reads, before_repair_reads))
                # The "* 2" in writes is because each server has two shares, and it is
                # reasonable for repairer to conclude that there are two shares that it
                # should upload, if the server fails to serve the first share.
                self.failIf(after_repair_allocates - before_repair_allocates > (DELTA_WRITES_PER_SHARE * 2), (after_repair_allocates, before_repair_allocates))
                self.failIf(prerepairres.is_healthy(), (prerepairres.data, corruptor_func))
                self.failUnless(postrepairres.is_healthy(), (postrepairres.data, corruptor_func))

                return self._download_and_check_plaintext()

            d2.addCallback(_after_repair)
            return d2

        for corruptor_func in (
            common._corrupt_file_version_number,
            common._corrupt_sharedata_version_number,
            common._corrupt_offset_of_sharedata,
            common._corrupt_offset_of_uri_extension,
            common._corrupt_offset_of_uri_extension_to_force_short_read,
            common._corrupt_share_data,
            common._corrupt_length_of_uri_extension,
            common._corrupt_uri_extension,
            ):
            # Now we corrupt a share...
            d.addCallback(self._corrupt_a_random_share, corruptor_func)
            # ... and repair it.
            d.addCallback(_repair_from_corruption, corruptor_func)

        return d
    test_repair_from_corruption_of_1.todo = "Repairer doesn't properly replace corrupted shares yet."


# XXX extend these tests to show that the checker detects which specific share on which
# specific server is broken -- this is necessary so that the checker results can be passed
# to the repairer and the repairer can go ahead and upload fixes without first doing what
# is effectively a check (/verify) run

# XXX extend these tests to show bad behavior of various kinds from servers: raising
# exception from each remove_foo() method, for example

# XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit

# XXX test corruption that truncates other hash trees than just the crypttext hash tree

# XXX test the notify-someone-about-corruption feature (also implement that feature)

# XXX test whether repairer (downloader) correctly downloads a file even if to do so it
# has to acquire shares from a server that has already tried to serve it a corrupted
# share. (I don't think the current downloader would pass this test, depending on the
# kind of corruption.)