from allmydata.test import common
from allmydata.interfaces import NotEnoughSharesError
from allmydata.util.consumer import download_to_data
from allmydata import uri
from twisted.internet import defer
from twisted.trial import unittest
import random

from foolscap.api import eventually
from allmydata.util import log

from allmydata.immutable.downloader import finder

import mock

class MockNode(object):
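    """
    A minimal stand-in for the real download node: it implements only the
    methods these tests expect the share-finding machinery to call, plus
    just enough state to detect the two misbehaviors the tests look for
    (being handed a share after no_more_shares(), and a spurious
    fetch_failed()).
    """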
    def __init__(self, check_reneging, check_fetch_failed):
        self.got = 0
        self.finished_d = defer.Deferred()
        self.segment_size = 78
        self.guessed_segment_size = 78
        self._no_more_shares = False
        self.check_reneging = check_reneging
        self.check_fetch_failed = check_fetch_failed
        self._si_prefix = 'aa'
        self.have_UEB = True
        self.share_hash_tree = mock.Mock()
        self.share_hash_tree.needed_hashes.return_value = False
        self.on_want_more_shares = None

    def when_finished(self):
        return self.finished_d
    def get_num_segments(self):
        return (5, True)
    def _calculate_sizes(self, guessed_segment_size):
        return {'block_size': 4, 'num_segments': 5}
    def no_more_shares(self):
        self._no_more_shares = True
    def got_shares(self, shares):
        if self.check_reneging:
            if self._no_more_shares:
                self.finished_d.errback(unittest.FailTest("The node was told by the share finder that it is destined to remain hungry, then was given another share."))
                return
        self.got += len(shares)
        log.msg("yyy 3 %s.got_shares(%s) got: %s" % (self, shares, self.got))
        if self.got == 3:
            self.finished_d.callback(True)
    def get_desired_ciphertext_hashes(self, *args, **kwargs):
        return iter([])
    def fetch_failed(self, *args, **kwargs):
        if self.check_fetch_failed:
            if self.finished_d:
                self.finished_d.errback(unittest.FailTest("The node was told by the segment fetcher that the download failed."))
                self.finished_d = None
    def want_more_shares(self):
        if self.on_want_more_shares:
            self.on_want_more_shares()
    def process_blocks(self, *args, **kwargs):
        if self.finished_d:
            self.finished_d.callback(None)

class TestShareFinder(unittest.TestCase):
    def test_no_reneging_on_no_more_shares_ever(self):
        # ticket #1191

        # Suppose that K=3 and you send two DYHB requests, the first
        # response offers two shares, and then the last offers one
        # share. If you tell your share consumer "no more shares,
        # ever", and then immediately tell them "oh, and here's
        # another share", then you lose.
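        #
        # A sketch of the sequence the mocks below aim to set up (the exact
        # ordering is driven by eventually() and by each mock server
        # re-triggering hungry() after it answers):
        #
        #   DYHB answer 1: two shares      (still hungry, since K=3)
        #   DYHB answer 2: no shares
        #   DYHB answer 3: one share
        #   no servers left -> no_more_shares()
        #   any got_shares() after that point makes MockNode errback.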

        rcap = uri.CHKFileURI('a'*32, 'a'*32, 3, 99, 100)
        vcap = rcap.get_verify_cap()

        class MockServer(object):
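            """
            A stand-in for a remote storage server reference: every
            callRemote() eventually answers with the configured buckets and
            then calls self.s.hungry(), so the share finder keeps looking
            for more shares even after it has already been given some.
            """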
            def __init__(self, buckets):
                self.version = {
                    'http://allmydata.org/tahoe/protocols/storage/v1': {
                        "tolerates-immutable-read-overrun": True
                        }
                    }
                self.buckets = buckets
                self.d = defer.Deferred()
                self.s = None
            def callRemote(self, methname, *args, **kwargs):
                d = defer.Deferred()

                # Even after the 3rd answer we're still hungry because
                # we're interested in finding a share on a 3rd server
                # so we don't have to download more than one share
                # from the first server. This is actually necessary to
                # trigger the bug.
                def _give_buckets_and_hunger_again():
                    d.callback(self.buckets)
                    self.s.hungry()
                eventually(_give_buckets_and_hunger_again)
                return d

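        # Three servers answering DYHB as in the scenario above: two shares,
        # then none, then one more.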
        mockserver1 = MockServer({1: mock.Mock(), 2: mock.Mock()})
        mockserver2 = MockServer({})
        mockserver3 = MockServer({3: mock.Mock()})
        mockstoragebroker = mock.Mock()
        mockstoragebroker.get_servers_for_index.return_value = [ ('ms1', mockserver1), ('ms2', mockserver2), ('ms3', mockserver3), ]
        mockdownloadstatus = mock.Mock()
        mocknode = MockNode(check_reneging=True, check_fetch_failed=True)

        s = finder.ShareFinder(mockstoragebroker, vcap, mocknode, mockdownloadstatus)

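        # Give each mock server a reference back to the finder so that
        # _give_buckets_and_hunger_again() can call s.hungry() after
        # answering.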
        mockserver1.s = s
        mockserver2.s = s
        mockserver3.s = s

        s.hungry()

        return mocknode.when_finished()

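# The tests below lean on the share-mangling helpers provided by
# common.ShareManglingMixin (find_all_shares, replace_shares,
# _delete_a_share, _corrupt_a_share, _count_reads); the "of the 10"
# docstrings and the delete-7/delete-8 thresholds assume the usual 3-of-10
# encoding that the mixin's fixture appears to use.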
class Test(common.ShareManglingMixin, common.ShouldFailMixin, unittest.TestCase):
    def test_test_code(self):
        # The following process of stashing the shares, running
        # replace_shares, and asserting that the new set of shares equals the
        # old is more to test this test code than to test the Tahoe code...
        d = defer.succeed(None)
        d.addCallback(self.find_all_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)

        # The following process of deleting 8 of the shares and asserting
        # that you can't download it is more to test this test code than to
        # test the Tahoe code...
        def _then_delete_8(unused=None):
            self.replace_shares(stash[0], storage_index=self.uri.get_storage_index())
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        def _then_download(unused=None):
            d2 = download_to_data(self.n)

            def _after_download_callb(result):
                self.fail() # should have gotten an errback instead
                return result
            def _after_download_errb(failure):
                failure.trap(NotEnoughSharesError)
                return None # success!
            d2.addCallbacks(_after_download_callb, _after_download_errb)
            return d2
        d.addCallback(_then_download)

        return d

    def test_download(self):
        """ Basic download. (This functionality is more or less already
        tested by test code in other modules, but this module is also going
        to test some more specific things about immutable download.)
        """
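        # The read-count ceiling below (41) is a rough performance-regression
        # check, in the same spirit as the "performance regression detector"
        # comment in test_download_abort_if_too_many_corrupted_shares.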
        d = defer.succeed(None)
        before_download_reads = self._count_reads()
        def _after_download(unused=None):
            after_download_reads = self._count_reads()
            #print before_download_reads, after_download_reads
            self.failIf(after_download_reads-before_download_reads > 41,
                        (after_download_reads, before_download_reads))
        d.addCallback(self._download_and_check_plaintext)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_remaining_shares(self):
        """ Test download after 7 random shares (of the 10) have been
        removed."""
        d = defer.succeed(None)
        def _then_delete_7(unused=None):
            for i in range(7):
                self._delete_a_share()
        before_download_reads = self._count_reads()
        d.addCallback(_then_delete_7)
        def _after_download(unused=None):
            after_download_reads = self._count_reads()
            #print before_download_reads, after_download_reads
            self.failIf(after_download_reads-before_download_reads > 41, (after_download_reads, before_download_reads))
        d.addCallback(self._download_and_check_plaintext)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_shares_with_good_crypttext_hash(self):
        """ Test download after 7 random shares (of the 10) have had their
        crypttext hash tree corrupted."""
        d = defer.succeed(None)
        def _then_corrupt_7(unused=None):
            shnums = range(10)
            random.shuffle(shnums)
            for i in shnums[:7]:
                self._corrupt_a_share(None, common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, i)
        #before_download_reads = self._count_reads()
        d.addCallback(_then_corrupt_7)
        d.addCallback(self._download_and_check_plaintext)
        return d

    def test_download_abort_if_too_many_missing_shares(self):
        """ Test that download gives up quickly when it realizes there aren't
        enough shares out there."""
        for i in range(8):
            self._delete_a_share()
        d = self.shouldFail(NotEnoughSharesError, "delete 8", None,
                            download_to_data, self.n)
        # the new downloader pipelines a bunch of read requests in parallel,
        # so don't bother asserting anything about the number of reads
        return d

    def test_download_abort_if_too_many_corrupted_shares(self):
        """Test that download gives up quickly when it realizes there aren't
        enough uncorrupted shares out there. It should be able to tell
        because the corruption occurs in the sharedata version number, which
        it checks first."""
        d = defer.succeed(None)
        def _then_corrupt_8(unused=None):
            shnums = range(10)
            random.shuffle(shnums)
            for shnum in shnums[:8]:
                self._corrupt_a_share(None, common._corrupt_sharedata_version_number, shnum)
        d.addCallback(_then_corrupt_8)

        before_download_reads = self._count_reads()
        def _attempt_to_download(unused=None):
            d2 = download_to_data(self.n)

            def _callb(res):
                self.fail("Should have gotten an error from attempt to download, not %r" % (res,))
            def _errb(f):
                self.failUnless(f.check(NotEnoughSharesError))
            d2.addCallbacks(_callb, _errb)
            return d2

        d.addCallback(_attempt_to_download)

        def _after_attempt(unused=None):
            after_download_reads = self._count_reads()
            #print before_download_reads, after_download_reads
            # To pass this test, you are required to give up before reading
            # all of the share data. Actually, we could give up sooner than
            # 45 reads, but currently our download code does 45 reads. This
            # test then serves as a "performance regression detector" -- if
            # you change download code so that it takes *more* reads, then
            # this test will fail.
            self.failIf(after_download_reads-before_download_reads > 45,
                        (after_download_reads, before_download_reads))
        d.addCallback(_after_attempt)
        return d


# XXX extend these tests to show bad behavior of various kinds from servers:
# raising an exception from each remote_foo() method, for example

# XXX test disconnect (DeadReferenceError) from get_buckets and get_block_whatsit

# TODO: delete this whole file