# src/allmydata/test/test_immutable.py
# (retrieved from tahoe-lafs/tahoe-lafs.git)
1 from allmydata.test import common
2 from allmydata.interfaces import NotEnoughSharesError
3 from allmydata.util.consumer import download_to_data
4 from allmydata import uri
5 from twisted.internet import defer
6 from twisted.trial import unittest
7 import random
8
9 from foolscap.api import eventually
10 from allmydata.util import log
11
12 from allmydata.immutable.downloader import finder
13
14 import mock
15
class MockNode(object):
    """Minimal stand-in for a download node, used to observe ShareFinder.

    Records how many shares it was handed and fires ``finished_d`` when the
    scenario under test completes (or errbacks it when an invariant the test
    cares about is violated).
    """
    def __init__(self, check_reneging, check_fetch_failed):
        self.got = 0
        self.finished_d = defer.Deferred()
        self.segment_size = 78
        self.guessed_segment_size = 78
        self._no_more_shares = False
        self.check_reneging = check_reneging
        self.check_fetch_failed = check_fetch_failed
        self._si_prefix = 'aa'
        self.have_UEB = True
        self.share_hash_tree = mock.Mock()
        self.share_hash_tree.needed_hashes.return_value = False
        self.on_want_more_shares = None

    def when_finished(self):
        """Return the Deferred that fires when the scenario completes."""
        return self.finished_d

    def get_num_segments(self):
        # (num_segments, authoritative)
        return (5, True)

    def _calculate_sizes(self, guessed_segment_size):
        return {'block_size': 4, 'num_segments': 5}

    def no_more_shares(self):
        # Remember that the share finder declared itself permanently hungry.
        self._no_more_shares = True

    def got_shares(self, shares):
        # If we're checking for reneging, receiving a share after having been
        # told "no more shares, ever" is the bug (ticket #1191).
        if self.check_reneging and self._no_more_shares:
            self.finished_d.errback(unittest.FailTest("The node was told by the share finder that it is destined to remain hungry, then was given another share."))
            return
        self.got += len(shares)
        log.msg("yyy 3 %s.got_shares(%s) got: %s" % (self, shares, self.got))
        if self.got == 3:
            self.finished_d.callback(True)

    def get_desired_ciphertext_hashes(self, *args, **kwargs):
        return iter([])

    def fetch_failed(self, *args, **kwargs):
        # Only report the first failure; finished_d may fire exactly once.
        if self.check_fetch_failed and self.finished_d:
            self.finished_d.errback(unittest.FailTest("The node was told by the segment fetcher that the download failed."))
            self.finished_d = None

    def want_more_shares(self):
        if self.on_want_more_shares:
            self.on_want_more_shares()

    def process_blocks(self, *args, **kwargs):
        if self.finished_d:
            self.finished_d.callback(None)
61
class TestShareFinder(unittest.TestCase):
    def test_no_reneging_on_no_more_shares_ever(self):
        """Regression test for ticket #1191: the ShareFinder must never tell
        its consumer "no more shares, ever" and then deliver another share."""
        # ticket #1191

        # Suppose that K=3 and you send two DYHB requests, the first
        # response offers two shares, and then the last offers one
        # share. If you tell your share consumer "no more shares,
        # ever", and then immediately tell them "oh, and here's
        # another share", then you lose.

        rcap = uri.CHKFileURI('a'*32, 'a'*32, 3, 99, 100)
        vcap = rcap.get_verify_cap()

        class MockServer(object):
            # Fake storage server whose get_buckets answers arrive via
            # foolscap's eventually(), preserving the event ordering that
            # triggers the original bug.
            def __init__(self, buckets):
                self.version = {
                    'http://allmydata.org/tahoe/protocols/storage/v1': {
                        "tolerates-immutable-read-overrun": True
                        }
                    }
                self.buckets = buckets
                self.d = defer.Deferred()
                self.s = None  # the ShareFinder; wired up after construction
            def callRemote(self, methname, *args, **kwargs):
                d = defer.Deferred()

                # Even after the 3rd answer we're still hungry because
                # we're interested in finding a share on a 3rd server
                # so we don't have to download more than one share
                # from the first server. This is actually necessary to
                # trigger the bug.
                def _give_buckets_and_hunger_again():
                    d.callback(self.buckets)
                    self.s.hungry()
                eventually(_give_buckets_and_hunger_again)
                return d
        class MockIServer(object):
            # Minimal IServer wrapper around a MockServer remote reference.
            def __init__(self, serverid, rref):
                self.serverid = serverid
                self.rref = rref
            def get_serverid(self):
                return self.serverid
            def get_rref(self):
                return self.rref
            def name(self):
                return "name-%s" % self.serverid
            def get_version(self):
                return self.rref.version

        # server1 offers two shares, server2 none, server3 one share: with
        # K=3 this forces the "still hungry after the last answer" sequence.
        mockserver1 = MockServer({1: mock.Mock(), 2: mock.Mock()})
        mockserver2 = MockServer({})
        mockserver3 = MockServer({3: mock.Mock()})
        mockstoragebroker = mock.Mock()
        servers = [ MockIServer("ms1", mockserver1),
                    MockIServer("ms2", mockserver2),
                    MockIServer("ms3", mockserver3), ]
        mockstoragebroker.get_servers_for_psi.return_value = servers
        mockdownloadstatus = mock.Mock()
        # check_reneging=True makes the MockNode errback if it is given a
        # share after being told "no more shares, ever".
        mocknode = MockNode(check_reneging=True, check_fetch_failed=True)

        s = finder.ShareFinder(mockstoragebroker, vcap, mocknode, mockdownloadstatus)

        # Wire the finder back into the servers so their answers can
        # immediately re-declare hunger (see callRemote above).
        mockserver1.s = s
        mockserver2.s = s
        mockserver3.s = s

        s.hungry()

        return mocknode.when_finished()
131
class Test(common.ShareManglingMixin, common.ShouldFailMixin, unittest.TestCase):
    """Immutable-download tests that work by deleting or corrupting the
    stored shares (via ShareManglingMixin) and checking how download reacts."""

    def test_test_code(self):
        # The following process of stashing the shares, running
        # replace_shares, and asserting that the new set of shares equals the
        # old is more to test this test code than to test the Tahoe code...
        d = defer.succeed(None)
        d.addCallback(self.find_all_shares)
        stash = [None]
        def _stash_it(res):
            stash[0] = res
            return res
        d.addCallback(_stash_it)

        # The following process of deleting 8 of the shares and asserting
        # that you can't download it is more to test this test code than to
        # test the Tahoe code...
        def _then_delete_8(unused=None):
            self.replace_shares(stash[0], storage_index=self.uri.get_storage_index())
            for i in range(8):
                self._delete_a_share()
        d.addCallback(_then_delete_8)

        def _then_download(unused=None):
            d2 = download_to_data(self.n)

            def _after_download_callb(result):
                self.fail() # should have gotten an errback instead
                return result
            def _after_download_errb(failure):
                failure.trap(NotEnoughSharesError)
                return None # success!
            d2.addCallbacks(_after_download_callb, _after_download_errb)
            return d2
        d.addCallback(_then_download)

        return d

    def test_download(self):
        """ Basic download. (This functionality is more or less already
        tested by test code in other modules, but this module is also going
        to test some more specific things about immutable download.)
        """
        d = defer.succeed(None)
        before_download_reads = self._count_reads()
        def _after_download(unused=None):
            after_download_reads = self._count_reads()
            # A healthy download should need no more than 41 reads; more
            # would indicate a performance regression.
            self.failIf(after_download_reads-before_download_reads > 41,
                        (after_download_reads, before_download_reads))
        d.addCallback(self._download_and_check_plaintext)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_remaining_shares(self):
        """ Test download after 7 random shares (of the 10) have been
        removed."""
        d = defer.succeed(None)
        def _then_delete_7(unused=None):
            for i in range(7):
                self._delete_a_share()
        before_download_reads = self._count_reads()
        d.addCallback(_then_delete_7)
        def _after_download(unused=None):
            after_download_reads = self._count_reads()
            self.failIf(after_download_reads-before_download_reads > 41, (after_download_reads, before_download_reads))
        d.addCallback(self._download_and_check_plaintext)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_shares_with_good_crypttext_hash(self):
        """ Test download after 7 random shares (of the 10) have had their
        crypttext hash tree corrupted."""
        d = defer.succeed(None)
        def _then_corrupt_7(unused=None):
            # list() so shuffle works on Python 3 too, where range() is an
            # immutable lazy sequence and cannot be shuffled in place.
            shnums = list(range(10))
            random.shuffle(shnums)
            for i in shnums[:7]:
                self._corrupt_a_share(None, common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, i)
        d.addCallback(_then_corrupt_7)
        d.addCallback(self._download_and_check_plaintext)
        return d

    def test_download_abort_if_too_many_missing_shares(self):
        """ Test that download gives up quickly when it realizes there aren't
        enough shares out there."""
        for i in range(8):
            self._delete_a_share()
        d = self.shouldFail(NotEnoughSharesError, "delete 8", None,
                            download_to_data, self.n)
        # the new downloader pipelines a bunch of read requests in parallel,
        # so don't bother asserting anything about the number of reads
        return d

    def test_download_abort_if_too_many_corrupted_shares(self):
        """Test that download gives up quickly when it realizes there aren't
        enough uncorrupted shares out there. It should be able to tell
        because the corruption occurs in the sharedata version number, which
        it checks first."""
        d = defer.succeed(None)
        def _then_corrupt_8(unused=None):
            # list() so shuffle works on Python 3 too (range() is not a
            # mutable sequence there).
            shnums = list(range(10))
            random.shuffle(shnums)
            for shnum in shnums[:8]:
                self._corrupt_a_share(None, common._corrupt_sharedata_version_number, shnum)
        d.addCallback(_then_corrupt_8)

        before_download_reads = self._count_reads()
        def _attempt_to_download(unused=None):
            d2 = download_to_data(self.n)

            def _callb(res):
                self.fail("Should have gotten an error from attempt to download, not %r" % (res,))
            def _errb(f):
                self.failUnless(f.check(NotEnoughSharesError))
            d2.addCallbacks(_callb, _errb)
            return d2

        d.addCallback(_attempt_to_download)

        def _after_attempt(unused=None):
            after_download_reads = self._count_reads()
            # To pass this test, you are required to give up before reading
            # all of the share data. Actually, we could give up sooner than
            # 45 reads, but currently our download code does 45 reads. This
            # test then serves as a "performance regression detector" -- if
            # you change download code so that it takes *more* reads, then
            # this test will fail.
            self.failIf(after_download_reads-before_download_reads > 45,
                        (after_download_reads, before_download_reads))
        d.addCallback(_after_attempt)
        return d
266
267
268 # XXX extend these tests to show bad behavior of various kinds from servers:
269 # raising exception from each remove_foo() method, for example
270
271 # XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit
272
273 # TODO: delete this whole file