# -*- coding: utf-8 -*-

import os, shutil
from twisted.trial import unittest
from twisted.internet import defer
from allmydata import uri
from allmydata.util.consumer import download_to_data
from allmydata.immutable import upload
from allmydata.mutable.common import UnrecoverableFileError
from allmydata.storage.common import storage_index_to_dir
from allmydata.test.no_network import GridTestMixin
from allmydata.test.common import ShouldFailMixin
from allmydata.util.pollmixin import PollMixin
from allmydata.interfaces import NotEnoughSharesError

immutable_plaintext = "data" * 10000
mutable_plaintext = "muta" * 10000

class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin,
                             unittest.TestCase):
    # Many of these tests take around 60 seconds on François's ARM buildslave:
    # http://tahoe-lafs.org/buildbot/builders/FranXois%20lenny-armv5tel
    # allmydata.test.test_hung_server.HungServerDownloadTest.test_2_good_8_broken_duplicate_share_fail
    # once ERRORed after 197 seconds on Midnight Magic's NetBSD buildslave:
    # http://tahoe-lafs.org/buildbot/builders/MM%20netbsd4%20i386%20warp
    # MM's buildslave varies a lot in how long it takes to run tests.

    timeout = 240

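    # Helpers. 'servers' is a list of (serverid, server) pairs, as recorded
    # in self.servers by _set_up(); these helpers break, hang, or unhang the
    # corresponding servers in the NoNetworkGrid (self.g).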
    def _break(self, servers):
        for (id, ss) in servers:
            self.g.break_server(id)

    def _hang(self, servers, **kwargs):
        for (id, ss) in servers:
            self.g.hang_server(id, **kwargs)

    def _unhang(self, servers, **kwargs):
        for (id, ss) in servers:
            self.g.unhang_server(id, **kwargs)

    def _hang_shares(self, shnums, **kwargs):
        # hang all servers that are holding the given shares
        hung_serverids = set()
        for (i_shnum, i_serverid, i_sharefile) in self.shares:
            if i_shnum in shnums:
                if i_serverid not in hung_serverids:
                    self.g.hang_server(i_serverid, **kwargs)
                    hung_serverids.add(i_serverid)

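    # The helpers below manipulate share files directly on disk, using the
    # (shnum, serverid, sharefile) tuples recorded in self.shares by _set_up().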
    def _delete_all_shares_from(self, servers):
        serverids = [id for (id, ss) in servers]
        for (i_shnum, i_serverid, i_sharefile) in self.shares:
            if i_serverid in serverids:
                os.unlink(i_sharefile)

    def _corrupt_all_shares_in(self, servers, corruptor_func):
        serverids = [id for (id, ss) in servers]
        for (i_shnum, i_serverid, i_sharefile) in self.shares:
            if i_serverid in serverids:
                self._corrupt_share((i_shnum, i_sharefile), corruptor_func)

    def _copy_all_shares_from(self, from_servers, to_server):
        serverids = [id for (id, ss) in from_servers]
        for (i_shnum, i_serverid, i_sharefile) in self.shares:
            if i_serverid in serverids:
                self._copy_share((i_shnum, i_sharefile), to_server)

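    # Copy one share file into to_server's storage directory, then refresh
    # self.shares via find_uri_shares() and assert that the copy is visible.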
    def _copy_share(self, share, to_server):
        (sharenum, sharefile) = share
        (id, ss) = to_server
        shares_dir = os.path.join(ss.original.storedir, "shares")
        si = uri.from_string(self.uri).get_storage_index()
        si_dir = os.path.join(shares_dir, storage_index_to_dir(si))
        if not os.path.exists(si_dir):
            os.makedirs(si_dir)
        new_sharefile = os.path.join(si_dir, str(sharenum))
        shutil.copy(sharefile, new_sharefile)
        self.shares = self.find_uri_shares(self.uri)
        # Make sure that the storage server has the share.
        self.failUnless((sharenum, ss.original.my_nodeid, new_sharefile)
                        in self.shares)

    def _corrupt_share(self, share, corruptor_func):
        (sharenum, sharefile) = share
        data = open(sharefile, "rb").read()
        newdata = corruptor_func(data)
        os.unlink(sharefile)
        wf = open(sharefile, "wb")
        wf.write(newdata)
        wf.close()

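    # Build a grid, upload a test file (mutable or immutable), and record its
    # URI in self.uri and its share locations in self.shares.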
    def _set_up(self, mutable, testdir, num_clients=1, num_servers=10):
        self.mutable = mutable
        if mutable:
            self.basedir = "hung_server/mutable_" + testdir
        else:
            self.basedir = "hung_server/immutable_" + testdir

        self.set_up_grid(num_clients=num_clients, num_servers=num_servers)

        self.c0 = self.g.clients[0]
        nm = self.c0.nodemaker
        self.servers = [(id, ss) for (id, ss) in nm.storage_broker.get_all_servers()]

        if mutable:
            d = nm.create_mutable_file(mutable_plaintext)
            def _uploaded_mutable(node):
                self.uri = node.get_uri()
                self.shares = self.find_uri_shares(self.uri)
            d.addCallback(_uploaded_mutable)
        else:
            data = upload.Data(immutable_plaintext, convergence="")
            d = self.c0.upload(data)
            def _uploaded_immutable(upload_res):
                self.uri = upload_res.uri
                self.shares = self.find_uri_shares(self.uri)
            d.addCallback(_uploaded_immutable)
        return d

    def _start_download(self):
        n = self.c0.create_node_from_uri(self.uri)
        if self.mutable:
            d = n.download_best_version()
            stage_4_d = None # currently we aren't doing any tests which require this for mutable files
        else:
            d = download_to_data(n)
            #stage_4_d = n._downloader._all_downloads.keys()[0]._stage_4_d # too ugly! FIXME
            stage_4_d = None
        return (d, stage_4_d,)

    def _wait_for_data(self, n):
        if self.mutable:
            d = n.download_best_version()
        else:
            d = download_to_data(n)
        return d

    def _check(self, resultingdata):
        if self.mutable:
            self.failUnlessEqual(resultingdata, mutable_plaintext)
        else:
            self.failUnlessEqual(resultingdata, immutable_plaintext)

    def _download_and_check(self):
        d, stage4d = self._start_download()
        d.addCallback(self._check)
        return d

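    # Downloads that cannot find enough shares fail differently for mutable
    # and immutable files, with different exception types and messages.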
    def _should_fail_download(self):
        if self.mutable:
            return self.shouldFail(UnrecoverableFileError, self.basedir,
                                   "no recoverable versions",
                                   self._download_and_check)
        else:
            return self.shouldFail(NotEnoughSharesError, self.basedir,
                                   "ran out of shares",
                                   self._download_and_check)


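    # The next several tests run each scenario twice, once for immutable and
    # once for mutable files, by chaining both runs onto a single Deferred.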
    def test_10_good_sanity_check(self):
        d = defer.succeed(None)
        for mutable in [False, True]:
            d.addCallback(lambda ign: self._set_up(mutable, "test_10_good_sanity_check"))
            d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_10_good_copied_share(self):
        d = defer.succeed(None)
        for mutable in [False, True]:
            d.addCallback(lambda ign: self._set_up(mutable, "test_10_good_copied_share"))
            d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[2:3], self.servers[0]))
            d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_3_good_7_noshares(self):
        d = defer.succeed(None)
        for mutable in [False, True]:
            d.addCallback(lambda ign: self._set_up(mutable, "test_3_good_7_noshares"))
            d.addCallback(lambda ign: self._delete_all_shares_from(self.servers[3:]))
            d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_2_good_8_broken_fail(self):
        d = defer.succeed(None)
        for mutable in [False, True]:
            d.addCallback(lambda ign: self._set_up(mutable, "test_2_good_8_broken_fail"))
            d.addCallback(lambda ign: self._break(self.servers[2:]))
            d.addCallback(lambda ign: self._should_fail_download())
        return d

    def test_2_good_8_noshares_fail(self):
        d = defer.succeed(None)
        for mutable in [False, True]:
            d.addCallback(lambda ign: self._set_up(mutable, "test_2_good_8_noshares_fail"))
            d.addCallback(lambda ign: self._delete_all_shares_from(self.servers[2:]))
            d.addCallback(lambda ign: self._should_fail_download())
        return d

    def test_2_good_8_broken_copied_share(self):
        d = defer.succeed(None)
        for mutable in [False, True]:
            d.addCallback(lambda ign: self._set_up(mutable, "test_2_good_8_broken_copied_share"))
            d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[2:3], self.servers[0]))
            d.addCallback(lambda ign: self._break(self.servers[2:]))
            d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_2_good_8_broken_duplicate_share_fail(self):
        d = defer.succeed(None)
        for mutable in [False, True]:
            d.addCallback(lambda ign: self._set_up(mutable, "test_2_good_8_broken_duplicate_share_fail"))
            d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[1:2], self.servers[0]))
            d.addCallback(lambda ign: self._break(self.servers[2:]))
            d.addCallback(lambda ign: self._should_fail_download())
        return d

    # The tests below do not currently pass for mutable files.

    def test_3_good_7_hung_immutable(self):
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._set_up(False, "test_3_good_7_hung"))
        d.addCallback(lambda ign: self._hang(self.servers[3:]))
        d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_5_overdue_immutable(self):
        # restrict the ShareFinder to only allow 5 outstanding requests, and
        # arrange for the first 5 servers to hang. Then trigger the OVERDUE
        # timers (simulating 10 seconds having passed), at which point the
        # ShareFinder should send additional queries and finish the download
        # quickly. If we didn't have OVERDUE timers, this test would fail by
        # timing out.
        done = []
        d = self._set_up(False, "test_5_overdue_immutable")
        def _reduce_max_outstanding_requests_and_download(ign):
            self._hang_shares(range(5))
            n = self.c0.create_node_from_uri(self.uri)
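            # reach through private downloader attributes to get this
            # download's ShareFinder, so we can cap its outstanding requests
            # and keep its OVERDUE timers from firing until we trigger them
            # manually below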
            self._sf = n._cnode._node._sharefinder
            self._sf.max_outstanding_requests = 5
            self._sf.OVERDUE_TIMEOUT = 1000.0
            d2 = download_to_data(n)
            # start download, but don't wait for it to complete yet
            def _done(res):
                done.append(res) # we will poll for this later
            d2.addBoth(_done)
        d.addCallback(_reduce_max_outstanding_requests_and_download)
        from foolscap.eventual import fireEventually, flushEventualQueue
        # flush the eventual-send queue to let the download get started; it
        # should stall with all of its allowed requests stuck on hung servers
        d.addCallback(lambda res: fireEventually(res))
        d.addCallback(lambda res: flushEventualQueue())
        d.addCallback(lambda ign: self.failIf(done))
        def _check_waiting(ign):
            # all the share requests should now be stuck waiting
            self.failUnlessEqual(len(self._sf.pending_requests), 5)
            # but none should be marked as OVERDUE until the timers expire
            self.failUnlessEqual(len(self._sf.overdue_requests), 0)
        d.addCallback(_check_waiting)
        def _mark_overdue(ign):
            # declare four requests overdue, allowing new requests to take
            # their place, and leaving one stuck. The finder will keep
            # sending requests until there are 5 non-overdue ones
            # outstanding, at which point we'll have 4 OVERDUE, 1
            # stuck-but-not-overdue, and 4 live requests. All 4 live requests
            # will retire before the download is complete and the ShareFinder
            # is shut off. That will leave 4 OVERDUE and 1
            # stuck-but-not-overdue, for a total of 5 requests in
            # _sf.pending_requests
            for t in self._sf.overdue_timers.values()[:4]:
                t.reset(-1.0)
            # the timers ought to fire before the eventual-send does
            return fireEventually()
        d.addCallback(_mark_overdue)
        def _we_are_done():
            return bool(done)
        d.addCallback(lambda ign: self.poll(_we_are_done))
        def _check_done(ign):
            self.failUnlessEqual(done, [immutable_plaintext])
            self.failUnlessEqual(len(self._sf.pending_requests), 5)
            self.failUnlessEqual(len(self._sf.overdue_requests), 4)
        d.addCallback(_check_done)
        return d

    def test_3_good_7_hung_mutable(self):
        raise unittest.SkipTest("still broken")
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._set_up(True, "test_3_good_7_hung"))
        d.addCallback(lambda ign: self._hang(self.servers[3:]))
        d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_2_good_8_hung_then_1_recovers_immutable(self):
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._set_up(False, "test_2_good_8_hung_then_1_recovers"))
        d.addCallback(lambda ign: self._hang(self.servers[2:3]))
        d.addCallback(lambda ign: self._hang(self.servers[3:]))
        d.addCallback(lambda ign: self._unhang(self.servers[2:3]))
        d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_2_good_8_hung_then_1_recovers_mutable(self):
        raise unittest.SkipTest("still broken")
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._set_up(True, "test_2_good_8_hung_then_1_recovers"))
        d.addCallback(lambda ign: self._hang(self.servers[2:3]))
        d.addCallback(lambda ign: self._hang(self.servers[3:]))
        d.addCallback(lambda ign: self._unhang(self.servers[2:3]))
        d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_2_good_8_hung_then_1_recovers_with_2_shares_immutable(self):
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._set_up(False, "test_2_good_8_hung_then_1_recovers_with_2_shares"))
        d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[0:1], self.servers[2]))
        d.addCallback(lambda ign: self._hang(self.servers[2:3]))
        d.addCallback(lambda ign: self._hang(self.servers[3:]))
        d.addCallback(lambda ign: self._unhang(self.servers[2:3]))
        d.addCallback(lambda ign: self._download_and_check())
        return d

    def test_2_good_8_hung_then_1_recovers_with_2_shares_mutable(self):
        raise unittest.SkipTest("still broken")
        d = defer.succeed(None)
        d.addCallback(lambda ign: self._set_up(True, "test_2_good_8_hung_then_1_recovers_with_2_shares"))
        d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[0:1], self.servers[2]))
        d.addCallback(lambda ign: self._hang(self.servers[2:3]))
        d.addCallback(lambda ign: self._hang(self.servers[3:]))
        d.addCallback(lambda ign: self._unhang(self.servers[2:3]))
        d.addCallback(lambda ign: self._download_and_check())
        return d