]> git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/blob - src/allmydata/test/test_introducer.py
Change relative imports to absolute
[tahoe-lafs/tahoe-lafs.git] / src / allmydata / test / test_introducer.py
1
2 import os, re
3 from base64 import b32decode
4
5 from twisted.trial import unittest
6 from twisted.internet import defer
7 from twisted.python import log
8
9 from foolscap.api import Tub, Referenceable, fireEventually, flushEventualQueue
10 from twisted.application import service
11 from allmydata.interfaces import InsufficientVersionError
12 from allmydata.introducer.client import IntroducerClient
13 from allmydata.introducer.server import IntroducerService
14 # test compatibility with old introducer .tac files
15 from allmydata.introducer import IntroducerNode
16 from allmydata.util import pollmixin
17 import allmydata.test.common_util as testutil
18
class LoggingMultiService(service.MultiService):
    """A MultiService whose log() method forwards messages to twisted's log."""
    def log(self, msg, **kwargs):
        # delegate straight to the twisted logging facility
        log.msg(msg, **kwargs)
22
class Node(testutil.SignalMixin, unittest.TestCase):
    def test_loadable(self):
        # An IntroducerNode created from a fresh basedir (old-style .tac
        # compatibility entry point) starts and stops without errors.
        basedir = "introducer.IntroducerNode.test_loadable"
        os.mkdir(basedir)
        node = IntroducerNode(basedir)
        d = fireEventually(None)
        d.addCallback(lambda ign: node.startService())
        d.addCallback(lambda ign: node.when_tub_ready())
        d.addCallback(lambda ign: node.stopService())
        d.addCallback(flushEventualQueue)
        return d
34
class ServiceMixin:
    # Gives each test a running LoggingMultiService parent, and tears it
    # down afterwards (flushing foolscap's eventual-send queue so nothing
    # fires after the test ends).
    def setUp(self):
        self.parent = LoggingMultiService()
        self.parent.startService()

    def tearDown(self):
        log.msg("TestIntroducer.tearDown")
        d = defer.succeed(None)
        d.addCallback(lambda ign: self.parent.stopService())
        d.addCallback(flushEventualQueue)
        return d
45
class Introducer(ServiceMixin, unittest.TestCase, pollmixin.PollMixin):

    def test_create(self):
        # an IntroducerClient can be instantiated without a Tub
        ic = IntroducerClient(None, "introducer.furl", u"my_nickname",
                              "my_version", "oldest_version")
        self.failUnless(isinstance(ic, IntroducerClient))

    def test_listen(self):
        # an IntroducerService can be attached to a running service parent
        svc = IntroducerService()
        svc.setServiceParent(self.parent)

    def test_duplicate(self):
        svc = IntroducerService()
        furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@192.168.69.247:36106,127.0.0.1:36106/gydnpigj2ja2qr2srq4ikjwnl7xfgbra"
        furl2 = "pb://ttwwooyunnyhzs7r6vdonnm2hpi52w6y@192.168.69.247:36111,127.0.0.1:36106/ttwwoogj2ja2qr2srq4ikjwnl7xfgbra"
        ann1 = (furl1, "storage", "RIStorage", "nick1", "ver23", "ver0")
        ann1b = (furl1, "storage", "RIStorage", "nick1", "ver24", "ver0")
        ann2 = (furl2, "storage", "RIStorage", "nick2", "ver30", "ver0")

        def check_counts(num_announcements, num_subscribers):
            self.failUnlessEqual(len(svc.get_announcements()),
                                 num_announcements)
            self.failUnlessEqual(len(svc.get_subscribers()), num_subscribers)

        check_counts(0, 0)
        svc.remote_publish(ann1)
        check_counts(1, 0)
        svc.remote_publish(ann2)
        check_counts(2, 0)
        # re-publishing an announcement for a FURL that is already known
        # (even with a different version) must replace the old entry rather
        # than add a new one
        svc.remote_publish(ann1b)
        check_counts(2, 0)
75
class SystemTestMixin(ServiceMixin, pollmixin.PollMixin):

    def create_tub(self, portnum=0):
        """Create self.central_tub listening on 'portnum' (0 = any free port).

        The chosen port is stored in self.central_portnum so the tub can
        later be recreated on the same port (e.g. after a restart).
        """
        certfile = os.path.join(self.basedir, "tub.pem")
        self.central_tub = tub = Tub(certFile=certfile)
        #tub.setOption("logLocalFailures", True)
        #tub.setOption("logRemoteFailures", True)
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        listener = tub.listenOn("tcp:%d" % portnum)
        self.central_portnum = listener.getPortnum()
        if portnum != 0:
            assert self.central_portnum == portnum
        tub.setLocation("localhost:%d" % self.central_portnum)
90
class SystemTest(SystemTestMixin, unittest.TestCase):

    def test_system(self):
        self.basedir = "introducer/SystemTest/system"
        os.makedirs(self.basedir)
        return self.do_system_test(IntroducerService)
    test_system.timeout = 480 # occasionally takes longer than 350s on "draco"

    def do_system_test(self, create_introducer):
        # Full introducer round-trip: several clients publish and subscribe,
        # then the system must survive (1) a restart of the introducer's Tub
        # and (2) a full restart of the introducer itself.
        # 'create_introducer' is a zero-argument factory for the introducer
        # service under test.
        self.create_tub()
        introducer = create_introducer()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        NUMCLIENTS = 5
        # we have 5 clients who publish themselves, and an extra one which
        # does not. When the connections are fully established, all six nodes
        # should have 5 connections each.

        clients = []
        tubs = {}
        received_announcements = {}
        NUM_SERVERS = NUMCLIENTS
        subscribing_clients = []
        publishing_clients = []

        for i in range(NUMCLIENTS+1):
            # each client gets its own Tub on its own port
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            l = tub.listenOn("tcp:0")
            portnum = l.getPortnum()
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl, u"nickname-%d" % i,
                                 "version", "oldest")
            received_announcements[c] = {}
            # 'announcements' is bound per-client via the subscribe_to()
            # extra argument, avoiding the late-binding-closure pitfall
            def got(serverid, ann_d, announcements):
                announcements[serverid] = ann_d
            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)

            if i < NUMCLIENTS:
                node_furl = tub.registerReference(Referenceable())
                c.publish(node_furl, "storage", "ri_name")
                publishing_clients.append(c)
            # the last one does not publish anything

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_all_connections():
            # done once every subscriber has heard about all NUM_SERVERS
            for c in subscribing_clients:
                if len(received_announcements[c]) < NUM_SERVERS:
                    return False
            return True
        d = self.poll(_wait_for_all_connections)

        def _check1(res):
            # phase 1: verify the steady state after initial connection
            log.msg("doing _check1")
            dc = introducer._debug_counts
            self.failUnlessEqual(dc["inbound_message"], NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnless(dc["outbound_message"])

            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"],
                                     NUM_SERVERS)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"],
                                     NUM_SERVERS)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_SERVERS)

                # announcements are keyed by the publisher's binary nodeid
                nodeid0 = b32decode(tubs[clients[0]].tubID.upper())
                ann_d = anns[nodeid0]
                nick = ann_d["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, u"nickname-0")
            for c in publishing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["outbound_message"], 1)
        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss():
            # done once no client thinks it is connected any more
            for c in clients:
                if c.connected_to_introducer():
                    return False
            return True
        d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")

            # record the current counters so the next poll can wait for the
            # deltas that the reconnect is expected to produce
            dc = introducer._debug_counts
            self.expected_count = dc["inbound_message"] + NUM_SERVERS
            self.expected_subscribe_count = dc["inbound_subscribe"] + NUMCLIENTS+1
            introducer._debug0 = dc["outbound_message"]
            for c in subscribing_clients:
                cdc = c._debug_counts
                c._debug0 = cdc["inbound_message"]

            # reuse the same port so the old FURL remains valid
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer_tub)

        def _wait_for_introducer_reconnect():
            # wait until:
            #  all clients are connected
            #  the introducer has received publish messages from all of them
            #  the introducer has received subscribe messages from all of them
            #  the introducer has sent (duplicate) announcements to all of them
            #  all clients have received (duplicate) announcements
            dc = introducer._debug_counts
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            if dc["inbound_message"] < self.expected_count:
                return False
            if dc["inbound_subscribe"] < self.expected_subscribe_count:
                return False
            for c in subscribing_clients:
                cdc = c._debug_counts
                if cdc["inbound_message"] < c._debug0+1:
                    return False
            return True
        d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect))

        def _check2(res):
            # phase 2: after the Tub restart, republishes must be counted as
            # duplicates by the (still-alive) introducer
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = introducer._debug_counts
            self.failUnlessEqual(dc["inbound_message"], 2*NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_duplicate"], NUM_SERVERS)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["outbound_message"],
                                 introducer._debug0 + len(subscribing_clients))
            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_SERVERS)
        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(lambda res: self.poll(_wait_for_introducer_loss))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)

            for c in subscribing_clients:
                # record some counters for later comparison. Stash the values
                # on the client itself, because I'm lazy.
                cdc = c._debug_counts
                c._debug1 = cdc["inbound_announcement"]
                c._debug2 = cdc["inbound_message"]
                c._debug3 = cdc["new_announcement"]
            # brand-new introducer instance: it has an empty announcement
            # table, so all announcements it relays are "new" from its side
            newintroducer = create_introducer()
            self.expected_message_count = NUM_SERVERS
            self.expected_announcement_count = NUM_SERVERS*len(subscribing_clients)
            self.expected_subscribe_count = len(subscribing_clients)
            newfurl = self.central_tub.registerReference(newintroducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer)
        def _wait_for_introducer_reconnect2():
            # wait until:
            #  all clients are connected
            #  the introducer has received publish messages from all of them
            #  the introducer has received subscribe messages from all of them
            #  the introducer has sent announcements for everybody to everybody
            #  all clients have received all the (duplicate) announcements
            # at that point, the system should be quiescent
            dc = introducer._debug_counts
            for c in clients:
                if not c.connected_to_introducer():
                    return False
            if dc["inbound_message"] < self.expected_message_count:
                return False
            if dc["outbound_announcements"] < self.expected_announcement_count:
                return False
            if dc["inbound_subscribe"] < self.expected_subscribe_count:
                return False
            for c in subscribing_clients:
                cdc = c._debug_counts
                if cdc["inbound_announcement"] < c._debug1+NUM_SERVERS:
                    return False
            return True
        d.addCallback(lambda res: self.poll(_wait_for_introducer_reconnect2))

        def _check3(res):
            # phase 3: after the full restart, clients must treat the
            # re-distributed announcements as duplicates, not as new
            log.msg("doing _check3")
            for c in clients:
                self.failUnless(c.connected_to_introducer())
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_announcement"] > c._debug1)
                self.failUnless(cdc["inbound_message"] > c._debug2)
                # there should have been no new announcements
                self.failUnlessEqual(cdc["new_announcement"], c._debug3)
                # and the right number of duplicate ones. There were
                # NUM_SERVERS from the servertub restart, and there should be
                # another NUM_SERVERS now
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     2*NUM_SERVERS)

        d.addCallback(_check3)
        return d
334
class TooNewServer(IntroducerService):
    # An introducer that advertises only a far-future protocol version, so
    # a v1 client connecting to it must notice the missing 'v1' entry.
    VERSION = {
        "http://allmydata.org/tahoe/protocols/introducer/v999": { },
        "application-version": "greetings from the crazy future",
        }
340
class NonV1Server(SystemTestMixin, unittest.TestCase):
    # if the 1.3.0 client connects to a server that doesn't provide the 'v1'
    # protocol, it is supposed to provide a useful error instead of a weird
    # exception.

    def test_failure(self):
        self.basedir = "introducer/NonV1Server/failure"
        os.makedirs(self.basedir)
        self.create_tub()
        # serve an introducer that only speaks a far-future protocol version
        i = TooNewServer()
        i.setServiceParent(self.parent)
        self.introducer_furl = self.central_tub.registerReference(i)

        # build a client on its own Tub and point it at that server
        tub = Tub()
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        l = tub.listenOn("tcp:0")
        portnum = l.getPortnum()
        tub.setLocation("localhost:%d" % portnum)

        c = IntroducerClient(tub, self.introducer_furl,
                             u"nickname-client", "version", "oldest")
        announcements = {}
        def got(serverid, ann_d):
            announcements[serverid] = ann_d
        c.subscribe_to("storage", got)

        c.setServiceParent(self.parent)

        # now we wait for it to connect and notice the bad version

        def _got_bad():
            # either outcome ends the poll: an error was recorded, or the
            # client (unexpectedly) managed to set up a publisher
            return bool(c._introducer_error) or bool(c._publisher)
        d = self.poll(_got_bad)
        def _done(res):
            # the client must have recorded an InsufficientVersionError
            # failure, not connected successfully
            self.failUnless(c._introducer_error)
            self.failUnless(c._introducer_error.check(InsufficientVersionError))
        d.addCallback(_done)
        return d
380
class DecodeFurl(unittest.TestCase):
    def test_decode(self):
        # make sure we have a working base64.b32decode. The one in
        # python2.4.[01] was broken.
        furl = 'pb://t5g7egomnnktbpydbuijt6zgtmw4oqi5@127.0.0.1:51857/hfzv36i'
        match = re.match(r'pb://(\w+)@', furl)
        assert match
        tubid_b32 = match.group(1)
        nodeid = b32decode(tubid_b32.upper())
        expected = "\x9fM\xf2\x19\xcckU0\xbf\x03\r\x10\x99\xfb&\x9b-\xc7A\x1d"
        self.failUnlessEqual(nodeid, expected)
390