import os, shutil
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.python import log
from twisted.internet import defer
from foolscap.api import fireEventually

import allmydata # for __full_version__
from allmydata import uri, monitor, client
from allmydata.immutable import upload, encode
from allmydata.interfaces import FileTooLargeError, UploadUnhappinessError
from allmydata.util.assertutil import precondition
from allmydata.util.deferredutil import DeferredListShouldSucceed
from allmydata.test.no_network import GridTestMixin
from allmydata.test.common_util import ShouldFailMixin
from allmydata.util.happinessutil import servers_of_happiness, \
                                         shares_by_server, merge_peers
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.server import storage_index_to_dir

MiB = 1024*1024

def extract_uri(results):
    return results.uri

# Some of these took longer than 480 seconds on Zandr's arm box, but this may
# have been due to an earlier test ERROR'ing out due to timeout, which seems
# to screw up subsequent tests.
timeout = 960

class Uploadable(unittest.TestCase):
    def shouldEqual(self, data, expected):
        self.failUnless(isinstance(data, list))
        for e in data:
            self.failUnless(isinstance(e, str))
        s = "".join(data)
        self.failUnlessEqual(s, expected)

    def test_filehandle_random_key(self):
        return self._test_filehandle(convergence=None)

    def test_filehandle_convergent_encryption(self):
        return self._test_filehandle(convergence="some convergence string")

    def _test_filehandle(self, convergence):
        s = StringIO("a"*41)
        u = upload.FileHandle(s, convergence=convergence)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close()) # this doesn't close the filehandle
        d.addCallback(lambda res: s.close()) # that privilege is reserved for us
        return d

    def test_filename(self):
        basedir = "upload/Uploadable/test_filename"
        os.makedirs(basedir)
        fn = os.path.join(basedir, "file")
        f = open(fn, "w")
        f.write("a"*41)
        f.close()
        u = upload.FileName(fn, convergence=None)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d

    def test_data(self):
        s = "a"*41
        u = upload.Data(s, convergence=None)
        d = u.get_size()
        d.addCallback(self.failUnlessEqual, 41)
        d.addCallback(lambda res: u.read(1))
        d.addCallback(self.shouldEqual, "a")
        d.addCallback(lambda res: u.read(80))
        d.addCallback(self.shouldEqual, "a"*40)
        d.addCallback(lambda res: u.close())
        return d

class ServerError(Exception):
    pass

class SetDEPMixin:
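    # In Tahoe-LAFS terms, "k" is the number of shares needed to reconstruct
    # a file, "n" is the total number of shares produced, and "happy" is the
    # servers-of-happiness threshold that an upload must satisfy.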
    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        p = {"k": k,
             "happy": happy,
             "n": n,
             "max_segment_size": max_segsize,
             }
        self.node.DEFAULT_ENCODING_PARAMETERS = p

class FakeStorageServer:
    def __init__(self, mode):
        self.mode = mode
        self.allocated = []
        self.queries = 0
        self.version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
                         { "maximum-immutable-share-size": 2**32 },
                         "application-version": str(allmydata.__full_version__),
                         }
        if mode == "small":
            self.version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
                             { "maximum-immutable-share-size": 10 },
                             "application-version": str(allmydata.__full_version__),
                             }


    def callRemote(self, methname, *args, **kwargs):
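        # fireEventually() (from foolscap.api) returns a Deferred that fires
        # on a later reactor turn, so these fake calls stay asynchronous,
        # like real remote invocations.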
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

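    # allocate_buckets mimics the remote storage-server API: it returns a
    # tuple of (set of sharenums already stored, dict mapping each newly
    # allocated sharenum to a BucketWriter-like object).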
    def allocate_buckets(self, storage_index, renew_secret, cancel_secret,
                         sharenums, share_size, canary):
        #print "FakeStorageServer.allocate_buckets(num=%d, size=%d)" % (len(sharenums), share_size)
        if self.mode == "first-fail":
            if self.queries == 0:
                raise ServerError
        if self.mode == "second-fail":
            if self.queries == 1:
                raise ServerError
        self.queries += 1
        if self.mode == "full":
            return (set(), {},)
        elif self.mode == "already got them":
            return (set(sharenums), {},)
        else:
            for shnum in sharenums:
                self.allocated.append( (storage_index, shnum) )
            return (set(),
                    dict([( shnum, FakeBucketWriter(share_size) )
                          for shnum in sharenums]),
                    )

class FakeBucketWriter:
    # a diagnostic version of storageserver.BucketWriter
    def __init__(self, size):
        self.data = StringIO()
        self.closed = False
        self._size = size

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, "remote_" + methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d


    def callRemoteOnly(self, methname, *args, **kwargs):
        d = self.callRemote(methname, *args, **kwargs)
        del d # callRemoteOnly ignores this
        return None


    def remote_write(self, offset, data):
        precondition(not self.closed)
        precondition(offset >= 0)
        precondition(offset+len(data) <= self._size,
                     "offset=%d + data=%d > size=%d" %
                     (offset, len(data), self._size))
        self.data.seek(offset)
        self.data.write(data)

    def remote_close(self):
        precondition(not self.closed)
        self.closed = True

    def remote_abort(self):
        pass

class FakeClient:
    DEFAULT_ENCODING_PARAMETERS = {"k":25,
                                   "happy": 25,
                                   "n": 100,
                                   "max_segment_size": 1*MiB,
                                   }
    def __init__(self, mode="good", num_servers=50):
        self.num_servers = num_servers
        if type(mode) is str:
            mode = dict([(i, mode) for i in range(num_servers)])
        peers = [ ("%20d"%fakeid, FakeStorageServer(mode[fakeid]))
                  for fakeid in range(self.num_servers) ]
        self.storage_broker = StorageFarmBroker(None, permute_peers=True)
        for (serverid, server) in peers:
            self.storage_broker.test_add_server(serverid, server)
        self.last_peers = [p[1] for p in peers]

    def log(self, *args, **kwargs):
        pass
    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS
    def get_storage_broker(self):
        return self.storage_broker
    _secret_holder = client.SecretHolder("lease secret", "convergence secret")

class GotTooFarError(Exception):
    pass

class GiganticUploadable(upload.FileHandle):
    def __init__(self, size):
        self._size = size
        self._fp = 0

    def get_encryption_key(self):
        return defer.succeed("\x00" * 16)
    def get_size(self):
        return defer.succeed(self._size)
    def read(self, length):
        left = self._size - self._fp
        length = min(left, length)
        self._fp += length
        if self._fp > 1000000:
            # terminate the test early.
            raise GotTooFarError("we shouldn't be allowed to get this far")
        return defer.succeed(["\x00" * length])
    def close(self):
        pass

DATA = """
Once upon a time, there was a beautiful princess named Buttercup. She lived
in a magical land where every file was stored securely among millions of
machines, and nobody ever worried about their data being lost ever again.
The End.
"""
assert len(DATA) > upload.Uploader.URI_LIT_SIZE_THRESHOLD
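# Note: files at or below URI_LIT_SIZE_THRESHOLD are stored inline in a
# LiteralFileURI rather than erasure-coded and uploaded, so DATA must exceed
# the threshold for the CHK upload paths below to be exercised.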

SIZE_ZERO = 0
SIZE_SMALL = 16
SIZE_LARGE = len(DATA)

def upload_data(uploader, data):
    u = upload.Data(data, convergence=None)
    return uploader.upload(u)
def upload_filename(uploader, filename):
    u = upload.FileName(filename, convergence=None)
    return uploader.upload(u)
def upload_filehandle(uploader, fh):
    u = upload.FileHandle(fh, convergence=None)
    return uploader.upload(u)

class GoodServer(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
    def setUp(self):
        self.node = FakeClient(mode="good")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _check_small(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.LiteralFileURI))
        self.failUnlessEqual(len(u.data), size)

    def _check_large(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.get_storage_index(), str))
        self.failUnlessEqual(len(u.get_storage_index()), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def get_data(self, size):
        return DATA[:size]

    def test_too_large(self):
        # we've removed the 4GiB share size limit (see ticket #346 for
        # details), but still have an 8-byte field, so the limit is now
        # 2**64, so make sure we reject files larger than that.
        k = 3; happy = 7; n = 10
        self.set_encoding_parameters(k, happy, n)
        big = k*(2**64)
        data1 = GiganticUploadable(big)
        d = self.shouldFail(FileTooLargeError, "test_too_large-data1",
                            "This file is too large to be uploaded (data_size)",
                            self.u.upload, data1)
        data2 = GiganticUploadable(big-3)
        d.addCallback(lambda res:
                      self.shouldFail(FileTooLargeError,
                                      "test_too_large-data2",
                                      "This file is too large to be uploaded (offsets)",
                                      self.u.upload, data2))
        # I don't know where the actual limit is... it depends upon how large
        # the hash trees wind up. It's somewhere close to k*4GiB-ln2(size).
        return d

    def test_data_zero(self):
        data = self.get_data(SIZE_ZERO)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_data_small(self):
        data = self.get_data(SIZE_SMALL)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_data_large(self):
        data = self.get_data(SIZE_LARGE)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_data_large_odd_segments(self):
        data = self.get_data(SIZE_LARGE)
        segsize = int(SIZE_LARGE / 2.5)
        # we want 3 segments, since that's not a power of two
        self.set_encoding_parameters(25, 25, 100, segsize)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filehandle_zero(self):
        data = self.get_data(SIZE_ZERO)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filehandle_small(self):
        data = self.get_data(SIZE_SMALL)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filehandle_large(self):
        data = self.get_data(SIZE_LARGE)
        d = upload_filehandle(self.u, StringIO(data))
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_filename_zero(self):
        fn = "Uploader-test_filename_zero.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_ZERO)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_ZERO)
        return d

    def test_filename_small(self):
        fn = "Uploader-test_filename_small.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_SMALL)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_small, SIZE_SMALL)
        return d

    def test_filename_large(self):
        fn = "Uploader-test_filename_large.data"
        f = open(fn, "wb")
        data = self.get_data(SIZE_LARGE)
        f.write(data)
        f.close()
        d = upload_filename(self.u, fn)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

class ServerErrors(unittest.TestCase, ShouldFailMixin, SetDEPMixin):
    def make_node(self, mode, num_servers=10):
        self.node = FakeClient(mode, num_servers)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _check_large(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.get_storage_index(), str))
        self.failUnlessEqual(len(u.get_storage_index()), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def test_first_error(self):
        mode = dict([(0,"good")] + [(i,"first-fail") for i in range(1,10)])
        self.make_node(mode)
        self.set_encoding_parameters(k=25, happy=1, n=50)
        d = upload_data(self.u, DATA)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_first_error_all(self):
        self.make_node("first-fail")
        d = self.shouldFail(UploadUnhappinessError, "first_error_all",
                            "peer selection failed",
                            upload_data, self.u, DATA)
        def _check((f,)):
            self.failUnlessIn("placed 0 shares out of 100 total", str(f.value))
            # there should also be a 'last failure was' message
            self.failUnlessIn("ServerError", str(f.value))
        d.addCallback(_check)
        return d

    def test_second_error(self):
        # we want to make sure we make it to a third pass. This means that
        # the first pass was insufficient to place all shares, and at least
        # one of the second-pass servers (other than the last one) accepted
        # a share (so we'll believe that a third pass will be useful). (if
        # everyone but the last server throws an error, then we'll send all
        # the remaining shares to the last server at the end of the second
        # pass, and if that succeeds, we won't make it to a third pass).
        #
        # we can achieve this 97.5% of the time by using 40 servers, having
        # 39 of them fail on the second request, leaving only one to succeed
        # on the second request. (we need to keep the number of servers low
        # enough to ensure a second pass with 100 shares).
        mode = dict([(0,"good")] + [(i,"second-fail") for i in range(1,40)])
        self.make_node(mode, 40)
        d = upload_data(self.u, DATA)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        return d

    def test_second_error_all(self):
        self.make_node("second-fail")
        d = self.shouldFail(UploadUnhappinessError, "second_error_all",
                            "peer selection failed",
                            upload_data, self.u, DATA)
        def _check((f,)):
            self.failUnlessIn("placed 10 shares out of 100 total", str(f.value))
            # there should also be a 'last failure was' message
            self.failUnlessIn("ServerError", str(f.value))
        d.addCallback(_check)
        return d

class FullServer(unittest.TestCase):
    def setUp(self):
        self.node = FakeClient(mode="full")
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def _should_fail(self, f):
        self.failUnless(isinstance(f, Failure) and f.check(UploadUnhappinessError), f)

    def test_data_large(self):
        data = DATA
        d = upload_data(self.u, data)
        d.addBoth(self._should_fail)
        return d

class PeerSelection(unittest.TestCase):

    def make_client(self, num_servers=50):
        self.node = FakeClient(mode="good", num_servers=num_servers)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

    def get_data(self, size):
        return DATA[:size]

    def _check_large(self, newuri, size):
        u = uri.from_string(newuri)
        self.failUnless(isinstance(u, uri.CHKFileURI))
        self.failUnless(isinstance(u.get_storage_index(), str))
        self.failUnlessEqual(len(u.get_storage_index()), 16)
        self.failUnless(isinstance(u.key, str))
        self.failUnlessEqual(len(u.key), 16)
        self.failUnlessEqual(u.size, size)

    def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB):
        p = {"k": k,
             "happy": happy,
             "n": n,
             "max_segment_size": max_segsize,
             }
        self.node.DEFAULT_ENCODING_PARAMETERS = p

    def test_one_each(self):
        # if we have 50 shares, and there are 50 peers, and they all accept a
        # share, we should get exactly one share per peer

        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(25, 30, 50)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 1)
                self.failUnlessEqual(p.queries, 1)
        d.addCallback(_check)
        return d

    def test_two_each(self):
        # if we have 100 shares, and there are 50 peers, and they all accept
        # all shares, we should get exactly two shares per peer

        self.make_client()
        data = self.get_data(SIZE_LARGE)
        # if there are 50 peers, then happy needs to be <= 50
        self.set_encoding_parameters(50, 50, 100)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 2)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_one_each_plus_one_extra(self):
        # if we have 51 shares, and there are 50 peers, then one peer gets
        # two shares and the rest get just one

        self.make_client()
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(24, 41, 51)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            got_one = []
            got_two = []
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnless(len(allocated) in (1,2), len(allocated))
                if len(allocated) == 1:
                    self.failUnlessEqual(p.queries, 1)
                    got_one.append(p)
                else:
                    self.failUnlessEqual(p.queries, 2)
                    got_two.append(p)
            self.failUnlessEqual(len(got_one), 49)
            self.failUnlessEqual(len(got_two), 1)
        d.addCallback(_check)
        return d

    def test_four_each(self):
        # if we have 200 shares, and there are 50 peers, then each peer gets
        # 4 shares. The design goal is to accomplish this with only two
        # queries per peer.

        self.make_client()
        data = self.get_data(SIZE_LARGE)
        # if there are 50 peers, then happy should be no more than 50 if
        # we want this to work.
        self.set_encoding_parameters(100, 50, 200)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            for p in self.node.last_peers:
                allocated = p.allocated
                self.failUnlessEqual(len(allocated), 4)
                self.failUnlessEqual(p.queries, 2)
        d.addCallback(_check)
        return d

    def test_three_of_ten(self):
        # if we have 10 shares and 3 servers, I want to see 3+3+4 rather than
        # 4+4+2

        self.make_client(3)
        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(3, 3, 10)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            counts = {}
            for p in self.node.last_peers:
                allocated = p.allocated
                counts[len(allocated)] = counts.get(len(allocated), 0) + 1
            histogram = [counts.get(i, 0) for i in range(5)]
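            # histogram[i] counts how many servers were allocated exactly i
            # shares; we expect two servers holding 3 shares and one holding 4.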
            self.failUnlessEqual(histogram, [0,0,0,2,1])
        d.addCallback(_check)
        return d

    def test_some_big_some_small(self):
        # 10 shares, 20 servers, but half the servers don't support a
        # share-size large enough for our file
        mode = dict([(i,{0:"good",1:"small"}[i%2]) for i in range(20)])
        self.node = FakeClient(mode, num_servers=20)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node

        data = self.get_data(SIZE_LARGE)
        self.set_encoding_parameters(3, 5, 10)
        d = upload_data(self.u, data)
        d.addCallback(extract_uri)
        d.addCallback(self._check_large, SIZE_LARGE)
        def _check(res):
            # we should have put one share each on the big peers, and zero
            # shares on the small peers
            total_allocated = 0
            for p in self.node.last_peers:
                if p.mode == "good":
                    self.failUnlessEqual(len(p.allocated), 1)
                elif p.mode == "small":
                    self.failUnlessEqual(len(p.allocated), 0)
                total_allocated += len(p.allocated)
            self.failUnlessEqual(total_allocated, 10)
        d.addCallback(_check)
        return d


class StorageIndex(unittest.TestCase):
    def test_params_must_matter(self):
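        # For CHK uploads, the encryption key is derived from the file's
        # contents, the convergence secret, and the encoding parameters, and
        # the storage index is derived from that key; that is why each
        # variation below should (or should not) change the storage index.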
        DATA = "I am some data"
        u = upload.Data(DATA, convergence="")
        eu = upload.EncryptAnUploadable(u)
        d1 = eu.get_storage_index()

        # CHK means the same data should encrypt the same way
        u = upload.Data(DATA, convergence="")
        eu = upload.EncryptAnUploadable(u)
        d1a = eu.get_storage_index()

        # but if we use a different convergence string it should be different
        u = upload.Data(DATA, convergence="wheee!")
        eu = upload.EncryptAnUploadable(u)
        d1salt1 = eu.get_storage_index()

        # and if we add yet a different convergence it should be different again
        u = upload.Data(DATA, convergence="NOT wheee!")
        eu = upload.EncryptAnUploadable(u)
        d1salt2 = eu.get_storage_index()

        # and if we use the first string again it should be the same as last time
        u = upload.Data(DATA, convergence="wheee!")
        eu = upload.EncryptAnUploadable(u)
        d1salt1a = eu.get_storage_index()

        # and if we change the encoding parameters, it should be different
        # (from the same convergence string with different encoding parameters)
        u = upload.Data(DATA, convergence="")
        u.encoding_param_k = u.default_encoding_param_k + 1
        eu = upload.EncryptAnUploadable(u)
        d2 = eu.get_storage_index()

        # and if we use a random key, it should be different than the CHK
        u = upload.Data(DATA, convergence=None)
        eu = upload.EncryptAnUploadable(u)
        d3 = eu.get_storage_index()
        # and different from another instance
        u = upload.Data(DATA, convergence=None)
        eu = upload.EncryptAnUploadable(u)
        d4 = eu.get_storage_index()

        d = DeferredListShouldSucceed([d1,d1a,d1salt1,d1salt2,d1salt1a,d2,d3,d4])
        def _done(res):
            si1, si1a, si1salt1, si1salt2, si1salt1a, si2, si3, si4 = res
            self.failUnlessEqual(si1, si1a)
            self.failIfEqual(si1, si2)
            self.failIfEqual(si1, si3)
            self.failIfEqual(si1, si4)
            self.failIfEqual(si3, si4)
            self.failIfEqual(si1salt1, si1)
            self.failIfEqual(si1salt1, si1salt2)
            self.failIfEqual(si1salt2, si1)
            self.failUnlessEqual(si1salt1, si1salt1a)
        d.addCallback(_done)
        return d

class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
    ShouldFailMixin):
    def _do_upload_with_broken_servers(self, servers_to_break):
        """
        I act like a normal upload, but before I send the results of
        Tahoe2PeerSelector to the Encoder, I break the first servers_to_break
        PeerTrackers in the used_peers part of the return result.
        """
        assert self.g, "I tried to find a grid at self.g, but failed"
        broker = self.g.clients[0].storage_broker
        sh     = self.g.clients[0]._secret_holder
        data = upload.Data("data" * 10000, convergence="")
        data.encoding_param_k = 3
        data.encoding_param_happy = 4
        data.encoding_param_n = 10
        uploadable = upload.EncryptAnUploadable(data)
        encoder = encode.Encoder()
        encoder.set_encrypted_uploadable(uploadable)
        status = upload.UploadStatus()
        selector = upload.Tahoe2PeerSelector("dglev", "test", status)
        storage_index = encoder.get_param("storage_index")
        share_size = encoder.get_param("share_size")
        block_size = encoder.get_param("block_size")
        num_segments = encoder.get_param("num_segments")
        d = selector.get_shareholders(broker, sh, storage_index,
                                      share_size, block_size, num_segments,
                                      10, 3, 4)
        def _have_shareholders((used_peers, already_peers)):
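            # Break the first servers_to_break PeerTrackers by aborting every
            # bucket they hold, before the Encoder writes anything to them.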
            assert servers_to_break <= len(used_peers)
            for index in xrange(servers_to_break):
                server = list(used_peers)[index]
                for share in server.buckets.keys():
                    server.buckets[share].abort()
            buckets = {}
            servermap = already_peers.copy()
            for peer in used_peers:
                buckets.update(peer.buckets)
                for bucket in peer.buckets:
                    servermap.setdefault(bucket, set()).add(peer.peerid)
            encoder.set_shareholders(buckets, servermap)
            d = encoder.start()
            return d
        d.addCallback(_have_shareholders)
        return d


    def _add_server(self, server_number, readonly=False):
        assert self.g, "I tried to find a grid at self.g, but failed"
        ss = self.g.make_server(server_number, readonly)
        self.g.add_server(server_number, ss)


    def _add_server_with_share(self, server_number, share_number=None,
                               readonly=False):
        self._add_server(server_number, readonly)
        if share_number is not None:
            self._copy_share_to_server(share_number, server_number)


    def _copy_share_to_server(self, share_number, server_number):
        ss = self.g.servers_by_number[server_number]
        # Copy share i from the directory associated with the first
        # storage server to the directory associated with this one.
        assert self.g, "I tried to find a grid at self.g, but failed"
        assert self.shares, "I tried to find shares at self.shares, but failed"
        old_share_location = self.shares[share_number][2]
        new_share_location = os.path.join(ss.storedir, "shares")
        si = uri.from_string(self.uri).get_storage_index()
        new_share_location = os.path.join(new_share_location,
                                          storage_index_to_dir(si))
        if not os.path.exists(new_share_location):
            os.makedirs(new_share_location)
        new_share_location = os.path.join(new_share_location,
                                          str(share_number))
        if old_share_location != new_share_location:
            shutil.copy(old_share_location, new_share_location)
        shares = self.find_shares(self.uri)
        # Make sure that the storage server has the share.
        self.failUnless((share_number, ss.my_nodeid, new_share_location)
                        in shares)

    def _setup_grid(self):
        """
        I set up a NoNetworkGrid with a single server and client.
        """
        self.set_up_grid(num_clients=1, num_servers=1)

    def _setup_and_upload(self, **kwargs):
        """
        I set up a NoNetworkGrid with a single server and client,
        upload a file to it, store its uri in self.uri, and store its
        sharedata in self.shares.
        """
        self._setup_grid()
        client = self.g.clients[0]
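        # The grid created by _setup_grid has only one server, so happiness
        # must be 1 for this initial upload to succeed.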
        client.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
        if "n" in kwargs and "k" in kwargs:
            client.DEFAULT_ENCODING_PARAMETERS['k'] = kwargs['k']
            client.DEFAULT_ENCODING_PARAMETERS['n'] = kwargs['n']
        data = upload.Data("data" * 10000, convergence="")
        self.data = data
        d = client.upload(data)
        def _store_uri(ur):
            self.uri = ur.uri
        d.addCallback(_store_uri)
        d.addCallback(lambda ign:
            self.find_shares(self.uri))
        def _store_shares(shares):
            self.shares = shares
        d.addCallback(_store_shares)
        return d


    def test_configure_parameters(self):
        self.basedir = self.mktemp()
        hooks = {0: self._set_up_nodes_extra_config}
        self.set_up_grid(client_config_hooks=hooks)
        c0 = self.g.clients[0]
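        # _set_up_nodes_extra_config (defined elsewhere in this file) is
        # expected to write encoding overrides into the client's config;
        # the checks below verify that k=7 and N=12 took effect.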

        DATA = "data" * 100
        u = upload.Data(DATA, convergence="")
        d = c0.upload(u)
        d.addCallback(lambda ur: c0.create_node_from_uri(ur.uri))
        m = monitor.Monitor()
        d.addCallback(lambda fn: fn.check(m))
        def _check(cr):
            data = cr.get_data()
            self.failUnlessEqual(data["count-shares-needed"], 7)
            self.failUnlessEqual(data["count-shares-expected"], 12)
        d.addCallback(_check)
        return d


    def _setUp(self, ns):
        # Used by test_happy_semantics and test_preexisting_share_behavior
        # to set up the grid.
        self.node = FakeClient(mode="good", num_servers=ns)
        self.u = upload.Uploader()
        self.u.running = True
        self.u.parent = self.node


    def test_happy_semantics(self):
        self._setUp(2)
        DATA = upload.Data("kittens" * 10000, convergence="")
        # These parameters are unsatisfiable with only 2 servers.
        self.set_encoding_parameters(k=3, happy=5, n=10)
        d = self.shouldFail(UploadUnhappinessError, "test_happy_semantics",
                            "shares could be placed or found on only 2 "
                            "server(s). We were asked to place shares on "
                            "at least 5 server(s) such that any 3 of them "
                            "have enough shares to recover the file",
                            self.u.upload, DATA)
        # Let's reset the client to have 10 servers
        d.addCallback(lambda ign:
            self._setUp(10))
        # These parameters are satisfiable with 10 servers.
        d.addCallback(lambda ign:
            self.set_encoding_parameters(k=3, happy=5, n=10))
        d.addCallback(lambda ign:
            self.u.upload(DATA))
        # Let's reset the client to have 7 servers
        # (this is less than n, but more than h)
        d.addCallback(lambda ign:
            self._setUp(7))
        # These parameters are satisfiable with 7 servers.
        d.addCallback(lambda ign:
            self.set_encoding_parameters(k=3, happy=5, n=10))
        d.addCallback(lambda ign:
            self.u.upload(DATA))
        return d


    def test_problem_layout_comment_52(self):
        def _basedir():
            self.basedir = self.mktemp()
        _basedir()
        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:52
        #
        # The scenario in comment:52 proposes that we have a layout
        # like:
        # server 0: shares 1 - 9
        # server 1: share 0, read-only
        # server 2: share 0, read-only
        # server 3: share 0, read-only
        # To get access to the shares, we will first upload to one
        # server, which will then have shares 0 - 9. We'll then
        # add three new servers, configure them to not accept any new
        # shares, then write share 0 directly into the serverdir of each,
        # and then remove share 0 from server 0 in the same way.
        # Then each of servers 1 - 3 will report that it has share 0,
        # and will not accept any new share, while server 0 will report that
        # it has shares 1 - 9 and will accept new shares.
        # We'll then set 'happy' = 4, and see that an upload fails
        # (as it should).
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=0,
                                        readonly=True))
        # Remove the first share from server 0.
        def _remove_share_0_from_server_0():
            share_location = self.shares[0][2]
            os.remove(share_location)
        d.addCallback(lambda ign:
            _remove_share_0_from_server_0())
        # Set happy = 4 in the client.
        def _prepare():
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(lambda ign:
            _prepare())
        # Uploading data should fail
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError,
                            "test_problem_layout_comment_52_test_1",
                            "shares could be placed or found on 4 server(s), "
                            "but they are not spread out evenly enough to "
                            "ensure that any 3 of these servers would have "
                            "enough shares to recover the file. "
                            "We were asked to place shares on at "
                            "least 4 servers such that any 3 of them have "
                            "enough shares to recover the file",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))

        # Do comment:52, but like this:
        # server 2: empty
        # server 3: share 0, read-only
        # server 1: share 0, read-only
        # server 0: shares 0-9
        d.addCallback(lambda ign:
            _basedir())
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server(server_number=2))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=0,
                                        readonly=True))
        def _prepare2():
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
            return client
        d.addCallback(lambda ign:
            _prepare2())
        d.addCallback(lambda client:
            self.shouldFail(UploadUnhappinessError,
                            "test_problem_layout_comment_52_test_2",
                            "shares could be placed on only 3 server(s) such "
                            "that any 3 of them have enough shares to recover "
                            "the file, but we were asked to place shares on "
                            "at least 4 such servers.",
                            client.upload, upload.Data("data" * 10000,
                                                       convergence="")))
        return d


    def test_problem_layout_comment_53(self):
        # This scenario is at
        # http://allmydata.org/trac/tahoe/ticket/778#comment:53
        #
        # Set up the grid to have one server
        def _change_basedir(ign):
            self.basedir = self.mktemp()
        _change_basedir(None)
        # We start by uploading all of the shares to one server.
        # Next, we'll add three new servers to our NoNetworkGrid. We'll add
        # one share from our initial upload to each of these.
        # The counterintuitive ordering of the share numbers is to deal with
        # the permuting of these servers -- distributing the shares this
        # way ensures that the Tahoe2PeerSelector sees them in the order
        # described below.
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1))
        # So, we now have the following layout:
        # server 0: shares 0 - 9
        # server 1: share 2
        # server 2: share 0
        # server 3: share 1
        # We change the 'happy' parameter in the client to 4.
        # The Tahoe2PeerSelector will see the peers permuted as:
        # 2, 3, 1, 0
        # Ideally, a reupload of our original data should work.
        def _reset_encoding_parameters(ign, happy=4):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
            return client
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))


        # This scenario is basically comment:53, but changed so that the
        # Tahoe2PeerSelector sees the server with all of the shares before
        # any of the other servers.
        # The layout is:
        # server 2: shares 0 - 9
        # server 3: share 0
        # server 1: share 1
        # server 4: share 2
        # The Tahoe2PeerSelector sees the peers permuted as:
        # 2, 3, 1, 4
        # Note that server 0 has been replaced by server 4; this makes it
        # easier to ensure that the last server seen by Tahoe2PeerSelector
        # has only one share.
        d.addCallback(_change_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2))
        # Copy all of the other shares to server number 2
        def _copy_shares(ign):
            for i in xrange(0, 10):
                self._copy_share_to_server(i, 2)
        d.addCallback(_copy_shares)
        # Remove the first server, and add a placeholder with share 0
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=4, share_number=0))
        # Now try uploading.
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))


        # Try the same thing, but with empty servers after the first one
        # We want to make sure that Tahoe2PeerSelector will redistribute
        # shares as necessary, not simply discover an existing layout.
        # The layout is:
        # server 2: shares 0 - 9
        # server 3: empty
        # server 1: empty
        # server 4: empty
        d.addCallback(_change_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(lambda ign:
            self._add_server(server_number=2))
        d.addCallback(lambda ign:
            self._add_server(server_number=3))
        d.addCallback(lambda ign:
            self._add_server(server_number=1))
        d.addCallback(lambda ign:
            self._add_server(server_number=4))
        d.addCallback(_copy_shares)
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        # Make sure that only as many shares as necessary to satisfy
        # servers of happiness were pushed.
        d.addCallback(lambda results:
            self.failUnlessEqual(results.pushed_shares, 3))
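        # (server 2 already holds all ten shares, so pushing one share to
        # each of three empty servers is enough to reach happiness = 4)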
        return d


    def test_happiness_with_some_readonly_peers(self):
        # Try the following layout
        # server 2: shares 0-9
        # server 4: share 0, read-only
        # server 3: share 1, read-only
        # server 1: share 2, read-only
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2,
                                        readonly=True))
        # Copy all of the other shares to server number 2
        def _copy_shares(ign):
            for i in xrange(1, 10):
                self._copy_share_to_server(i, 2)
        d.addCallback(_copy_shares)
        # Remove server 0, and add another in its place
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=4, share_number=0,
                                        readonly=True))
        def _reset_encoding_parameters(ign, happy=4):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
            return client
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        return d


    def test_happiness_with_all_readonly_peers(self):
        # server 3: share 1, read-only
        # server 1: share 2, read-only
        # server 2: shares 0-9, read-only
        # server 4: share 0, read-only
        # The idea with this test is to make sure that the survey of
        # read-only peers doesn't undercount servers of happiness.
        self.basedir = self.mktemp()
        d = self._setup_and_upload()
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=4, share_number=0,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=3, share_number=1,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=1, share_number=2,
                                        readonly=True))
        d.addCallback(lambda ign:
            self._add_server_with_share(server_number=2, share_number=0,
                                        readonly=True))
        def _copy_shares(ign):
            for i in xrange(1, 10):
                self._copy_share_to_server(i, 2)
        d.addCallback(_copy_shares)
        d.addCallback(lambda ign:
            self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
        def _reset_encoding_parameters(ign, happy=4):
            client = self.g.clients[0]
            client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
            return client
        d.addCallback(_reset_encoding_parameters)
        d.addCallback(lambda client:
            client.upload(upload.Data("data" * 10000, convergence="")))
        return d


    def test_dropped_servers_in_encoder(self):
        # The Encoder does its own "servers_of_happiness" check if it
        # happens to lose a bucket during an upload (it assumes that
        # the layout presented to it satisfies "servers_of_happiness"
        # until a failure occurs)
        #
        # This test simulates an upload where servers break after peer
        # selection, but before they are written to.
        def _set_basedir(ign=None):
            self.basedir = self.mktemp()
        _set_basedir()
        d = self._setup_and_upload()
        # Add 5 servers
        def _do_server_setup(ign):
            self._add_server(server_number=1)
            self._add_server(server_number=2)
            self._add_server(server_number=3)
            self._add_server(server_number=4)
            self._add_server(server_number=5)
        d.addCallback(_do_server_setup)
        # remove the original server
        # (necessary to ensure that the Tahoe2PeerSelector will distribute
        #  all the shares)
        def _remove_server(ign):
            server = self.g.servers_by_number[0]
            self.g.remove_server(server.my_nodeid)
        d.addCallback(_remove_server)
        # This should succeed; we still have 4 servers, and the
        # happiness of the upload is 4.
        d.addCallback(lambda ign:
            self._do_upload_with_broken_servers(1))
        # Now, do the same thing over again, but drop 2 servers instead
        # of 1. This should fail, because servers_of_happiness is 4 and
        # we can't satisfy that.
        d.addCallback(_set_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(_do_server_setup)
        d.addCallback(_remove_server)
        d.addCallback(lambda ign:
            self.shouldFail(UploadUnhappinessError,
                            "test_dropped_servers_in_encoder",
                            "shares could be placed on only 3 server(s) "
                            "such that any 3 of them have enough shares to "
                            "recover the file, but we were asked to place "
                            "shares on at least 4",
                            self._do_upload_with_broken_servers, 2))
        # Now do the same thing over again, but make some of the servers
        # readonly, break some of the ones that aren't, and make sure that
        # happiness accounting is preserved.
        d.addCallback(_set_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        def _do_server_setup_2(ign):
            self._add_server(1)
            self._add_server(2)
            self._add_server(3)
            self._add_server_with_share(4, 7, readonly=True)
            self._add_server_with_share(5, 8, readonly=True)
        d.addCallback(_do_server_setup_2)
        d.addCallback(_remove_server)
        d.addCallback(lambda ign:
            self._do_upload_with_broken_servers(1))
        d.addCallback(_set_basedir)
        d.addCallback(lambda ign:
            self._setup_and_upload())
        d.addCallback(_do_server_setup_2)
        d.addCallback(_remove_server)
        d.addCallback(lambda ign:
            self.shouldFail(UploadUnhappinessError,
                            "test_dropped_servers_in_encoder",
                            "shares could be placed on only 3 server(s) "
                            "such that any 3 of them have enough shares to "
                            "recover the file, but we were asked to place "
                            "shares on at least 4",
                            self._do_upload_with_broken_servers, 2))
        return d


    def test_merge_peers(self):
        # merge_peers merges a set of used_peers (objects with peerid and
        # buckets attributes) into a dict of shnum -> set(peerid) mappings.
        shares = {
                    1 : set(["server1"]),
                    2 : set(["server2"]),
                    3 : set(["server3"]),
                    4 : set(["server4", "server5"]),
                    5 : set(["server1", "server2"]),
                 }
        # if used_peers is empty, it should just return the first
        # argument unchanged.
        self.failUnlessEqual(shares, merge_peers(shares, set([])))
1232         class FakePeerTracker:
1233             pass
1234         trackers = []
1235         for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
1236             t = FakePeerTracker()
1237             t.peerid = server
1238             t.buckets = [i]
1239             trackers.append(t)
1240         expected = {
1241                     1 : set(["server1"]),
1242                     2 : set(["server2"]),
1243                     3 : set(["server3"]),
1244                     4 : set(["server4", "server5"]),
1245                     5 : set(["server1", "server2", "server5"]),
1246                     6 : set(["server6"]),
1247                     7 : set(["server7"]),
1248                     8 : set(["server8"]),
1249                    }
1250         self.failUnlessEqual(expected, merge_peers(shares, set(trackers)))
1251         shares2 = {}
1252         expected = {
1253                     5 : set(["server5"]),
1254                     6 : set(["server6"]),
1255                     7 : set(["server7"]),
1256                     8 : set(["server8"]),
1257                    }
1258         self.failUnlessEqual(expected, merge_peers(shares2, set(trackers)))
1259         shares3 = {}
1260         trackers = []
1261         expected = {}
1262         for (i, server) in [(i, "server%d" % i) for i in xrange(10)]:
1263             shares3[i] = set([server])
1264             t = FakePeerTracker()
1265             t.peerid = server
1266             t.buckets = [i]
1267             trackers.append(t)
1268             expected[i] = set([server])
1269         self.failUnlessEqual(expected, merge_peers(shares3, set(trackers)))
1270
1271
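    # Not part of the original suite: a minimal reference model of the
    # merge_peers contract exercised above. A hedged sketch: it assumes
    # merge_peers returns a fresh shnum -> set(peerid) dict and leaves both
    # arguments untouched; merge_peers_model is a hypothetical name, not
    # the real implementation.
    def _merge_peers_reference_sketch(self):
        def merge_peers_model(servermap, used_peers):
            # copy the existing shnum -> set(peerid) mapping
            merged = dict((shnum, set(peerids))
                          for (shnum, peerids) in servermap.items())
            # fold in each tracker's allocated buckets
            for tracker in used_peers:
                for shnum in tracker.buckets:
                    merged.setdefault(shnum, set()).add(tracker.peerid)
            return merged
        # e.g. merge_peers_model({1: set(["server1"])}, set([])) should
        # equal {1: set(["server1"])}, matching the first assertion above.
        return merge_peers_model

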
1272     def test_servers_of_happiness_utility_function(self):
1273         # These tests are concerned with the servers_of_happiness()
1274         # utility function, and its underlying matching algorithm. Other
1275         # aspects of the servers_of_happiness behavior are tested
1276         # elsewhere. These tests exist to ensure that
1277         # servers_of_happiness doesn't under- or overcount the happiness
1278         # value for given inputs.
1279
1280         # servers_of_happiness expects a dict of
1281         # shnum => set(peerids) as a preexisting shares argument.
1282         test1 = {
1283                  1 : set(["server1"]),
1284                  2 : set(["server2"]),
1285                  3 : set(["server3"]),
1286                  4 : set(["server4"])
1287                 }
1288         happy = servers_of_happiness(test1)
1289         self.failUnlessEqual(4, happy)
1290         test1[4] = set(["server1"])
1291         # Share 4 now lives on server1, which already holds share 1, so
1292         # servers_of_happiness should be 3 instead of 4.
1293         happy = servers_of_happiness(test1)
1294         self.failUnlessEqual(3, happy)
1295         # The second argument of merge_peers should be a set of
1296         # objects with peerid and buckets as attributes. In actual use,
1297         # these will be PeerTracker instances, but for testing it is fine
1298         # to use a FakePeerTracker whose only job is to hold those
1299         # instance variables.
1300         class FakePeerTracker:
1301             pass
1302         trackers = []
1303         for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
1304             t = FakePeerTracker()
1305             t.peerid = server
1306             t.buckets = [i]
1307             trackers.append(t)
1308         # Recall that test1 is a server layout with servers_of_happiness
1309         # = 3.  Since there isn't any overlap between the shnum ->
1310         # set([peerid]) correspondences in test1 and those in trackers,
1311         # the result here should be 7.
1312         test2 = merge_peers(test1, set(trackers))
1313         happy = servers_of_happiness(test2)
1314         self.failUnlessEqual(7, happy)
1315         # Now add an overlapping server to trackers. This is redundant,
1316         # so it should not cause the previously reported happiness value
1317         # to change.
1318         t = FakePeerTracker()
1319         t.peerid = "server1"
1320         t.buckets = [1]
1321         trackers.append(t)
1322         test2 = merge_peers(test1, set(trackers))
1323         happy = servers_of_happiness(test2)
1324         self.failUnlessEqual(7, happy)
1325         test = {}
1326         happy = servers_of_happiness(test)
1327         self.failUnlessEqual(0, happy)
1328         # Test a more substantial overlap between the trackers and the
1329         # existing assignments.
1330         test = {
1331             1 : set(['server1']),
1332             2 : set(['server2']),
1333             3 : set(['server3']),
1334             4 : set(['server4']),
1335         }
1336         trackers = []
1337         t = FakePeerTracker()
1338         t.peerid = 'server5'
1339         t.buckets = [4]
1340         trackers.append(t)
1341         t = FakePeerTracker()
1342         t.peerid = 'server6'
1343         t.buckets = [3, 5]
1344         trackers.append(t)
1345         # The value returned by servers_of_happiness is the size
1346         # of a maximum matching in the bipartite graph that
1347         # servers_of_happiness() makes between peerids and share
1348         # numbers. It should find something like this:
1349         # (server 1, share 1)
1350         # (server 2, share 2)
1351         # (server 3, share 3)
1352         # (server 5, share 4)
1353         # (server 6, share 5)
1354         #
1355         # and, since there are 5 edges in this matching, it should
1356         # return 5. (A standalone sketch of this matching follows this test.)
1357         test2 = merge_peers(test, set(trackers))
1358         happy = servers_of_happiness(test2)
1359         self.failUnlessEqual(5, happy)
1360         # Zooko's first puzzle:
1361         # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:156)
1362         #
1363         # server 1: shares 0, 1
1364         # server 2: shares 1, 2
1365         # server 3: share 2
1366         #
1367         # This should yield happiness of 3.
1368         test = {
1369             0 : set(['server1']),
1370             1 : set(['server1', 'server2']),
1371             2 : set(['server2', 'server3']),
1372         }
1373         self.failUnlessEqual(3, servers_of_happiness(test))
1374         # Zooko's second puzzle:
1375         # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:158)
1376         #
1377         # server 1: shares 0, 1
1378         # server 2: share 1
1379         #
1380         # This should yield happiness of 2.
1381         test = {
1382             0 : set(['server1']),
1383             1 : set(['server1', 'server2']),
1384         }
1385         self.failUnlessEqual(2, servers_of_happiness(test))
1386
1387
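    # Not part of the original suite: a minimal sketch of the maximum
    # bipartite matching that the comments above describe
    # servers_of_happiness() as computing, using the standard
    # augmenting-path algorithm. maximum_matching_model is a hypothetical
    # name; the production code may differ in structure.
    def _maximum_matching_sketch(self):
        def maximum_matching_model(sharemap):
            # sharemap: shnum -> set(peerid)
            share_for_peer = {} # peerid -> shnum currently matched to it
            def augment(shnum, visited):
                # Try to match shnum to some peer, re-homing an
                # already-matched share along an augmenting path if needed.
                for peerid in sharemap.get(shnum, set()):
                    if peerid in visited:
                        continue
                    visited.add(peerid)
                    if (peerid not in share_for_peer or
                        augment(share_for_peer[peerid], visited)):
                        share_for_peer[peerid] = shnum
                        return True
                return False
            matched = 0
            for shnum in sharemap:
                if augment(shnum, set()):
                    matched += 1
            return matched
        # Zooko's first puzzle above should come out to 3:
        #   maximum_matching_model({0: set(['server1']),
        #                           1: set(['server1', 'server2']),
        #                           2: set(['server2', 'server3'])})
        return maximum_matching_model

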
1388     def test_shares_by_server(self):
1389         test = dict([(i, set(["server%d" % i])) for i in xrange(1, 5)])
1390         sbs = shares_by_server(test)
1391         self.failUnlessEqual(set([1]), sbs["server1"])
1392         self.failUnlessEqual(set([2]), sbs["server2"])
1393         self.failUnlessEqual(set([3]), sbs["server3"])
1394         self.failUnlessEqual(set([4]), sbs["server4"])
1395         test1 = {
1396                     1 : set(["server1"]),
1397                     2 : set(["server1"]),
1398                     3 : set(["server1"]),
1399                     4 : set(["server2"]),
1400                     5 : set(["server2"])
1401                 }
1402         sbs = shares_by_server(test1)
1403         self.failUnlessEqual(set([1, 2, 3]), sbs["server1"])
1404         self.failUnlessEqual(set([4, 5]), sbs["server2"])
1405         # This should fail unless the peerid part of the mapping is a set
1406         test2 = {1: "server1"}
1407         self.shouldFail(AssertionError,
1408                        "test_shares_by_server",
1409                        "",
1410                        shares_by_server, test2)
1411
1412
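    # Not part of the original suite: the inversion that shares_by_server
    # is expected to perform, per the assertions above. A hedged sketch;
    # shares_by_server_model is a hypothetical name, and the assert mirrors
    # the AssertionError exercised with test2.
    def _shares_by_server_sketch(self):
        def shares_by_server_model(servermap):
            peermap = {}
            for (shnum, peerids) in servermap.items():
                assert isinstance(peerids, set)
                for peerid in peerids:
                    peermap.setdefault(peerid, set()).add(shnum)
            return peermap
        # e.g. shares_by_server_model({1: set(["server1"]),
        #                              2: set(["server1"])})
        # should equal {"server1": set([1, 2])}.
        return shares_by_server_model

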
1413     def test_existing_share_detection(self):
1414         self.basedir = self.mktemp()
1415         d = self._setup_and_upload()
1416         # Our final setup should look like this:
1417         # server 1: shares 0 - 9, read-only
1418         # server 2: empty
1419         # server 3: empty
1420         # server 4: empty
1421         # The purpose of this test is to make sure that the peer selector
1422         # knows about the shares on server 1, even though it is read-only.
1423         # It used to simply filter these out, which would cause the test
1424         # to fail when servers_of_happiness = 4.
1425         d.addCallback(lambda ign:
1426             self._add_server_with_share(1, 0, True))
1427         d.addCallback(lambda ign:
1428             self._add_server(2))
1429         d.addCallback(lambda ign:
1430             self._add_server(3))
1431         d.addCallback(lambda ign:
1432             self._add_server(4))
1433         def _copy_shares(ign):
1434             for i in xrange(1, 10):
1435                 self._copy_share_to_server(i, 1)
1436         d.addCallback(_copy_shares)
1437         d.addCallback(lambda ign:
1438             self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
1439         def _prepare_client(ign):
1440             client = self.g.clients[0]
1441             client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
1442             return client
1443         d.addCallback(_prepare_client)
1444         d.addCallback(lambda client:
1445             client.upload(upload.Data("data" * 10000, convergence="")))
1446         return d
1447
1448
1449     def test_query_counting(self):
1450         # If peer selection fails, Tahoe2PeerSelector prints out a lot
1451         # of helpful diagnostic information, including query stats.
1452         # This test helps make sure that this information is accurate.
1453         self.basedir = self.mktemp()
1454         d = self._setup_and_upload()
1455         def _setup(ign):
1456             for i in xrange(1, 11):
1457                 self._add_server(server_number=i)
1458             self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
1459             c = self.g.clients[0]
1460             # We set happy to an unsatisfiable value so that we can check the
1461             # counting in the exception message. The same progress message
1462             # is also used when the upload is successful, but in that case it
1463             # only gets written to a log, so we can't see what it says.
1464             c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
1465             return c
1466         d.addCallback(_setup)
1467         d.addCallback(lambda c:
1468             self.shouldFail(UploadUnhappinessError, "test_query_counting",
1469                             "10 queries placed some shares",
1470                             c.upload, upload.Data("data" * 10000,
1471                                                   convergence="")))
1472         # Now try with some readonly servers. We want to make sure that
1473         # the readonly peer share discovery phase is counted correctly.
1474         def _reset(ign):
1475             self.basedir = self.mktemp()
1476             self.g = None
1477         d.addCallback(_reset)
1478         d.addCallback(lambda ign:
1479             self._setup_and_upload())
1480         def _then(ign):
1481             for i in xrange(1, 11):
1482                 self._add_server(server_number=i)
1483             self._add_server(server_number=11, readonly=True)
1484             self._add_server(server_number=12, readonly=True)
1485             self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
1486             c = self.g.clients[0]
1487             c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
1488             return c
1489         d.addCallback(_then)
1490         d.addCallback(lambda c:
1491             self.shouldFail(UploadUnhappinessError, "test_query_counting",
1492                             "2 placed none (of which 2 placed none due to "
1493                             "the server being full",
1494                             c.upload, upload.Data("data" * 10000,
1495                                                   convergence="")))
1496         # Now try the case where the upload process finds a bunch of the
1497         # shares that it wants to place on the first server, including
1498         # the one that it wanted to allocate there. Though no shares will
1499         # be allocated in this request, it should still be counted as
1500         # productive, since it caused some homeless shares to be
1501         # removed.
1502         d.addCallback(_reset)
1503         d.addCallback(lambda ign:
1504             self._setup_and_upload())
1505
1506         def _next(ign):
1507             for i in xrange(1, 11):
1508                 self._add_server(server_number=i)
1509             # Copy all of the shares to server 9, since that will be
1510             # the first one that the selector sees.
1511             for i in xrange(10):
1512                 self._copy_share_to_server(i, 9)
1513             # Remove server 0, and its contents
1514             self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
1515             # Make happiness unsatisfiable
1516             c = self.g.clients[0]
1517             c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
1518             return c
1519         d.addCallback(_next)
1520         d.addCallback(lambda c:
1521             self.shouldFail(UploadUnhappinessError, "test_query_counting",
1522                             "1 queries placed some shares",
1523                             c.upload, upload.Data("data" * 10000,
1524                                                   convergence="")))
1525         return d
1526
1527
1528     def test_upper_limit_on_readonly_queries(self):
1529         self.basedir = self.mktemp()
1530         d = self._setup_and_upload()
1531         def _then(ign):
1532             for i in xrange(1, 11):
1533                 self._add_server(server_number=i, readonly=True)
1534             self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
1535             c = self.g.clients[0]
1536             c.DEFAULT_ENCODING_PARAMETERS['k'] = 2
1537             c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
1538             c.DEFAULT_ENCODING_PARAMETERS['n'] = 4
1539             return c
1540         d.addCallback(_then)
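        # Ten read-only servers were added above, but the expected error
        # message mentions only eight queries: the peer selector caps the
        # number of read-only servers it will ask about existing shares
        # (here the cap appears to work out to 2*N, with N=4).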
1541         d.addCallback(lambda client:
1542             self.shouldFail(UploadUnhappinessError,
1543                             "test_upper_limit_on_readonly_queries",
1544                             "sent 8 queries to 8 peers",
1545                             client.upload,
1546                             upload.Data('data' * 10000, convergence="")))
1547         return d
1548
1549
1550     def test_exception_messages_during_peer_selection(self):
1551         # server 1: read-only, no shares
1552         # server 2: read-only, no shares
1553         # server 3: read-only, no shares
1554         # server 4: read-only, no shares
1555         # server 5: read-only, no shares
1556         # This will fail, but we want to make sure that the log messages
1557         # are informative about why it has failed.
1558         self.basedir = self.mktemp()
1559         d = self._setup_and_upload()
1560         d.addCallback(lambda ign:
1561             self._add_server(server_number=1, readonly=True))
1562         d.addCallback(lambda ign:
1563             self._add_server(server_number=2, readonly=True))
1564         d.addCallback(lambda ign:
1565             self._add_server(server_number=3, readonly=True))
1566         d.addCallback(lambda ign:
1567             self._add_server(server_number=4, readonly=True))
1568         d.addCallback(lambda ign:
1569             self._add_server(server_number=5, readonly=True))
1570         d.addCallback(lambda ign:
1571             self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
1572         def _reset_encoding_parameters(ign, happy=4):
1573             client = self.g.clients[0]
1574             client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
1575             return client
1576         d.addCallback(_reset_encoding_parameters)
1577         d.addCallback(lambda client:
1578             self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
1579                             "placed 0 shares out of 10 "
1580                             "total (10 homeless), want to place shares on at "
1581                             "least 4 servers such that any 3 of them have "
1582                             "enough shares to recover the file, "
1583                             "sent 5 queries to 5 peers, 0 queries placed "
1584                             "some shares, 5 placed none "
1585                             "(of which 5 placed none due to the server being "
1586                             "full and 0 placed none due to an error)",
1587                             client.upload,
1588                             upload.Data("data" * 10000, convergence="")))
1589
1590
1591         # server 1: read-only, no shares
1592         # server 2: broken, no shares
1593         # server 3: read-only, no shares
1594         # server 4: read-only, no shares
1595         # server 5: read-only, no shares
1596         def _reset(ign):
1597             self.basedir = self.mktemp()
1598         d.addCallback(_reset)
1599         d.addCallback(lambda ign:
1600             self._setup_and_upload())
1601         d.addCallback(lambda ign:
1602             self._add_server(server_number=1, readonly=True))
1603         d.addCallback(lambda ign:
1604             self._add_server(server_number=2))
1605         def _break_server_2(ign):
1606             server = self.g.servers_by_number[2].my_nodeid
1607             # We have to break the server in servers_by_id,
1608             # because the one in servers_by_number isn't wrapped,
1609             # and doesn't look at its broken attribute when answering
1610             # queries.
1611             self.g.servers_by_id[server].broken = True
1612         d.addCallback(_break_server_2)
1613         d.addCallback(lambda ign:
1614             self._add_server(server_number=3, readonly=True))
1615         d.addCallback(lambda ign:
1616             self._add_server(server_number=4, readonly=True))
1617         d.addCallback(lambda ign:
1618             self._add_server(server_number=5, readonly=True))
1619         d.addCallback(lambda ign:
1620             self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
1621         d.addCallback(_reset_encoding_parameters)
1622         d.addCallback(lambda client:
1623             self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
1624                             "placed 0 shares out of 10 "
1625                             "total (10 homeless), want to place shares on at "
1626                             "least 4 servers such that any 3 of them have "
1627                             "enough shares to recover the file, "
1628                             "sent 5 queries to 5 peers, 0 queries placed "
1629                             "some shares, 5 placed none "
1630                             "(of which 4 placed none due to the server being "
1631                             "full and 1 placed none due to an error)",
1632                             client.upload,
1633                             upload.Data("data" * 10000, convergence="")))
1634         # server 0, server 1 = empty, accepting shares
1635         # This should place all of the shares, but still fail with happy=4.
1636         # We want to make sure that the exception message is worded correctly.
1637         d.addCallback(_reset)
1638         d.addCallback(lambda ign:
1639             self._setup_grid())
1640         d.addCallback(lambda ign:
1641             self._add_server(server_number=1))
1642         d.addCallback(_reset_encoding_parameters)
1643         d.addCallback(lambda client:
1644             self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
1645                             "shares could be placed or found on only 2 "
1646                             "server(s). We were asked to place shares on at "
1647                             "least 4 server(s) such that any 3 of them have "
1648                             "enough shares to recover the file.",
1649                             client.upload, upload.Data("data" * 10000,
1650                                                        convergence="")))
1651         # servers 0 - 4 = empty, accepting shares
1652         # This too should place all the shares, and this too should fail,
1653         # but since the effective happiness is more than the k encoding
1654         # parameter, it should trigger a different error message than the one
1655         # above.
1656         d.addCallback(_reset)
1657         d.addCallback(lambda ign:
1658             self._setup_grid())
1659         d.addCallback(lambda ign:
1660             self._add_server(server_number=1))
1661         d.addCallback(lambda ign:
1662             self._add_server(server_number=2))
1663         d.addCallback(lambda ign:
1664             self._add_server(server_number=3))
1665         d.addCallback(lambda ign:
1666             self._add_server(server_number=4))
1667         d.addCallback(_reset_encoding_parameters, happy=7)
1668         d.addCallback(lambda client:
1669             self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
1670                             "shares could be placed on only 5 server(s) such "
1671                             "that any 3 of them have enough shares to recover "
1672                             "the file, but we were asked to place shares on "
1673                             "at least 7 such servers.",
1674                             client.upload, upload.Data("data" * 10000,
1675                                                        convergence="")))
1676         # server 0: shares 0 - 9
1677         # server 1: share 0, read-only
1678         # server 2: share 0, read-only
1679         # server 3: share 0, read-only
1680         # This should place all of the shares, but fail with happy=4.
1681         # Since the number of servers with shares is more than the number
1682         # necessary to reconstitute the file, this will trigger a different
1683         # error message than either of those above.
1684         d.addCallback(_reset)
1685         d.addCallback(lambda ign:
1686             self._setup_and_upload())
1687         d.addCallback(lambda ign:
1688             self._add_server_with_share(server_number=1, share_number=0,
1689                                         readonly=True))
1690         d.addCallback(lambda ign:
1691             self._add_server_with_share(server_number=2, share_number=0,
1692                                         readonly=True))
1693         d.addCallback(lambda ign:
1694             self._add_server_with_share(server_number=3, share_number=0,
1695                                         readonly=True))
1696         d.addCallback(_reset_encoding_parameters, happy=7)
1697         d.addCallback(lambda client:
1698             self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
1699                             "shares could be placed or found on 4 server(s), "
1700                             "but they are not spread out evenly enough to "
1701                             "ensure that any 3 of these servers would have "
1702                             "enough shares to recover the file. We were asked "
1703                             "to place shares on at least 7 servers such that "
1704                             "any 3 of them have enough shares to recover the "
1705                             "file",
1706                             client.upload, upload.Data("data" * 10000,
1707                                                        convergence="")))
1708         return d
1709
1710
1711     def test_problem_layout_comment_187(self):
1712         # The layout in #778 comment 187 broke an initial attempt at a
1713         # share redistribution algorithm. This test is here to demonstrate the
1714         # breakage, and to test that subsequent algorithms don't also
1715         # break in the same way.
1716         self.basedir = self.mktemp()
1717         d = self._setup_and_upload(k=2, n=3)
1718
1719         # server 1: shares 0, 1, 2, readonly
1720         # server 2: share 0, readonly
1721         # server 3: share 0
1722         def _setup(ign):
1723             self._add_server_with_share(server_number=1, share_number=0,
1724                                         readonly=True)
1725             self._add_server_with_share(server_number=2, share_number=0,
1726                                         readonly=True)
1727             self._add_server_with_share(server_number=3, share_number=0)
1728             # Copy shares
1729             self._copy_share_to_server(1, 1)
1730             self._copy_share_to_server(2, 1)
1731             # Remove server 0
1732             self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
1733             client = self.g.clients[0]
1734             client.DEFAULT_ENCODING_PARAMETERS['happy'] = 3
1735             return client
1736
1737         d.addCallback(_setup)
1738         d.addCallback(lambda client:
1739             client.upload(upload.Data("data" * 10000, convergence="")))
1740         return d
1741     test_problem_layout_comment_187.todo = "this isn't fixed yet"
1742
1743
1744     def test_upload_succeeds_with_some_homeless_shares(self):
1745         # If the upload is forced to stop trying to place shares before
1746         # it has placed (or otherwise accounted) for all of them, but it
1747         # has placed enough to satisfy the upload health criteria that
1748         # we're using, it should still succeed.
1749         self.basedir = self.mktemp()
1750         d = self._setup_and_upload()
1751         def _server_setup(ign):
1752             # Add four servers so that we have a layout like this:
1753             # server 1: share 0, read-only
1754             # server 2: share 1, read-only
1755             # server 3: share 2, read-only
1756             # server 4: share 3, read-only
1757             # If we set happy = 4, the upload will manage to satisfy
1758             # servers of happiness, but not place all of the shares; we
1759             # want to test that the upload is declared successful in
1760             # this case.
1761             self._add_server_with_share(server_number=1, share_number=0,
1762                                         readonly=True)
1763             self._add_server_with_share(server_number=2, share_number=1,
1764                                         readonly=True)
1765             self._add_server_with_share(server_number=3, share_number=2,
1766                                         readonly=True)
1767             self._add_server_with_share(server_number=4, share_number=3,
1768                                         readonly=True)
1769             # Remove server 0.
1770             self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
1771             # Set the client appropriately
1772             c = self.g.clients[0]
1773             c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
1774             return c
1775         d.addCallback(_server_setup)
1776         d.addCallback(lambda client:
1777             client.upload(upload.Data("data" * 10000, convergence="")))
1778         return d
1779
1780
1781     def test_uploader_skips_over_servers_with_only_one_share(self):
1782         # We want to make sure that the redistribution logic ignores
1783         # servers with only one share, since placing these shares
1784         # elsewhere will at best keep happiness the same as it was, and
1785         # at worst hurt it.
1786         self.basedir = self.mktemp()
1787         d = self._setup_and_upload()
1788         def _server_setup(ign):
1789             # Add some servers so that the upload will need to
1790             # redistribute, but will first pass over a couple of servers
1791             # that don't have enough shares to redistribute before
1792             # finding one that does have shares to redistribute.
1793             self._add_server_with_share(server_number=1, share_number=0)
1794             self._add_server_with_share(server_number=2, share_number=2)
1795             self._add_server_with_share(server_number=3, share_number=1)
1796             self._add_server_with_share(server_number=8, share_number=4)
1797             self._add_server_with_share(server_number=5, share_number=5)
1798             self._add_server_with_share(server_number=10, share_number=7)
1799             for i in xrange(4):
1800                 self._copy_share_to_server(i, 2)
1801             return self.g.clients[0]
1802         d.addCallback(_server_setup)
1803         d.addCallback(lambda client:
1804             client.upload(upload.Data("data" * 10000, convergence="")))
1805         return d
1806
1807
1808     def test_peer_selector_bucket_abort(self):
1809         # If peer selection for an upload fails due to an unhappy
1810         # layout, the peer selection process should abort the buckets it
1811         # allocates before failing, so that the space can be re-used.
1812         self.basedir = self.mktemp()
1813         self.set_up_grid(num_servers=5)
1814
1815         # Try to upload a file with happy=7, which is unsatisfiable with
1816         # the current grid. This will fail, but should not take up any
1817         # space on the storage servers after it fails.
1818         client = self.g.clients[0]
1819         client.DEFAULT_ENCODING_PARAMETERS['happy'] = 7
1820         d = defer.succeed(None)
1821         d.addCallback(lambda ignored:
1822             self.shouldFail(UploadUnhappinessError,
1823                             "test_peer_selector_bucket_abort",
1824                             "",
1825                             client.upload, upload.Data("data" * 10000,
1826                                                        convergence="")))
1827         # wait for the abort messages to get there.
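        # (fireEventually fires its Deferred on a later turn, after the
        # eventual-send queue has drained, so the abort messages queued
        # above are delivered before we check the allocated sizes.)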
1828         def _turn_barrier(res):
1829             return fireEventually(res)
1830         d.addCallback(_turn_barrier)
1831         def _then(ignored):
1832             for server in self.g.servers_by_number.values():
1833                 self.failUnlessEqual(server.allocated_size(), 0)
1834         d.addCallback(_then)
1835         return d
1836
1837
1838     def test_encoder_bucket_abort(self):
1839         # If enough servers die in the process of encoding and uploading
1840         # a file to make the layout unhappy, we should cancel the
1841         # newly-allocated buckets before dying.
1842         self.basedir = self.mktemp()
1843         self.set_up_grid(num_servers=4)
1844
1845         client = self.g.clients[0]
1846         client.DEFAULT_ENCODING_PARAMETERS['happy'] = 7
1847
1848         d = defer.succeed(None)
1849         d.addCallback(lambda ignored:
1850             self.shouldFail(UploadUnhappinessError,
1851                             "test_encoder_bucket_abort",
1852                             "",
1853                             self._do_upload_with_broken_servers, 1))
1854         def _turn_barrier(res):
1855             return fireEventually(res)
1856         d.addCallback(_turn_barrier)
1857         def _then(ignored):
1858             for server in self.g.servers_by_number.values():
1859                 self.failUnlessEqual(server.allocated_size(), 0)
1860         d.addCallback(_then)
1861         return d
1862
1863
1864     def _set_up_nodes_extra_config(self, clientdir):
1865         cfgfn = os.path.join(clientdir, "tahoe.cfg")
1866         oldcfg = open(cfgfn, "r").read()
1867         f = open(cfgfn, "wt")
1868         f.write(oldcfg)
1869         f.write("\n")
1870         f.write("[client]\n")
1871         f.write("shares.needed = 7\n")
1872         f.write("shares.total = 12\n")
1873         f.write("\n")
1874         f.close()
1875         return None
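
    # After _set_up_nodes_extra_config runs, each client's tahoe.cfg ends
    # with a stanza like this (a sketch of the appended text; the rest of
    # the file is whatever the harness originally wrote):
    #
    #   [client]
    #   shares.needed = 7
    #   shares.total = 12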
1876
1877 # TODO:
1878 #  upload with exactly 75 peers (shares_of_happiness)
1879 #  have a download fail
1880 #  cancel a download (need to implement more cancel stuff)