from twisted.trial import unittest
from twisted.internet import defer
from twisted.python.failure import Failure
from foolscap import eventual
from allmydata import encode, download, hashtree
from allmydata.util import hashutil
from allmydata.uri import pack_uri
from allmydata.Crypto.Cipher import AES
from cStringIO import StringIO

class FakePeer:
    def __init__(self, mode="good"):
        self.ss = FakeStorageServer(mode)

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        return defer.maybeDeferred(_call)

    def get_service(self, sname):
        assert sname == "storageserver"
        return self.ss

class FakeStorageServer:
    def __init__(self, mode):
        self.mode = mode
    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
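        # fireEventually fires its Deferred in a later reactor turn, so the
        # method runs asynchronously, as a real remote invocation would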
        d = eventual.fireEventually()
        d.addCallback(lambda res: _call())
        return d
    def allocate_buckets(self, crypttext_hash, sharenums, sharesize,
                         blocksize, canary):
        # "full": refuse every share; "already got them": claim to already
        # hold all requested shares; otherwise hand back a writer per share
        if self.mode == "full":
            return (set(), {},)
        elif self.mode == "already got them":
            return (set(sharenums), {},)
        else:
            return (set(), dict([(shnum, FakeBucketWriter(),)
                                 for shnum in sharenums]),)

class LostPeerError(Exception):
    pass

def flip_bit(good): # flips the last bit
    return good[:-1] + chr(ord(good[-1]) ^ 0x01)
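# e.g. flip_bit("\x00") == "\x01"; the tests use this to simulate a single
# corrupted bit in a stored block, hash, or uri_extension.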

class FakeBucketWriter:
    # these are used for both reading and writing
    def __init__(self, mode="good"):
        self.mode = mode
        self.blocks = {}
        self.plaintext_hashes = None
        self.crypttext_hashes = None
        self.block_hashes = None
        self.share_hashes = None
        self.closed = False

    def callRemote(self, methname, *args, **kwargs):
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        return defer.maybeDeferred(_call)

    def put_block(self, segmentnum, data):
        assert not self.closed
        assert segmentnum not in self.blocks
        if self.mode == "lost" and segmentnum >= 1:
            raise LostPeerError("I'm going away now")
        self.blocks[segmentnum] = data

    def put_plaintext_hashes(self, hashes):
        assert not self.closed
        assert self.plaintext_hashes is None
        self.plaintext_hashes = hashes

    def put_crypttext_hashes(self, hashes):
        assert not self.closed
        assert self.crypttext_hashes is None
        self.crypttext_hashes = hashes

    def put_block_hashes(self, blockhashes):
        assert not self.closed
        assert self.block_hashes is None
        self.block_hashes = blockhashes

    def put_share_hashes(self, sharehashes):
        assert not self.closed
        assert self.share_hashes is None
        self.share_hashes = sharehashes

    def put_uri_extension(self, uri_extension):
        assert not self.closed
        self.uri_extension = uri_extension

    def close(self):
        assert not self.closed
        self.closed = True

    def get_block(self, blocknum):
        assert isinstance(blocknum, (int, long))
        if self.mode == "bad block":
            return flip_bit(self.blocks[blocknum])
        return self.blocks[blocknum]

    def get_plaintext_hashes(self):
        hashes = self.plaintext_hashes[:]
        if self.mode == "bad plaintext hashroot":
            hashes[0] = flip_bit(hashes[0])
        if self.mode == "bad plaintext hash":
            hashes[1] = flip_bit(hashes[1])
        return hashes

    def get_crypttext_hashes(self):
        hashes = self.crypttext_hashes[:]
        if self.mode == "bad crypttext hashroot":
            hashes[0] = flip_bit(hashes[0])
        if self.mode == "bad crypttext hash":
            hashes[1] = flip_bit(hashes[1])
        return hashes

    def get_block_hashes(self):
        if self.mode == "bad blockhash":
            hashes = self.block_hashes[:]
            hashes[1] = flip_bit(hashes[1])
            return hashes
        return self.block_hashes

    def get_share_hashes(self):
        if self.mode == "bad sharehash":
            hashes = self.share_hashes[:]
            hashes[1] = (hashes[1][0], flip_bit(hashes[1][1]))
            return hashes
        if self.mode == "missing sharehash":
            # one sneaky attack would be to pretend we don't know our own
            # sharehash, which could manage to frame someone else.
            # download.py is supposed to guard against this case.
            return []
        return self.share_hashes

    def get_uri_extension(self):
        if self.mode == "bad uri_extension":
            return flip_bit(self.uri_extension)
        return self.uri_extension

def make_data(length):
    data = "happy happy joy joy" * 100
    assert length <= len(data)
    return data[:length]

class Encode(unittest.TestCase):

    def do_encode(self, max_segment_size, datalen, NUM_SHARES, NUM_SEGMENTS,
                  expected_block_hashes, expected_share_hashes):
        data = make_data(datalen)
        # force use of multiple segments
        options = {"max_segment_size": max_segment_size}
        e = encode.Encoder(options)
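        # a fixed all-zeros AES key (hence "nonkey") keeps the encoding
        # deterministic; a real upload would use a proper key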
        nonkey = "\x00" * 16
        e.setup(StringIO(data), nonkey)
        assert e.num_shares == NUM_SHARES # else we'll be completely confused
        e.setup_codec() # need to rebuild the codec for that change
        assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size
        shareholders = {}
        all_shareholders = []
        for shnum in range(NUM_SHARES):
            peer = FakeBucketWriter()
            shareholders[shnum] = peer
            all_shareholders.append(peer)
        e.set_shareholders(shareholders)
        d = e.start()
        def _check(roothash):
            self.failUnless(isinstance(roothash, str))
            self.failUnlessEqual(len(roothash), 32)
            for i,peer in enumerate(all_shareholders):
                self.failUnless(peer.closed)
                self.failUnlessEqual(len(peer.blocks), NUM_SEGMENTS)
                # each peer gets a full tree of block hashes. For 3 or 4
                # segments, that's 7 hashes. For 5 segments it's 15 hashes.
                self.failUnlessEqual(len(peer.block_hashes),
                                     expected_block_hashes)
                for h in peer.block_hashes:
                    self.failUnlessEqual(len(h), 32)
                # each peer also gets their necessary chain of share hashes.
                # For 100 shares (rounded up to 128 leaves), that's 8 hashes
                self.failUnlessEqual(len(peer.share_hashes),
                                     expected_share_hashes)
                for (hashnum, h) in peer.share_hashes:
                    self.failUnless(isinstance(hashnum, int))
                    self.failUnlessEqual(len(h), 32)
        d.addCallback(_check)

        return d

    # a series of 3*3 tests to check out edge conditions. One axis is how the
    # plaintext is divided into segments: the data length is one byte short
    # of, exactly equal to, or one byte over a whole number of segments
    # (i.e. datalen % segment_size is segment_size-1, 0, or 1). For example,
    # for 25-byte segments, we might test 74 bytes, 75 bytes, and 76 bytes.

    # on the other axis is how many leaves in the block hash tree we wind up
    # with, relative to a power of 2, so 2^a+(-1,0,1). Each segment turns
    # into a single leaf. So we'd like to check out, e.g., 3 segments, 4
    # segments, and 5 segments.

    # that results in the following series of data lengths:
    #  3 segs: 74, 75, 51
    #  4 segs: 99, 100, 76
    #  5 segs: 124, 125, 101

    # all tests encode to 100 shares, which means the share hash tree will
    # have 128 leaves, which means that buckets will be given an 8-long share
    # hash chain

    # all 3-segment files will have a 4-leaf blockhashtree, and thus expect
    # to get 7 blockhashes. 4-segment files will also get 4-leaf block hash
    # trees and 7 blockhashes. 5-segment files will get 8-leaf block hash
    # trees, which get 15 blockhashes.

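    # A quick arithmetic check of those expectations (a hypothetical helper,
    # not part of the original suite): a Merkle tree over N leaves is padded
    # up to the next power of two, say L leaves, and contains 2*L-1 hashes
    # in total, while the share hash chain for an L-leaf tree is log2(L)+1
    # hashes long (hence 8 for 128 leaves).
    def expected_block_hash_count(self, num_segments):
        leaves = 1
        while leaves < num_segments:
            leaves *= 2
        return 2*leaves - 1  # 3 or 4 segments -> 7; 5 segments -> 15
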
    def test_send_74(self):
        # 3 segments (25, 25, 24)
        return self.do_encode(25, 74, 100, 3, 7, 8)
    def test_send_75(self):
        # 3 segments (25, 25, 25)
        return self.do_encode(25, 75, 100, 3, 7, 8)
    def test_send_51(self):
        # 3 segments (25, 25, 1)
        return self.do_encode(25, 51, 100, 3, 7, 8)

    def test_send_76(self):
        # encode a 76 byte file (in 4 segments: 25,25,25,1) to 100 shares
        return self.do_encode(25, 76, 100, 4, 7, 8)
    def test_send_99(self):
        # 4 segments: 25,25,25,24
        return self.do_encode(25, 99, 100, 4, 7, 8)
    def test_send_100(self):
        # 4 segments: 25,25,25,25
        return self.do_encode(25, 100, 100, 4, 7, 8)

    def test_send_124(self):
        # 5 segments: 25, 25, 25, 25, 24
        return self.do_encode(25, 124, 100, 5, 15, 8)
    def test_send_125(self):
        # 5 segments: 25, 25, 25, 25, 25
        return self.do_encode(25, 125, 100, 5, 15, 8)
    def test_send_101(self):
        # 5 segments: 25, 25, 25, 25, 1
        return self.do_encode(25, 101, 100, 5, 15, 8)

class Roundtrip(unittest.TestCase):
    def send_and_recover(self, k_and_happy_and_n=(25,75,100),
                         AVAILABLE_SHARES=None,
                         datalen=76,
                         max_segment_size=25,
                         bucket_modes={},
                         recover_mode="recover",
                         ):
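        # k_and_happy_and_n is (needed_shares, shares_of_happiness,
        # total_shares), matching the "needed_and_happy_and_total_shares"
        # encoder option used in send() below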
        if AVAILABLE_SHARES is None:
            AVAILABLE_SHARES = k_and_happy_and_n[2]
        data = make_data(datalen)
        d = self.send(k_and_happy_and_n, AVAILABLE_SHARES,
                      max_segment_size, bucket_modes, data)
        # that fires with (uri_extension_hash, e, shareholders)
        d.addCallback(self.recover, AVAILABLE_SHARES, recover_mode)
        # that fires with newdata
        def _downloaded((newdata, fd)):
            self.failUnless(newdata == data)
            return fd
        d.addCallback(_downloaded)
        return d

    def send(self, k_and_happy_and_n, AVAILABLE_SHARES, max_segment_size,
             bucket_modes, data):
        NUM_SHARES = k_and_happy_and_n[2]
        if AVAILABLE_SHARES is None:
            AVAILABLE_SHARES = NUM_SHARES
        # force use of multiple segments
        options = {"max_segment_size": max_segment_size,
                   "needed_and_happy_and_total_shares": k_and_happy_and_n}
        e = encode.Encoder(options)
        nonkey = "\x00" * 16
        e.setup(StringIO(data), nonkey)

        assert e.num_shares == NUM_SHARES # else we'll be completely confused
        e.setup_codec() # need to rebuild the codec for that change

        shareholders = {}
        for shnum in range(NUM_SHARES):
            mode = bucket_modes.get(shnum, "good")
            peer = FakeBucketWriter(mode)
            shareholders[shnum] = peer
        e.set_shareholders(shareholders)
        # compute the plaintext and crypttext hashes ourselves (the same
        # values the real uploader would supply), so the uri_extension
        # carries valid entries for the downloader to verify against
        plaintext_hasher = hashutil.plaintext_hasher()
        plaintext_hasher.update(data)
        cryptor = AES.new(key=nonkey, mode=AES.MODE_CTR,
                          counterstart="\x00"*16)
        crypttext_hasher = hashutil.crypttext_hasher()
        crypttext_hasher.update(cryptor.encrypt(data))

        e.set_uri_extension_data({'crypttext_hash': crypttext_hasher.digest(),
                                  'plaintext_hash': plaintext_hasher.digest(),
                                  })
        d = e.start()
        def _sent(uri_extension_hash):
            return (uri_extension_hash, e, shareholders)
        d.addCallback(_sent)
        return d

    def recover(self, (uri_extension_hash, e, shareholders), AVAILABLE_SHARES,
                recover_mode):
        key = e.key
        if "corrupt_key" in recover_mode:
            key = flip_bit(key)

        URI = pack_uri(storage_index="S" * 32,
                       key=key,
                       uri_extension_hash=uri_extension_hash,
                       needed_shares=e.required_shares,
                       total_shares=e.num_shares,
                       size=e.file_size)
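        # note: the storage_index is a dummy value; since we hand share
        # buckets to the FileDownloader directly (below), it is never used
        # for peer selection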
        client = None
        target = download.Data()
        fd = download.FileDownloader(client, URI, target)

        # we manually cycle the FileDownloader through a number of steps that
        # would normally be sequenced by a Deferred chain in
        # FileDownloader.start(), to give us more control over the process.
        # In particular, by bypassing _get_all_shareholders, we skip
        # permuted-peerlist selection.
        for shnum, bucket in shareholders.items():
            if shnum < AVAILABLE_SHARES and bucket.closed:
                fd.add_share_bucket(shnum, bucket)
        fd._got_all_shareholders(None)

        # Make it possible to obtain uri_extension from the shareholders.
        # Arrange for shareholders[0] to be the first, so we can selectively
        # corrupt the data it returns.
        fd._uri_extension_sources = shareholders.values()
        fd._uri_extension_sources.remove(shareholders[0])
        fd._uri_extension_sources.insert(0, shareholders[0])

        d = defer.succeed(None)

        # have the FileDownloader retrieve a copy of uri_extension itself
        d.addCallback(fd._obtain_uri_extension)

        if "corrupt_crypttext_hashes" in recover_mode:
            # replace everybody's crypttext hash trees with a different one
            # (computed over a different file), then modify our uri_extension
            # to reflect the new crypttext hash tree root
            def _corrupt_crypttext_hashes(uri_extension):
                assert isinstance(uri_extension, dict)
                assert 'crypttext_root_hash' in uri_extension
                badhash = hashutil.tagged_hash("bogus", "data")
                bad_crypttext_hashes = [badhash] * uri_extension['num_segments']
                badtree = hashtree.HashTree(bad_crypttext_hashes)
                for bucket in shareholders.values():
                    bucket.crypttext_hashes = list(badtree)
                uri_extension['crypttext_root_hash'] = badtree[0]
                return uri_extension
            d.addCallback(_corrupt_crypttext_hashes)

        d.addCallback(fd._got_uri_extension)

        # also have the FileDownloader ask for hash trees
        d.addCallback(fd._get_hashtrees)

        d.addCallback(fd._create_validated_buckets)
        d.addCallback(fd._download_all_segments)
        d.addCallback(fd._done)
        def _done(newdata):
            return (newdata, fd)
        d.addCallback(_done)
        return d

    def test_not_enough_shares(self):
        d = self.send_and_recover((4,8,10), AVAILABLE_SHARES=2)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_one_share_per_peer(self):
        return self.send_and_recover()

    def test_74(self):
        return self.send_and_recover(datalen=74)
    def test_75(self):
        return self.send_and_recover(datalen=75)
    def test_51(self):
        return self.send_and_recover(datalen=51)

    def test_99(self):
        return self.send_and_recover(datalen=99)
    def test_100(self):
        return self.send_and_recover(datalen=100)
    def test_76(self):
        return self.send_and_recover(datalen=76)

    def test_124(self):
        return self.send_and_recover(datalen=124)
    def test_125(self):
        return self.send_and_recover(datalen=125)
    def test_101(self):
        return self.send_and_recover(datalen=101)

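    # the datalen values above reuse the 3x3 segment-boundary matrix
    # described in the Encode class
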
    # the following tests all use 4-out-of-10 encoding

    def test_bad_blocks(self):
        # the first 6 servers have bad blocks, which will be caught by the
        # blockhashes
        modemap = dict([(i, "bad block")
                        for i in range(6)]
                       + [(i, "good")
                          for i in range(6, 10)])
        return self.send_and_recover((4,8,10), bucket_modes=modemap)

    def test_bad_blocks_failure(self):
        # the first 7 servers have bad blocks, which will be caught by the
        # blockhashes, and the download will fail
        modemap = dict([(i, "bad block")
                        for i in range(7)]
                       + [(i, "good")
                          for i in range(7, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_bad_blockhashes(self):
        # the first 6 servers have bad block hashes, so the blockhash tree
        # will not validate
        modemap = dict([(i, "bad blockhash")
                        for i in range(6)]
                       + [(i, "good")
                          for i in range(6, 10)])
        return self.send_and_recover((4,8,10), bucket_modes=modemap)

    def test_bad_blockhashes_failure(self):
        # the first 7 servers have bad block hashes, so the blockhash tree
        # will not validate, and the download will fail
        modemap = dict([(i, "bad blockhash")
                        for i in range(7)]
                       + [(i, "good")
                          for i in range(7, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_bad_sharehashes(self):
        # the first 6 servers have bad share hashes, so the sharehash tree
        # will not validate
        modemap = dict([(i, "bad sharehash")
                        for i in range(6)]
                       + [(i, "good")
                          for i in range(6, 10)])
        return self.send_and_recover((4,8,10), bucket_modes=modemap)

    def assertFetchFailureIn(self, fd, where):
        # the FileDownloader keeps a per-category count of how many times it
        # had to fail over to another server while fetching these pieces
        expected = {"uri_extension": 0,
                    "plaintext_hashroot": 0,
                    "plaintext_hashtree": 0,
                    "crypttext_hashroot": 0,
                    "crypttext_hashtree": 0,
                    }
        if where is not None:
            expected[where] += 1
        self.failUnlessEqual(fd._fetch_failures, expected)

    def test_good(self):
        # just to make sure the test harness works when we aren't
        # intentionally causing failures
        modemap = dict([(i, "good") for i in range(0, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        d.addCallback(self.assertFetchFailureIn, None)
        return d

    def test_bad_uri_extension(self):
        # the first server has a bad uri_extension block, so we will fail
        # over to a different server.
        modemap = dict([(i, "bad uri_extension") for i in range(1)] +
                       [(i, "good") for i in range(1, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        d.addCallback(self.assertFetchFailureIn, "uri_extension")
        return d

    def test_bad_plaintext_hashroot(self):
        # the first server has a bad plaintext hashroot, so we will fail over
        # to a different server.
        modemap = dict([(i, "bad plaintext hashroot") for i in range(1)] +
                       [(i, "good") for i in range(1, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        d.addCallback(self.assertFetchFailureIn, "plaintext_hashroot")
        return d

    def test_bad_crypttext_hashroot(self):
        # the first server has a bad crypttext hashroot, so we will fail
        # over to a different server.
        modemap = dict([(i, "bad crypttext hashroot") for i in range(1)] +
                       [(i, "good") for i in range(1, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        d.addCallback(self.assertFetchFailureIn, "crypttext_hashroot")
        return d

    def test_bad_plaintext_hashes(self):
        # the first server has a bad plaintext hash block, so we will fail
        # over to a different server.
        modemap = dict([(i, "bad plaintext hash") for i in range(1)] +
                       [(i, "good") for i in range(1, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        d.addCallback(self.assertFetchFailureIn, "plaintext_hashtree")
        return d

    def test_bad_crypttext_hashes(self):
        # the first server has a bad crypttext hash block, so we will fail
        # over to a different server.
        modemap = dict([(i, "bad crypttext hash") for i in range(1)] +
                       [(i, "good") for i in range(1, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        d.addCallback(self.assertFetchFailureIn, "crypttext_hashtree")
        return d

    def test_bad_crypttext_hashes_failure(self):
        # to test that the crypttext merkle tree is really being applied, we
        # sneak into the download process and corrupt two things: we replace
        # everybody's crypttext hashtree with a bad version (computed over
        # bogus data), and we modify the supposedly-validated uri_extension
        # block to match the new crypttext hashtree root. The download
        # process should notice that the crypttext coming out of FEC doesn't
        # match the tree, and fail.

        modemap = dict([(i, "good") for i in range(0, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap,
                                  recover_mode=("corrupt_crypttext_hashes"))
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(hashtree.BadHashError), res)
        d.addBoth(_done)
        return d

    def test_bad_plaintext(self):
        # faking a decryption failure is easier: just corrupt the key
        modemap = dict([(i, "good") for i in range(0, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap,
                                  recover_mode=("corrupt_key"))
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(hashtree.BadHashError))
        d.addBoth(_done)
        return d

    def test_bad_sharehashes_failure(self):
        # the first 7 servers have bad share hashes, so the sharehash tree
        # will not validate, and the download will fail
        modemap = dict([(i, "bad sharehash")
                        for i in range(7)]
                       + [(i, "good")
                          for i in range(7, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_missing_sharehashes(self):
        # the first 6 servers are missing their sharehashes, so the
        # sharehash tree will not validate
        modemap = dict([(i, "missing sharehash")
                        for i in range(6)]
                       + [(i, "good")
                          for i in range(6, 10)])
        return self.send_and_recover((4,8,10), bucket_modes=modemap)

    def test_missing_sharehashes_failure(self):
        # the first 7 servers are missing their sharehashes, so the
        # sharehash tree will not validate, and the download will fail
        modemap = dict([(i, "missing sharehash")
                        for i in range(7)]
                       + [(i, "good")
                          for i in range(7, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_lost_one_shareholder(self):
        # we have enough shareholders when we start, but one segment in we
        # lose one of them. The upload should still succeed, as long as we
        # still have 'shares_of_happiness' peers left.
        modemap = dict([(i, "good") for i in range(9)] +
                       [(i, "lost") for i in range(9, 10)])
        return self.send_and_recover((4,8,10), bucket_modes=modemap)

    def test_lost_many_shareholders(self):
        # we have enough shareholders when we start, but one segment in we
        # lose all but one of them. The upload should fail.
        modemap = dict([(i, "good") for i in range(1)] +
                       [(i, "lost") for i in range(1, 10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(encode.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_lost_all_shareholders(self):
        # we have enough shareholders when we start, but one segment in we
        # lose all of them. The upload should fail.
        modemap = dict([(i, "lost") for i in range(10)])
        d = self.send_and_recover((4,8,10), bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(encode.NotEnoughPeersError))
        d.addBoth(_done)
        return d