3 from twisted.trial import unittest
4 from twisted.internet import defer
5 from twisted.python.failure import Failure
6 from foolscap import eventual
7 from allmydata import encode, download
8 from allmydata.uri import pack_uri
9 from cStringIO import StringIO
12 def __init__(self, mode="good"):
13 self.ss = FakeStorageServer(mode)
15 def callRemote(self, methname, *args, **kwargs):
17 meth = getattr(self, methname)
18 return meth(*args, **kwargs)
19 return defer.maybeDeferred(_call)
21 def get_service(self, sname):
22 assert sname == "storageserver"
class FakeStorageServer:
    """In-process stand-in for a remote storage server.

    *mode* controls allocate_buckets(): "full" pretends to be out of
    space and accepts nothing, "already got them" claims to already hold
    every requested share, and any other mode hands out a fresh
    FakeBucketWriter for each requested share number.
    """
    def __init__(self, mode):
        self.mode = mode

    def callRemote(self, methname, *args, **kwargs):
        """Emulate a remote call: dispatch to a local method, but only
        after a trip through the event loop (fireEventually) so callers
        cannot depend on synchronous answers."""
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = eventual.fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def allocate_buckets(self, verifierid, sharenums, shareize, blocksize, canary):
        """Return (alreadygot, bucketwriters) per the storage protocol.

        alreadygot is the set of share numbers we claim to hold already;
        bucketwriters maps each newly accepted share number to a writer.
        """
        if self.mode == "full":
            # out of space: accept nothing, hold nothing
            return (set(), {},)
        elif self.mode == "already got them":
            # claim to already hold every requested share
            return (set(sharenums), {},)
        else:
            return (set(), dict([(shnum, FakeBucketWriter(),) for shnum in sharenums]),)
class FakeBucketWriter:
    # these are used for both reading and writing
    """In-memory share bucket, playing both the writer and reader roles.

    *mode* selects a corruption behavior for the read side:
      "good"              - return stored data unchanged
      "bad block"         - flip a bit in every block returned
      "bad blockhash"     - flip a bit in block hash #1
      "bad sharehash"     - flip a bit in share hash #1
      "missing sharehash" - claim to have no share hashes at all
    """
    def __init__(self, mode="good"):
        self.mode = mode
        self.blocks = {}          # segmentnum -> block data
        self.block_hashes = None  # list of hashes, set once
        self.share_hashes = None  # list of (hashnum, hash) tuples, set once
        self.closed = False

    def callRemote(self, methname, *args, **kwargs):
        """Emulate a remote call by dispatching locally via maybeDeferred."""
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        return defer.maybeDeferred(_call)

    def put_block(self, segmentnum, data):
        assert not self.closed
        assert segmentnum not in self.blocks
        self.blocks[segmentnum] = data

    def put_block_hashes(self, blockhashes):
        assert not self.closed
        assert self.block_hashes is None
        self.block_hashes = blockhashes

    def put_share_hashes(self, sharehashes):
        assert not self.closed
        assert self.share_hashes is None
        self.share_hashes = sharehashes

    def close(self):
        assert not self.closed
        self.closed = True

    def flip_bit(self, good):
        """Return *good* with the low bit of its final byte inverted."""
        return good[:-1] + chr(ord(good[-1]) ^ 0x01)

    def get_block(self, blocknum):
        assert isinstance(blocknum, int)
        if self.mode == "bad block":
            return self.flip_bit(self.blocks[blocknum])
        return self.blocks[blocknum]

    def get_block_hashes(self):
        if self.mode == "bad blockhash":
            # corrupt a copy, leaving the stored hashes intact
            hashes = self.block_hashes[:]
            hashes[1] = self.flip_bit(hashes[1])
            return hashes
        return self.block_hashes

    def get_share_hashes(self):
        if self.mode == "bad sharehash":
            hashes = self.share_hashes[:]
            hashes[1] = (hashes[1][0], self.flip_bit(hashes[1][1]))
            return hashes
        if self.mode == "missing sharehash":
            # one sneaky attack would be to pretend we don't know our own
            # sharehash, which could manage to frame someone else.
            # download.py is supposed to guard against this case.
            return []
        return self.share_hashes
class Encode(unittest.TestCase):
    """Exercise the encoder against a full set of fake bucket writers."""

    def test_encode(self):
        e = encode.Encoder()
        data = "happy happy joy joy" * 4
        e.setup(StringIO(data))
        NUM_SHARES = 100
        assert e.num_shares == NUM_SHARES # else we'll be completely confused
        e.segment_size = 25 # force use of multiple segments
        e.setup_codec() # need to rebuild the codec for that change
        NUM_SEGMENTS = 4
        # 76 bytes of data at 25 bytes/segment really does need 4 segments
        assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size
        shareholders = {}
        all_shareholders = []
        for shnum in range(NUM_SHARES):
            peer = FakeBucketWriter()
            shareholders[shnum] = peer
            all_shareholders.append(peer)
        e.set_shareholders(shareholders)
        d = e.start()
        def _check(roothash):
            self.failUnless(isinstance(roothash, str))
            self.failUnlessEqual(len(roothash), 32)
            for i,peer in enumerate(all_shareholders):
                self.failUnless(peer.closed)
                self.failUnlessEqual(len(peer.blocks), NUM_SEGMENTS)
                #self.failUnlessEqual(len(peer.block_hashes), NUM_SEGMENTS)
                # that isn't true: each peer gets a full tree, so it's more
                # like 2n-1 but with rounding to a power of two
                for h in peer.block_hashes:
                    self.failUnlessEqual(len(h), 32)
                #self.failUnlessEqual(len(peer.share_hashes), NUM_SHARES)
                # that isn't true: each peer only gets the chain they need
                for (hashnum, h) in peer.share_hashes:
                    self.failUnless(isinstance(hashnum, int))
                    self.failUnlessEqual(len(h), 32)
        d.addCallback(_check)
        return d
class Roundtrip(unittest.TestCase):
    """Encode a small file into 100 shares, then download and verify it,
    optionally corrupting some of the FakeBucketWriters along the way.

    With 74 corrupt servers the 26 good ones still suffice to recover;
    with 76 corrupt servers too few remain and the download must fail
    with NotEnoughPeersError.
    """

    def send_and_recover(self, NUM_SHARES, NUM_SEGMENTS=4, bucket_modes={}):
        # NOTE: bucket_modes={} is a mutable default, but it is only read
        # (via .get), never mutated, so sharing it across calls is safe.
        e = encode.Encoder()
        data = "happy happy joy joy" * 4
        e.setup(StringIO(data))
        assert e.num_shares == NUM_SHARES # else we'll be completely confused
        e.segment_size = 25 # force use of multiple segments
        e.setup_codec() # need to rebuild the codec for that change

        assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size

        shareholders = {}
        all_shareholders = []
        for shnum in range(NUM_SHARES):
            mode = bucket_modes.get(shnum, "good")
            peer = FakeBucketWriter(mode)
            shareholders[shnum] = peer
            all_shareholders.append(peer)
        e.set_shareholders(shareholders)
        d = e.start()
        def _uploaded(roothash):
            # NOTE(review): assumed pack_uri argument order/values beyond the
            # three codec parameters -- confirm against allmydata.uri
            URI = pack_uri(e._codec.get_encoder_type(),
                           e._codec.get_serialized_params(),
                           e._tail_codec.get_serialized_params(),
                           "verifierid" * 2,
                           roothash,
                           e.required_shares,
                           e.num_shares,
                           e.file_size,
                           e.segment_size)
            client = FakeClient()
            target = download.Data()
            fd = download.FileDownloader(client, URI, target)
            # hand the downloader our buckets directly instead of letting
            # it query peers for them
            for shnum in range(NUM_SHARES):
                bucket = all_shareholders[shnum]
                fd.add_share_bucket(shnum, bucket)
            fd._got_all_shareholders(None)
            d2 = fd._download_all_segments(None)
            d2.addCallback(fd._done)
            return d2
        d.addCallback(_uploaded)
        def _downloaded(newdata):
            self.failUnless(newdata == data)
        d.addCallback(_downloaded)
        return d

    def test_one_share_per_peer(self):
        return self.send_and_recover(100)

    def test_bad_blocks(self):
        # the first 74 servers have bad blocks, which will be caught by the
        # blockhashes, but enough good servers remain to recover
        modemap = dict([(i, "bad block")
                        for i in range(74)]
                       + [(i, "good")
                          for i in range(74, 100)])
        return self.send_and_recover(100, bucket_modes=modemap)

    def test_bad_blocks_failure(self):
        # the first 76 servers have bad blocks, which will be caught by the
        # blockhashes, and the download will fail
        modemap = dict([(i, "bad block")
                        for i in range(76)]
                       + [(i, "good")
                          for i in range(76, 100)])
        d = self.send_and_recover(100, bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_bad_blockhashes(self):
        # the first 74 servers have bad block hashes, so the blockhash tree
        # will not validate, but enough good servers remain to recover
        modemap = dict([(i, "bad blockhash")
                        for i in range(74)]
                       + [(i, "good")
                          for i in range(74, 100)])
        return self.send_and_recover(100, bucket_modes=modemap)

    def test_bad_blockhashes_failure(self):
        # the first 76 servers have bad block hashes, so the blockhash tree
        # will not validate, and the download will fail
        modemap = dict([(i, "bad blockhash")
                        for i in range(76)]
                       + [(i, "good")
                          for i in range(76, 100)])
        d = self.send_and_recover(100, bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_bad_sharehashes(self):
        # the first 74 servers have bad share hashes, so the sharehash tree
        # will not validate, but enough good servers remain to recover
        modemap = dict([(i, "bad sharehash")
                        for i in range(74)]
                       + [(i, "good")
                          for i in range(74, 100)])
        return self.send_and_recover(100, bucket_modes=modemap)

    def test_bad_sharehashes_failure(self):
        # the first 76 servers have bad share hashes, so the sharehash tree
        # will not validate, and the download will fail
        modemap = dict([(i, "bad sharehash")
                        for i in range(76)]
                       + [(i, "good")
                          for i in range(76, 100)])
        d = self.send_and_recover(100, bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d

    def test_missing_sharehashes(self):
        # the first 74 servers are missing their sharehashes, so the
        # sharehash tree will not validate, but enough good servers remain
        modemap = dict([(i, "missing sharehash")
                        for i in range(74)]
                       + [(i, "good")
                          for i in range(74, 100)])
        return self.send_and_recover(100, bucket_modes=modemap)

    def test_missing_sharehashes_failure(self):
        # the first 76 servers are missing their sharehashes, so the
        # sharehash tree will not validate, and the download will fail
        modemap = dict([(i, "missing sharehash")
                        for i in range(76)]
                       + [(i, "good")
                          for i in range(76, 100)])
        d = self.send_and_recover(100, bucket_modes=modemap)
        def _done(res):
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(download.NotEnoughPeersError))
        d.addBoth(_done)
        return d