3 from twisted.trial import unittest
4 from twisted.python.failure import Failure
5 from twisted.internet import defer
6 from cStringIO import StringIO
8 from allmydata import upload, encode, uri
9 from allmydata.interfaces import IFileURI
10 from allmydata.util.assertutil import precondition
11 from foolscap import eventual
# Tests for the upload.Uploadable wrappers (upload.FileHandle / upload.FileName).
# NOTE(review): this chunk is extraction-garbled — original source line numbers
# are fused onto every line and several lines are missing (16, 18, 20, 22, 24,
# 32-33, 36, 38-40, 42, 49-54, 61 per the fused numbering), so the code below
# is kept byte-identical rather than risk a wrong reconstruction.
13 class Uploadable(unittest.TestCase):
# shouldEqual: asserts `data` is a list of str chunks; presumably the missing
# lines iterate the list and join it into `s` before the final comparison —
# TODO confirm against the complete file.
14 def shouldEqual(self, data, expected):
15 self.failUnless(isinstance(data, list))
17 self.failUnless(isinstance(e, str))
19 self.failUnlessEqual(s, expected)
# test_filehandle: wraps a filehandle (the missing setup line presumably
# builds a 41-byte StringIO named `s`) in upload.FileHandle, then checks
# get_size/read/close through a Deferred callback chain.
21 def test_filehandle(self):
23 u = upload.FileHandle(s)
25 d.addCallback(self.failUnlessEqual, 41)
26 d.addCallback(lambda res: u.read(1))
27 d.addCallback(self.shouldEqual, "a")
# read(80) with only 40 bytes left: short read returns the remainder.
28 d.addCallback(lambda res: u.read(80))
29 d.addCallback(self.shouldEqual, "a"*40)
30 d.addCallback(lambda res: u.close()) # this doesn't close the filehandle
31 d.addCallback(lambda res: s.close()) # that privilege is reserved for us
# test_filename: same checks against upload.FileName over an on-disk file;
# the lines that create `basedir` and write the file are missing here.
34 def test_filename(self):
35 basedir = "upload/Uploadable/test_filename"
37 fn = os.path.join(basedir, "file")
41 u = upload.FileName(fn)
43 d.addCallback(self.failUnlessEqual, 41)
44 d.addCallback(lambda res: u.read(1))
45 d.addCallback(self.shouldEqual, "a")
46 d.addCallback(lambda res: u.read(80))
47 d.addCallback(self.shouldEqual, "a"*40)
48 d.addCallback(lambda res: u.close())
# NOTE(review): lines 49-54 are missing; the second get_size/read sequence
# below presumably re-opens the same Uploadable — verify against the full file.
55 d.addCallback(self.failUnlessEqual, 41)
56 d.addCallback(lambda res: u.read(1))
57 d.addCallback(self.shouldEqual, "a")
58 d.addCallback(lambda res: u.read(80))
59 d.addCallback(self.shouldEqual, "a"*40)
60 d.addCallback(lambda res: u.close())
# NOTE(review): the class header (original line 63, presumably
# `class FakePeer:`) and lines 66, 68, 72, 75-76 are missing from this
# garbled chunk; code is kept byte-identical.
# A fake peer: owns a FakeStorageServer and forwards callRemote-style
# invocations to local methods via defer.maybeDeferred.
64 def __init__(self, mode="good"):
65 self.ss = FakeStorageServer(mode)
# Dispatches `methname` to the same-named local method; the missing line 68
# presumably defines the `_call` closure referenced below.
67 def callRemote(self, methname, *args, **kwargs):
69 meth = getattr(self, methname)
70 return meth(*args, **kwargs)
71 return defer.maybeDeferred(_call)
# Only the storage-server service is supported; line 75 (the return of
# self.ss, presumably) is missing.
73 def get_service(self, sname):
74 assert sname == "storageserver"
# NOTE(review): lines 79-81, 83, 88-89, 93, 95, 98, 101, 104-105 are missing
# from this garbled chunk; code is kept byte-identical.
77 class FakeStorageServer:
# __init__ body is missing; it presumably stores `mode` and initializes the
# `allocated`/`queries` attributes that allocate_buckets and the PeerSelection
# tests read — TODO confirm.
78 def __init__(self, mode):
# Fake remote invocation: fires through foolscap's eventual-send so results
# arrive in a later reactor turn, like a real remote call would.
82 def callRemote(self, methname, *args, **kwargs):
84 meth = getattr(self, methname)
85 return meth(*args, **kwargs)
86 d = eventual.fireEventually()
87 d.addCallback(lambda res: _call())
# allocate_buckets: "full" mode presumably refuses all shares (line 95
# missing); "already got them" reports every share as already present and
# allocates nothing; otherwise each requested share is recorded and given a
# FakeBucketWriter.
90 def allocate_buckets(self, storage_index, renew_secret, cancel_secret,
91 sharenums, share_size, canary):
92 #print "FakeStorageServer.allocate_buckets(num=%d, size=%d)" % (len(sharenums), share_size)
94 if self.mode == "full":
96 elif self.mode == "already got them":
97 return (set(sharenums), {},)
99 for shnum in sharenums:
100 self.allocated.append( (storage_index, shnum) )
102 dict([( shnum, FakeBucketWriter(share_size) )
103 for shnum in sharenums]),
# NOTE(review): lines 110-112, 114, 119-120 are missing from this garbled
# chunk; code is kept byte-identical.
106 class FakeBucketWriter:
107 # a diagnostic version of storageserver.BucketWriter
# __init__ keeps the share contents in an in-memory StringIO; the missing
# lines presumably set self.closed = False and self._size = size, which
# remote_write's preconditions read — TODO confirm.
108 def __init__(self, size):
109 self.data = StringIO()
# Fake remote invocation: maps `methname` to the local "remote_" + methname
# method and fires it through foolscap's eventual-send.
113 def callRemote(self, methname, *args, **kwargs):
115 meth = getattr(self, "remote_" + methname)
116 return meth(*args, **kwargs)
117 d = eventual.fireEventually()
118 d.addCallback(lambda res: _call())
def remote_write(self, offset, data):
    """Write `data` into the in-memory share buffer at `offset`.

    Diagnostic stand-in for storageserver.BucketWriter.remote_write:
    refuses writes after close, requires a non-negative offset, and
    requires the write to stay within the declared share size before
    copying the bytes into the StringIO buffer.
    """
    precondition(not self.closed)  # no writes after remote_close
    precondition(offset >= 0)
    precondition(offset+len(data) <= self._size,
                 "offset=%d + data=%d > size=%d" %
                 (offset, len(data), self._size))
    self.data.seek(offset)
    self.data.write(data)
# Marks the bucket closed after verifying it wasn't already; the body line
# that actually sets the flag (original line 132, presumably
# `self.closed = True`) is missing from this garbled chunk.
130 def remote_close(self):
131 precondition(not self.closed)
class FakeIntroducerClient:
    """Stub introducer client used by the upload tests.

    Reports that "enough peers" are already connected by firing
    immediately, so tests never wait on real peer connections.
    """
    def when_enough_peers(self, numpeers):
        # Already-fired Deferred: callers proceed without waiting.
        return defer.succeed(None)
# NOTE(review): the class header (original line 138, presumably
# `class FakeClient:`) and lines 140, 146, 148, 150-151, 153, 155 are
# missing from this garbled chunk; code is kept byte-identical.
# Fabricates num_servers FakePeers; line 140 presumably stores `mode`.
139 def __init__(self, mode="good", num_servers=50):
141 self.num_servers = num_servers
# Builds (peerid, permutedid, FakePeer) tuples with zero-indexed ids padded
# to 20 chars, and remembers the peers in self.last_peers so tests can
# inspect per-peer allocations; the `return peers` line (146) is missing.
142 def get_permuted_peers(self, storage_index, include_myself):
143 peers = [ ("%20d"%fakeid, "%20d"%fakeid, FakePeer(self.mode),)
144 for fakeid in range(self.num_servers) ]
145 self.last_peers = [p[2] for p in peers]
# The bodies of the four accessors below are all among the missing lines.
147 def get_push_to_ourselves(self):
149 def get_encoding_parameters(self):
152 def get_renewal_secret(self):
154 def get_cancel_secret(self):
158 Once upon a time, there was a beautiful princess named Buttercup. She lived
159 in a magical land where every file was stored securely among millions of
160 machines, and nobody ever worried about their data being lost ever again.
# Sanity check: the DATA fixture must be big enough to force a real CHK
# upload rather than a literal URI. The SIZE_ZERO/SIZE_SMALL definitions
# (original lines 164-166) are missing from this garbled chunk.
163 assert len(DATA) > upload.Uploader.URI_LIT_SIZE_THRESHOLD
167 SIZE_LARGE = len(DATA)
# Upload tests against all-"good" fake servers, covering the three entry
# points (upload_data / upload_filehandle / upload_filename) at zero, small
# (literal-URI), and large (CHK-URI) sizes.
# NOTE(review): extraction-garbled — fused line numbers, and lines 170,
# 175, 177, 180, 182, 189, 191-192, the per-test `return d` lines, and the
# filename-test file-writing lines are missing; code kept byte-identical.
169 class GoodServer(unittest.TestCase):
# setUp (header line 170 missing): wires an Uploader to a good FakeClient.
171 self.node = FakeClient(mode="good")
172 self.u = upload.Uploader()
173 self.u.running = True
174 self.u.parent = self.node
# Small uploads must produce a LiteralFileURI embedding the data itself;
# the line converting newuri to `u` (177, presumably IFileURI(newuri)) is
# missing.
176 def _check_small(self, newuri, size):
178 self.failUnless(isinstance(u, uri.LiteralFileURI))
179 self.failUnlessEqual(len(u.data), size)
# Large uploads must produce a CHKFileURI with 16-byte storage index and
# key; the newuri-conversion line (182) is missing.
181 def _check_large(self, newuri, size):
183 self.failUnless(isinstance(u, uri.CHKFileURI))
184 self.failUnless(isinstance(u.storage_index, str))
185 self.failUnlessEqual(len(u.storage_index), 16)
186 self.failUnless(isinstance(u.key, str))
187 self.failUnlessEqual(len(u.key), 16)
188 self.failUnlessEqual(u.size, size)
# Body missing (191-192); presumably slices/repeats DATA to `size` bytes.
190 def get_data(self, size):
193 def test_data_zero(self):
194 data = self.get_data(SIZE_ZERO)
195 d = self.u.upload_data(data)
196 d.addCallback(self._check_small, SIZE_ZERO)
199 def test_data_small(self):
200 data = self.get_data(SIZE_SMALL)
201 d = self.u.upload_data(data)
202 d.addCallback(self._check_small, SIZE_SMALL)
205 def test_data_large(self):
206 data = self.get_data(SIZE_LARGE)
207 d = self.u.upload_data(data)
208 d.addCallback(self._check_large, SIZE_LARGE)
# Forces a non-power-of-two segment count by overriding max_segment_size.
211 def test_data_large_odd_segments(self):
212 data = self.get_data(SIZE_LARGE)
213 segsize = int(SIZE_LARGE / 2.5)
214 # we want 3 segments, since that's not a power of two
215 d = self.u.upload_data(data, {"max_segment_size": segsize})
216 d.addCallback(self._check_large, SIZE_LARGE)
219 def test_filehandle_zero(self):
220 data = self.get_data(SIZE_ZERO)
221 d = self.u.upload_filehandle(StringIO(data))
222 d.addCallback(self._check_small, SIZE_ZERO)
225 def test_filehandle_small(self):
226 data = self.get_data(SIZE_SMALL)
227 d = self.u.upload_filehandle(StringIO(data))
228 d.addCallback(self._check_small, SIZE_SMALL)
231 def test_filehandle_large(self):
232 data = self.get_data(SIZE_LARGE)
233 d = self.u.upload_filehandle(StringIO(data))
234 d.addCallback(self._check_large, SIZE_LARGE)
# The filename tests presumably write `data` to `fn` on the missing lines
# before uploading by path — TODO confirm.
237 def test_filename_zero(self):
238 fn = "Uploader-test_filename_zero.data"
240 data = self.get_data(SIZE_ZERO)
243 d = self.u.upload_filename(fn)
244 d.addCallback(self._check_small, SIZE_ZERO)
247 def test_filename_small(self):
248 fn = "Uploader-test_filename_small.data"
250 data = self.get_data(SIZE_SMALL)
253 d = self.u.upload_filename(fn)
254 d.addCallback(self._check_small, SIZE_SMALL)
257 def test_filename_large(self):
258 fn = "Uploader-test_filename_large.data"
260 data = self.get_data(SIZE_LARGE)
263 d = self.u.upload_filename(fn)
264 d.addCallback(self._check_large, SIZE_LARGE)
# Upload tests against servers that are all "full" (refuse every share).
# NOTE(review): the `def setUp(self):` header (original line 268) is missing
# from this garbled chunk; the four lines below are its body.
267 class FullServer(unittest.TestCase):
269 self.node = FakeClient(mode="full")
270 self.u = upload.Uploader()
271 self.u.running = True
272 self.u.parent = self.node
def _should_fail(self, f):
    """Assert that the upload ended in a NotEnoughPeersError Failure.

    Attached with addBoth, so a successful (non-Failure) result also
    trips the assertion; `f` is included in the failure message for
    diagnosis.
    """
    self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughPeersError), f)
# A large upload against all-full servers must fail with
# NotEnoughPeersError (checked by _should_fail). The lines defining `data`
# (278) and returning `d` (281) are missing from this garbled chunk.
277 def test_data_large(self):
279 d = self.u.upload_data(data)
280 d.addBoth(self._should_fail)
# Tests for how the uploader distributes shares across peers.
283 class PeerSelection(unittest.TestCase):
def make_client(self, num_servers=50):
    """Build a fresh fake node and Uploader for one peer-selection test.

    `num_servers` controls how many FakePeers the good-mode FakeClient
    will offer during peer selection.
    """
    self.node = FakeClient(mode="good", num_servers=num_servers)
    self.u = upload.Uploader()
    self.u.running = True
    self.u.parent = self.node
# NOTE(review): extraction-garbled — fused line numbers, and the lines that
# call make_client, convert newuri (295), define the `_check` closures, and
# `return d` at the end of each test are all missing; code kept
# byte-identical. Each test sets DEFAULT_ENCODING_PARAMETERS = (k, happy, n)
# and then inspects self.node.last_peers for per-peer share allocations and
# query counts.
291 def get_data(self, size):
294 def _check_large(self, newuri, size):
296 self.failUnless(isinstance(u, uri.CHKFileURI))
297 self.failUnless(isinstance(u.storage_index, str))
298 self.failUnlessEqual(len(u.storage_index), 16)
299 self.failUnless(isinstance(u.key, str))
300 self.failUnlessEqual(len(u.key), 16)
301 self.failUnlessEqual(u.size, size)
303 def test_one_each(self):
304 # if we have 50 shares, and there are 50 peers, and they all accept a
305 # share, we should get exactly one share per peer
308 data = self.get_data(SIZE_LARGE)
309 self.u.DEFAULT_ENCODING_PARAMETERS = (25, 30, 50)
310 d = self.u.upload_data(data)
311 d.addCallback(self._check_large, SIZE_LARGE)
313 for p in self.node.last_peers:
314 allocated = p.ss.allocated
315 self.failUnlessEqual(len(allocated), 1)
316 self.failUnlessEqual(p.ss.queries, 1)
317 d.addCallback(_check)
320 def test_two_each(self):
321 # if we have 100 shares, and there are 50 peers, and they all accept
322 # all shares, we should get exactly two shares per peer
325 data = self.get_data(SIZE_LARGE)
326 self.u.DEFAULT_ENCODING_PARAMETERS = (50, 75, 100)
327 d = self.u.upload_data(data)
328 d.addCallback(self._check_large, SIZE_LARGE)
330 for p in self.node.last_peers:
331 allocated = p.ss.allocated
332 self.failUnlessEqual(len(allocated), 2)
333 self.failUnlessEqual(p.ss.queries, 2)
334 d.addCallback(_check)
337 def test_one_each_plus_one_extra(self):
338 # if we have 51 shares, and there are 50 peers, then one peer gets
339 # two shares and the rest get just one
342 data = self.get_data(SIZE_LARGE)
343 self.u.DEFAULT_ENCODING_PARAMETERS = (24, 41, 51)
344 d = self.u.upload_data(data)
345 d.addCallback(self._check_large, SIZE_LARGE)
# The missing lines (346-348, 354-355, 357) presumably initialize and
# populate the got_one/got_two tallies checked below — TODO confirm.
349 for p in self.node.last_peers:
350 allocated = p.ss.allocated
351 self.failUnless(len(allocated) in (1,2), len(allocated))
352 if len(allocated) == 1:
353 self.failUnlessEqual(p.ss.queries, 1)
356 self.failUnlessEqual(p.ss.queries, 2)
358 self.failUnlessEqual(len(got_one), 49)
359 self.failUnlessEqual(len(got_two), 1)
360 d.addCallback(_check)
363 def test_four_each(self):
364 # if we have 200 shares, and there are 50 peers, then each peer gets
365 # 4 shares. The design goal is to accomplish this with only two
369 data = self.get_data(SIZE_LARGE)
370 self.u.DEFAULT_ENCODING_PARAMETERS = (100, 150, 200)
371 d = self.u.upload_data(data)
372 d.addCallback(self._check_large, SIZE_LARGE)
374 for p in self.node.last_peers:
375 allocated = p.ss.allocated
376 self.failUnlessEqual(len(allocated), 4)
377 self.failUnlessEqual(p.ss.queries, 2)
378 d.addCallback(_check)
# Verifies the allocation histogram: with 10 shares on 3 servers, two
# servers get 3 shares and one gets 4 (the `counts = {}` line is missing).
381 def test_three_of_ten(self):
382 # if we have 10 shares and 3 servers, I want to see 3+3+4 rather than
386 data = self.get_data(SIZE_LARGE)
387 self.u.DEFAULT_ENCODING_PARAMETERS = (3, 5, 10)
388 d = self.u.upload_data(data)
389 d.addCallback(self._check_large, SIZE_LARGE)
392 for p in self.node.last_peers:
393 allocated = p.ss.allocated
394 counts[len(allocated)] = counts.get(len(allocated), 0) + 1
395 histogram = [counts.get(i, 0) for i in range(5)]
396 self.failUnlessEqual(histogram, [0,0,0,2,1])
397 d.addCallback(_check)
# upload with exactly 75 peers (shares_of_happiness)
# have a download fail
# cancel a download (need to implement more cancel stuff)