From: Brian Warner
Date: Tue, 17 Apr 2007 02:29:57 +0000 (-0700)
Subject: encode: make MAX_SEGMENT_SIZE controllable, to support tests which force the use...
X-Git-Tag: tahoe_v0.1.0-0-UNSTABLE~88
X-Git-Url: https://git.rkrishnan.org/?a=commitdiff_plain;h=ff8cb4d32e38d3de77d54f4d3130e997211baffc;p=tahoe-lafs%2Ftahoe-lafs.git

encode: make MAX_SEGMENT_SIZE controllable, to support tests which force
the use of multiple segments.

Also, remove not-very-useful upload-side debug messages
---

diff --git a/src/allmydata/encode.py b/src/allmydata/encode.py
index 50c3c389..cc3c4f39 100644
--- a/src/allmydata/encode.py
+++ b/src/allmydata/encode.py
@@ -79,6 +79,12 @@ class Encoder(object):
     implements(IEncoder)
     NEEDED_SHARES = 25
     TOTAL_SHARES = 100
+    MAX_SEGMENT_SIZE = 2*MiB
+
+    def __init__(self, options={}):
+        object.__init__(self)
+        self.MAX_SEGMENT_SIZE = options.get("max_segment_size",
+                                            self.MAX_SEGMENT_SIZE)
 
     def setup(self, infile):
         self.infile = infile
@@ -89,7 +95,7 @@ class Encoder(object):
         self.num_shares = self.TOTAL_SHARES
         self.required_shares = self.NEEDED_SHARES
 
-        self.segment_size = min(2*MiB, self.file_size)
+        self.segment_size = min(self.MAX_SEGMENT_SIZE, self.file_size)
         # this must be a multiple of self.required_shares
         self.segment_size = mathutil.next_multiple(self.segment_size,
                                                    self.required_shares)
diff --git a/src/allmydata/test/test_encode.py b/src/allmydata/test/test_encode.py
index 08200e36..59419cf5 100644
--- a/src/allmydata/test/test_encode.py
+++ b/src/allmydata/test/test_encode.py
@@ -147,12 +147,12 @@ class Roundtrip(unittest.TestCase):
                    bucket_modes={}):
         if AVAILABLE_SHARES is None:
             AVAILABLE_SHARES = NUM_SHARES
-        e = encode.Encoder()
+        options = {"max_segment_size": 25} # force use of multiple segments
+        e = encode.Encoder(options)
         data = "happy happy joy joy" * 4
         e.setup(StringIO(data))
         assert e.num_shares == NUM_SHARES # else we'll be completely confused
-        e.segment_size = 25 # force use of multiple segments
         e.setup_codec() # need to rebuild the codec for that change
 
         assert (NUM_SEGMENTS-1)*e.segment_size < len(data) <= NUM_SEGMENTS*e.segment_size
diff --git a/src/allmydata/upload.py b/src/allmydata/upload.py
index fac636ac..8a65fc43 100644
--- a/src/allmydata/upload.py
+++ b/src/allmydata/upload.py
@@ -56,10 +56,10 @@ class PeerTracker:
         return (alreadygot, set(buckets.keys()))
 
 class FileUploader:
-    debug = False
 
-    def __init__(self, client):
+    def __init__(self, client, options={}):
         self._client = client
+        self._options = options
 
     def set_params(self, needed_shares, shares_of_happiness, total_shares):
         self.needed_shares = needed_shares
@@ -87,12 +87,10 @@ class FileUploader:
         string)."""
 
         log.msg("starting upload [%s]" % (idlib.b2a(self._verifierid),))
-        if self.debug:
-            print "starting upload"
         assert self.needed_shares
 
         # create the encoder, so we can know how large the shares will be
-        self._encoder = encode.Encoder()
+        self._encoder = encode.Encoder(self._options)
         self._encoder.setup(self._filehandle)
         share_size = self._encoder.get_share_size()
         block_size = self._encoder.get_block_size()
@@ -279,7 +277,6 @@ class Uploader(service.MultiService):
     implements(IUploader)
     name = "uploader"
     uploader_class = FileUploader
-    debug = False
 
     needed_shares = 25 # Number of shares required to reconstruct a file.
     desired_shares = 75 # We will abort an upload unless we can allocate space for at least this many.
@@ -294,18 +291,14 @@ class Uploader(service.MultiService):
         # note: this is only of the plaintext data, no encryption yet
         return hasher.digest()
 
-    def upload(self, f):
+    def upload(self, f, options={}):
         # this returns the URI
         assert self.parent
         assert self.running
         f = IUploadable(f)
         fh = f.get_filehandle()
-        u = self.uploader_class(self.parent)
-        if self.debug:
-            u.debug = True
+        u = self.uploader_class(self.parent, options)
         u.set_filehandle(fh)
-        # push two shares, require that we get two back. TODO: this is
-        # temporary, of course.
         u.set_params(self.needed_shares, self.desired_shares, self.total_shares)
         u.set_verifierid(self._compute_verifierid(fh))
         d = u.start()
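
Usage sketch (not part of the patch): the options dict introduced here rides from
Uploader.upload(f, options) through FileUploader into Encoder, and a
"max_segment_size" key overrides the 2*MiB class default. The snippet below is a
minimal Python 2 illustration assembled from the test hunk above; the import path,
the setup()/setup_codec() ordering, and the expected segment_size of 25 are
inferred from this diff rather than taken from documentation.

    from StringIO import StringIO
    from allmydata import encode

    # A tiny max_segment_size forces even a short file into several segments;
    # omitting the key falls back to Encoder.MAX_SEGMENT_SIZE (2*MiB).
    options = {"max_segment_size": 25}
    e = encode.Encoder(options)

    data = "happy happy joy joy" * 4   # 76 bytes of test data
    e.setup(StringIO(data))            # segment_size = min(25, file size)
    e.setup_codec()                    # codec must be built for that size

    # segment_size is rounded up to a multiple of required_shares (25 here),
    # so the 76-byte file spans four 25-byte segments.
    assert e.segment_size == 25

The same dict could be handed to Uploader.upload(f, {"max_segment_size": 25}) to
exercise the multi-segment path end to end, which is what the changed test relies on.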