From: Brian Warner
Date: Tue, 16 Oct 2007 18:00:29 +0000 (-0700)
Subject: encode.py: update comments, max_segment_size is now 2MiB
X-Git-Tag: allmydata-tahoe-0.7.0~370
X-Git-Url: https://git.rkrishnan.org/%5B/%5D%20/uri/%22doc.html/running.html?a=commitdiff_plain;h=6160af5f50738171d13fe19f27413c7af3722f36;p=tahoe-lafs%2Ftahoe-lafs.git

encode.py: update comments, max_segment_size is now 2MiB
---

diff --git a/src/allmydata/encode.py b/src/allmydata/encode.py
index 5a46826e..90ace0a5 100644
--- a/src/allmydata/encode.py
+++ b/src/allmydata/encode.py
@@ -286,8 +286,8 @@ class Encoder(object):
         # memory footprint: we only hold a tiny piece of the plaintext at any
         # given time. We build up a segment's worth of crypttext, then hand
         # it to the encoder. Assuming 3-of-10 encoding (3.3x expansion) and
-        # 1MiB max_segment_size, we get a peak memory footprint of 4.3*1MiB =
-        # 4.3MiB. Lowering max_segment_size to, say, 100KiB would drop the
+        # 2MiB max_segment_size, we get a peak memory footprint of 4.3*2MiB =
+        # 8.6MiB. Lowering max_segment_size to, say, 100KiB would drop the
         # footprint to 430KiB at the expense of more hash-tree overhead.
         d = self._gather_data(self.required_shares, input_piece_size,
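
For context, the arithmetic behind the updated comment: with k-of-N erasure
coding, the N shares together occupy roughly N/k times the segment size, and
the encoder also holds the segment's crypttext itself, for a combined
(1 + N/k) multiple, which the comment rounds to 4.3x for 3-of-10. A minimal
sketch of that calculation (the function name and constants below are
illustrative, not part of encode.py):

    def peak_footprint(max_segment_size, k=3, n=10):
        """Rough peak encoder memory: one segment of crypttext plus
        an n/k expansion for the encoded shares (illustrative only)."""
        expansion = n / k  # 3-of-10 -> ~3.33x, rounded to 3.3x in the comment
        return (1 + expansion) * max_segment_size

    MiB = 2 ** 20
    KiB = 2 ** 10
    print(peak_footprint(2 * MiB) / MiB)    # ~8.67, rounded to 8.6MiB above
    print(peak_footprint(100 * KiB) / KiB)  # ~433, rounded to 430KiB above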