From 3bc4b015c8ee5b9a73745f6d2db1d27c08d58db3 Mon Sep 17 00:00:00 2001
From: Zooko O'Whielacronx
Date: Tue, 6 Jan 2009 12:24:04 -0700
Subject: [PATCH] immutable: fix the writing of share data size into share
 file in case the share file is used by a < v1.3.0 storage server

Brian noticed that the constant was wrong, and in fixing that I noticed
that we should be saturating instead of modding. This code would never
matter unless a server downgraded or a share migrated from Tahoe >=
v1.3.0 to Tahoe < v1.3.0. Even in that case, this bug would never
matter unless the share size were exactly 4,294,967,296 bytes long.

Brian, for good reason, wanted this to be spelled "2**32" instead of
"4294967296", but I couldn't stand to see a couple more Python
bytecodes interpreted in the middle of a core, frequent operation on
the server like immutable share creation.
---
 src/allmydata/storage.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/allmydata/storage.py b/src/allmydata/storage.py
index 74eeaa84..0ffb275e 100644
--- a/src/allmydata/storage.py
+++ b/src/allmydata/storage.py
@@ -115,8 +115,11 @@ class ShareFile:
             # The second field -- share data length -- is no longer used as of Tahoe v1.3.0, but
             # we continue to write it in there in case someone downgrades a storage server from
             # >= Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one server to another,
-            # etc.
-            f.write(struct.pack(">LLL", 1, max_size % 4294967295, 0))
+            # etc. We do saturation -- a share data length larger than what can fit into the
+            # field is marked as the largest length that can fit into the field. That way, even
+            # if this does happen, the old < v1.3.0 server will still allow clients to read the
+            # first part of the share.
+            f.write(struct.pack(">LLL", 1, min(4294967295, max_size), 0))
             f.close()
             self._lease_offset = max_size + 0x0c
             self._num_leases = 0
-- 
2.45.2
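
To make the saturating-versus-modding distinction concrete, here is a
minimal standalone sketch of the two behaviors (not part of the patch;
FIELD_MAX, pack_header, and pack_header_wrapping are illustrative names,
not identifiers from allmydata.storage):

    import struct

    FIELD_MAX = 4294967295  # largest value an unsigned 32-bit (">L") field can hold

    def pack_header(max_size):
        # Saturate: clamp an oversized share data length to FIELD_MAX, as the
        # patched line does with min(4294967295, max_size).
        return struct.pack(">LLL", 1, min(FIELD_MAX, max_size), 0)

    def pack_header_wrapping(max_size):
        # Mod: wraps around instead. A share of exactly 2**32 bytes is
        # recorded as length 1, since 4294967296 % 4294967295 == 1.
        return struct.pack(">LLL", 1, max_size % FIELD_MAX, 0)

    size = 2**32  # the share size the commit message calls out
    print(struct.unpack(">LLL", pack_header(size))[1])           # 4294967295
    print(struct.unpack(">LLL", pack_header_wrapping(size))[1])  # 1

Saturation keeps the recorded length as close to the truth as the field
allows, so a pre-1.3.0 server can still serve reads of the first part of
the share; wrapping records a tiny length and makes nearly all of the
share unreadable on such a server.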