From: Daira Hopwood
Date: Wed, 1 May 2013 02:41:41 +0000 (+0100)
Subject: Cleanup: improve error reporting for DataTooLargeError
X-Git-Url: https://git.rkrishnan.org/pf/content/en.html?a=commitdiff_plain;h=0336046e3cf7b19cdd604b65fcbe57ba7ca878a6;p=tahoe-lafs%2Ftahoe-lafs.git

Cleanup: improve error reporting for DataTooLargeError
(and fix an off-by-one error in MutableDiskShare._write_share_data).

Signed-off-by: Daira Hopwood
---

diff --git a/src/allmydata/storage/backends/cloud/immutable.py b/src/allmydata/storage/backends/cloud/immutable.py
index 45e60bd1..41f0dbb3 100644
--- a/src/allmydata/storage/backends/cloud/immutable.py
+++ b/src/allmydata/storage/backends/cloud/immutable.py
@@ -75,7 +75,7 @@ class ImmutableCloudShareForWriting(CloudShareBase, ImmutableCloudShareMixin):
         seekpos = self.DATA_OFFSET + offset
         precondition(seekpos >= self._total_size, offset=offset, seekpos=seekpos, total_size=self._total_size)
         if offset + len(data) > self._allocated_data_length:
-            raise DataTooLargeError(self._allocated_data_length, offset, len(data))
+            raise DataTooLargeError(self._shnum, self._allocated_data_length, offset, len(data))
 
         self._set_size(self._total_size + len(data))
         return self._store_or_buffer( (seekpos, data, 0) )
diff --git a/src/allmydata/storage/backends/cloud/mutable.py b/src/allmydata/storage/backends/cloud/mutable.py
index 7ee1af62..2314f52f 100644
--- a/src/allmydata/storage/backends/cloud/mutable.py
+++ b/src/allmydata/storage/backends/cloud/mutable.py
@@ -219,7 +219,7 @@ class MutableCloudShare(CloudShareBase, CloudShareReaderMixin):
         length = len(data)
         precondition(offset >= 0, offset=offset)
         if offset + length > self.MAX_SIZE:
-            raise DataTooLargeError()
+            raise DataTooLargeError(self._shnum, self.MAX_SIZE, offset, length)
 
         if new_length is not None and new_length < offset + length:
             length = max(0, new_length - offset)
diff --git a/src/allmydata/storage/backends/disk/immutable.py b/src/allmydata/storage/backends/disk/immutable.py
index 056863c5..bd8af51c 100644
--- a/src/allmydata/storage/backends/disk/immutable.py
+++ b/src/allmydata/storage/backends/disk/immutable.py
@@ -190,7 +190,8 @@ class ImmutableDiskShare(object):
         length = len(data)
         precondition(offset >= 0, offset)
         if self._allocated_data_length is not None and offset+length > self._allocated_data_length:
-            raise DataTooLargeError(self._allocated_data_length, offset, length)
+            raise DataTooLargeError(self._shnum, self._allocated_data_length, offset, length)
+
         f = open(self._home, 'rb+')
         try:
             real_offset = self.DATA_OFFSET + offset
diff --git a/src/allmydata/storage/backends/disk/mutable.py b/src/allmydata/storage/backends/disk/mutable.py
index 253df514..c86abaa4 100644
--- a/src/allmydata/storage/backends/disk/mutable.py
+++ b/src/allmydata/storage/backends/disk/mutable.py
@@ -174,17 +174,13 @@ class MutableDiskShare(object):
     def _write_share_data(self, f, offset, data):
         length = len(data)
         precondition(offset >= 0, offset=offset)
-        precondition(offset + length < self.MAX_SIZE, offset=offset, length=length)
+        if offset + length > self.MAX_SIZE:
+            raise DataTooLargeError(self._shnum, self.MAX_SIZE, offset, length)
 
         data_length = self._read_data_length(f)
 
         if offset+length >= data_length:
-            # They are expanding their data size.
-
-            if offset+length > self.MAX_SIZE:
-                raise DataTooLargeError()
-
-            # Their data now fits in the current container. We must write
+            # They are expanding their data size. We must write
             # their new data and modify the recorded data size.
 
             # Fill any newly exposed empty space with 0's.
@@ -263,7 +259,7 @@ class MutableDiskShare(object):
         for (offset, data) in datav:
             precondition(offset >= 0, offset=offset)
             if offset + len(data) > self.MAX_SIZE:
-                raise DataTooLargeError()
+                raise DataTooLargeError(self._shnum, self.MAX_SIZE, offset, len(data))
 
         f = open(self._home, 'rb+')
         try:
diff --git a/src/allmydata/storage/common.py b/src/allmydata/storage/common.py
index 5417da30..959aa83b 100644
--- a/src/allmydata/storage/common.py
+++ b/src/allmydata/storage/common.py
@@ -11,7 +11,15 @@
 PREFIX = re.compile("^[%s]{2}$" % (base32.z_base_32_alphabet,))
 
 class DataTooLargeError(Exception):
-    pass
+    def __init__(self, shnum, allocated_data_length, offset, length):
+        self.shnum = shnum
+        self.allocated_data_length = allocated_data_length
+        self.offset = offset
+        self.length = length
+
+    def __str__(self):
+        return ("%s: attempted write to shnum %d of %d bytes at offset %d exceeds allocated data length of %d bytes"
+                % (self.__class__.__name__, self.shnum, self.length, self.offset, self.allocated_data_length))
 
 
 class CorruptStoredShareError(Exception):
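
A minimal sketch (illustrative only, not part of the patch) of how the enriched
exception reads once constructed. It assumes the patched allmydata.storage.common
is importable; the share number and byte counts below are arbitrary:

    from allmydata.storage.common import DataTooLargeError

    # Construct the exception the way the patched backends now do, with the
    # share number, allocated length, and the offending write parameters.
    err = DataTooLargeError(shnum=3, allocated_data_length=4096, offset=4000, length=200)
    print(str(err))
    # e.g. "DataTooLargeError: attempted write to shnum 3 of 200 bytes at offset 4000
    #       exceeds allocated data length of 4096 bytes"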