git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/commitdiff
Cleanup: improve error reporting for DataTooLargeError
authorDaira Hopwood <daira@jacaranda.org>
Wed, 1 May 2013 02:41:41 +0000 (03:41 +0100)
committerDaira Hopwood <daira@jacaranda.org>
Fri, 16 Oct 2015 16:53:02 +0000 (17:53 +0100)
(and fix an off-by-one error in MutableDiskShare._write_share_data).

Signed-off-by: Daira Hopwood <david-sarah@jacaranda.org>
src/allmydata/storage/backends/cloud/immutable.py
src/allmydata/storage/backends/cloud/mutable.py
src/allmydata/storage/backends/disk/immutable.py
src/allmydata/storage/backends/disk/mutable.py
src/allmydata/storage/common.py

index 45e60bd1dd8aa35f4d9a005cf3c817b76f9caa06..41f0dbb33d09ce4e4afe79bf398eb5fec896908f 100644 (file)
@@ -75,7 +75,7 @@ class ImmutableCloudShareForWriting(CloudShareBase, ImmutableCloudShareMixin):
         seekpos = self.DATA_OFFSET + offset
         precondition(seekpos >= self._total_size, offset=offset, seekpos=seekpos, total_size=self._total_size)
         if offset + len(data) > self._allocated_data_length:
-            raise DataTooLargeError(self._allocated_data_length, offset, len(data))
+            raise DataTooLargeError(self._shnum, self._allocated_data_length, offset, len(data))
 
         self._set_size(self._total_size + len(data))
         return self._store_or_buffer( (seekpos, data, 0) )
index 7ee1af62c20de9d83054d37c4146d28bce7d09e3..2314f52fe16f09297945e5fe767b5e0d08dc994e 100644 (file)
@@ -219,7 +219,7 @@ class MutableCloudShare(CloudShareBase, CloudShareReaderMixin):
             length = len(data)
             precondition(offset >= 0, offset=offset)
             if offset + length > self.MAX_SIZE:
-                raise DataTooLargeError()
+                raise DataTooLargeError(self._shnum, self.MAX_SIZE, offset, length)
 
             if new_length is not None and new_length < offset + length:
                 length = max(0, new_length - offset)
index 056863c5b151ee431dc5ee004a4c5a2911c960b7..bd8af51c42869d2fa459ed71f5ca705c19f1ab49 100644 (file)
@@ -190,7 +190,8 @@ class ImmutableDiskShare(object):
         length = len(data)
         precondition(offset >= 0, offset)
         if self._allocated_data_length is not None and offset+length > self._allocated_data_length:
-            raise DataTooLargeError(self._allocated_data_length, offset, length)
+            raise DataTooLargeError(self._shnum, self._allocated_data_length, offset, length)
+
         f = open(self._home, 'rb+')
         try:
             real_offset = self.DATA_OFFSET + offset
index 253df514704adf26c90971aff5f33ac64e30e7fa..c86abaa4fc41e6d4d4e141f75550e9393d414f47 100644 (file)
@@ -174,17 +174,13 @@ class MutableDiskShare(object):
     def _write_share_data(self, f, offset, data):
         length = len(data)
         precondition(offset >= 0, offset=offset)
-        precondition(offset + length < self.MAX_SIZE, offset=offset, length=length)
+        if offset + length > self.MAX_SIZE:
+            raise DataTooLargeError(self._shnum, self.MAX_SIZE, offset, length)
 
         data_length = self._read_data_length(f)
 
         if offset+length >= data_length:
-            # They are expanding their data size.
-
-            if offset+length > self.MAX_SIZE:
-                raise DataTooLargeError()
-
-            # Their data now fits in the current container. We must write
+            # They are expanding their data size. We must write
             # their new data and modify the recorded data size.
 
             # Fill any newly exposed empty space with 0's.
@@ -263,7 +259,7 @@ class MutableDiskShare(object):
         for (offset, data) in datav:
             precondition(offset >= 0, offset=offset)
             if offset + len(data) > self.MAX_SIZE:
-                raise DataTooLargeError()
+                raise DataTooLargeError(self._shnum, self.MAX_SIZE, offset, len(data))
 
         f = open(self._home, 'rb+')
         try:
index 5417da30e6d51dc89b6f222b6c0a621a6623fb2c..959aa83bd2669e7409edc3596b677d932d586422 100644 (file)
@@ -11,7 +11,15 @@ PREFIX = re.compile("^[%s]{2}$" % (base32.z_base_32_alphabet,))
 
 
 class DataTooLargeError(Exception):
-    pass
+    def __init__(self, shnum, allocated_data_length, offset, length):
+        self.shnum = shnum
+        self.allocated_data_length = allocated_data_length
+        self.offset = offset
+        self.length = length
+
+    def __str__(self):
+        return ("%s: attempted write to shnum %d of %d bytes at offset %d exceeds allocated data length of %d bytes"
+                % (self.__class__.__name__, self.shnum, self.length, self.offset, self.allocated_data_length))
 
 
 class CorruptStoredShareError(Exception):