2 from allmydata.util import idlib
3 from allmydata.util.dictutil import DictOfSets
# Query-mode constants: callers pass one of these to select how thoroughly
# peers are queried when building a map of shares. (Meanings per the inline
# comments; the code that consumes them is outside this chunk.)
MODE_CHECK = "MODE_CHECK" # query all peers
MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version
MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial
MODE_READ = "MODE_READ" # NOTE(review): undocumented in source; presumably a
                        # lighter-weight query than MODE_CHECK -- confirm
11 class NotMutableError(Exception):
class NeedMoreDataError(Exception):
    """Presumably raised when a read fetched too small a prefix of a share;
    carries how many bytes are needed and where the encrypted private key
    sits within the share -- TODO confirm against the raising caller."""
    def __init__(self, needed_bytes, encprivkey_offset, encprivkey_length):
        Exception.__init__(self)
        self.needed_bytes = needed_bytes # up through EOF
        # Byterange of the encrypted private key inside the share.
        self.encprivkey_offset = encprivkey_offset
        self.encprivkey_length = encprivkey_length
    # NOTE(review): the method header (likely 'def __str__(self):' or
    # 'def __repr__(self):') for the following return statement is missing
    # from this chunk of the file.
        return "<NeedMoreDataError (%d bytes)>" % self.needed_bytes
class UncoordinatedWriteError(Exception):
    """Raised when a change collides with another process changing the same
    file or directory at the same time (per the message text below)."""
    # NOTE(review): the method header (likely 'def __repr__(self):') for the
    # following return statement is missing from this chunk of the file.
        return ("<%s -- You, oh user, tried to change a file or directory "
                "at the same time as another process was trying to change it. "
                " To avoid data loss, don't do this. Please see "
                "docs/write_coordination.html for details.>" %
                (self.__class__.__name__,))
31 class UnrecoverableFileError(Exception):
class NotEnoughServersError(Exception):
    """Raised when shares could not be placed on enough working servers.

    Possible causes: every server was full or erroring, a local bug made
    all server requests fail in the same way, or there were zero servers
    to begin with. The first error received (if any) is kept in my
    .first_error attribute.
    """
    def __init__(self, why, first_error=None):
        # Hand both values to Exception so they appear in .args and in the
        # default str()/repr() output.
        Exception.__init__(self, why, first_error)
        self.first_error = first_error
44 class CorruptShareError(Exception):
45 def __init__(self, peerid, shnum, reason):
46 self.args = (peerid, shnum, reason)
51 short_peerid = idlib.nodeid_b2a(self.peerid)[:8]
52 return "<CorruptShareError peerid=%s shnum[%d]: %s" % (short_peerid,
class UnknownVersionError(Exception):
    """Raised when a received share carries a version we don't recognize."""
60 """I cache share data, to reduce the number of round trips used during
61 mutable file operations. All of the data in my cache is for a single
62 storage index, but I will keep information on multiple shares (and
63 multiple versions) for that storage index.
65 My cache is indexed by a (verinfo, shnum) tuple.
67 My cache entries contain a set of non-overlapping byteranges: (start,
68 data, timestamp) tuples.
72 self.cache = DictOfSets()
76 self.cache = DictOfSets()
78 def _does_overlap(self, x_start, x_length, y_start, y_length):
80 x_start, y_start = y_start, x_start
81 x_length, y_length = y_length, x_length
82 x_end = x_start + x_length
83 y_end = y_start + y_length
84 # this just returns a boolean. Eventually we'll want a form that
97 def _inside(self, x_start, x_length, y_start, y_length):
98 x_end = x_start + x_length
99 y_end = y_start + y_length
100 if x_start < y_start:
110 def add(self, verinfo, shnum, offset, data, timestamp):
111 index = (verinfo, shnum)
112 self.cache.add(index, (offset, data, timestamp) )
114 def read(self, verinfo, shnum, offset, length):
115 """Try to satisfy a read request from cache.
116 Returns (data, timestamp), or (None, None) if the cache did not hold
120 # TODO: join multiple fragments, instead of only returning a hit if
121 # we have a fragment that contains the whole request
123 index = (verinfo, shnum)
125 for entry in self.cache.get(index, set()):
126 (e_start, e_data, e_timestamp) = entry
127 if self._inside(offset, length, e_start, len(e_data)):
128 want_start = offset - e_start
129 want_end = offset+length - e_start
130 return (e_data[want_start:want_end], e_timestamp)