2 from allmydata.util import idlib
3 from allmydata.util.dictutil import DictOfSets
# Servermap-update query modes. The values are self-describing strings so
# they read well in logs.
MODE_CHECK = "MODE_CHECK" # query all peers
MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version
MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial
MODE_READ = "MODE_READ" # NOTE(review): undocumented in original; presumably "enough peers to read once" -- confirm against the servermap updater
# NOTE(review): class body is not visible in this view; presumably raised
# when a mutating operation is attempted on read-only content -- confirm.
class NotMutableError(Exception):
class NeedMoreDataError(Exception):
    """Raised when a partial share read did not fetch enough bytes.

    Carries how many bytes are needed (up through EOF), plus the offset
    and length of the encrypted-private-key section (per the parameter
    names), so the caller can issue a larger follow-up read.
    """
    def __init__(self, needed_bytes, encprivkey_offset, encprivkey_length):
        Exception.__init__(self)
        self.needed_bytes = needed_bytes # up through EOF
        self.encprivkey_offset = encprivkey_offset
        self.encprivkey_length = encprivkey_length
    def __str__(self):
        # The visible 'return' had no enclosing method in the degraded
        # source; restored as __str__, matching the "<...>" message shape.
        return "<NeedMoreDataError (%d bytes)>" % self.needed_bytes
class UncoordinatedWriteError(Exception):
    """Raised when a write detects that another process modified the same
    file or directory at the same time (uncoordinated concurrent writes)."""
    def __repr__(self):
        # The visible 'return' had no enclosing method in the degraded
        # source; restored as __repr__ (the "<...>" form with the class
        # name interpolated). Message text preserved byte-for-byte.
        return ("<%s -- You, oh user, tried to change a file or directory "
                "at the same time as another process was trying to change it. "
                " To avoid data loss, don't do this. Please see "
                "docs/write_coordination.html for details.>" %
                (self.__class__.__name__,))
# NOTE(review): class body is not visible in this view; presumably raised
# when no recoverable version of a mutable file can be found -- confirm.
class UnrecoverableFileError(Exception):
class NotEnoughServersError(Exception):
    """Share placement failed: too few functioning servers accepted shares.

    This can happen when every server is full or erroring, when a local
    bug makes all server requests fail the same way, or when there are
    zero servers. The first error received (if any) is stored in my
    .first_error attribute."""
    def __init__(self, why, first_error=None):
        # Stash the first underlying failure before delegating to the
        # base class so both pieces ride along in .args as well.
        self.first_error = first_error
        Exception.__init__(self, why, first_error)
class CorruptShareError(Exception):
    """A share failed verification.

    Identifies the offending (peerid, shnum) pair and a human-readable
    reason string.
    """
    def __init__(self, peerid, shnum, reason):
        self.args = (peerid, shnum, reason)
        # The visible __str__ reads these attributes but the visible
        # __init__ never set them (lines lost in the degraded source);
        # restore the assignments.
        self.peerid = peerid
        self.shnum = shnum
        self.reason = reason
    def __str__(self):
        short_peerid = idlib.nodeid_b2a(self.peerid)[:8]
        # Completes the unterminated format expression from the source.
        return "<CorruptShareError peerid=%s shnum[%d]: %s" % (short_peerid,
                                                               self.shnum,
                                                               self.reason)
59 """I cache share data, to reduce the number of round trips used during
60 mutable file operations. All of the data in my cache is for a single
61 storage index, but I will keep information on multiple shares (and
62 multiple versions) for that storage index.
64 My cache is indexed by a (verinfo, shnum) tuple.
66 My cache entries contain a set of non-overlapping byteranges: (start,
67 data, timestamp) tuples.
self.cache = DictOfSets()  # maps (verinfo, shnum) -> set of (offset, data, timestamp); see add()/read()
self.cache = DictOfSets()  # NOTE(review): second occurrence belongs to a reset/clear method whose 'def' line is not visible here -- confirm
def _does_overlap(self, x_start, x_length, y_start, y_length):
    """Return True if the byteranges [x_start, x_start+x_length) and
    [y_start, y_start+y_length) share at least one byte.

    Zero-length ranges never overlap anything.
    """
    # Normalize so x is the later-starting range. The guard was lost in
    # the degraded source (the swap appeared unconditional); restored.
    if x_start < y_start:
        x_start, y_start = y_start, x_start
        x_length, y_length = y_length, x_length
    y_end = y_start + y_length
    # this just returns a boolean. Eventually we'll want a form that
    # returns a range.
    if not x_length or not y_length:
        return False # empty ranges do not overlap anything
    # After normalization (x_start >= y_start), the ranges overlap
    # exactly when x begins before y ends.
    return x_start < y_end
def _inside(self, x_start, x_length, y_start, y_length):
    """Return True if the range starting at x_start of x_length bytes
    lies entirely within the range starting at y_start of y_length bytes.

    Used by read() as _inside(offset, length, e_start, len(e_data)) to
    test whether a cached fragment covers the whole request. The
    comparison body was lost in the degraded source; restored.
    """
    x_end = x_start + x_length
    y_end = y_start + y_length
    # x must start no earlier than y, and end no later than y.
    return (y_start <= x_start) and (x_end <= y_end)
def add(self, verinfo, shnum, offset, data, timestamp):
    """Record one cached byterange of share data for (verinfo, shnum)."""
    cache_key = (verinfo, shnum)
    cache_entry = (offset, data, timestamp)
    self.cache.add(cache_key, cache_entry)
def read(self, verinfo, shnum, offset, length):
    """Try to satisfy a read request from cache.
    Returns (data, timestamp), or (None, None) if the cache did not hold
    the requested span.
    """
    # TODO: join multiple fragments, instead of only returning a hit if
    # we have a fragment that contains the whole request
    index = (verinfo, shnum)
    for entry in self.cache.get(index, set()):
        (e_start, e_data, e_timestamp) = entry
        if self._inside(offset, length, e_start, len(e_data)):
            # Slice the requested window out of the covering fragment.
            want_start = offset - e_start
            want_end = offset+length - e_start
            return (e_data[want_start:want_end], e_timestamp)
    # Cache miss: no single fragment covered the whole request. This
    # fallthrough (promised by the docstring) was missing from the
    # visible source; restored.
    return (None, None)