2 from allmydata.util import idlib
# Server-map update modes: these control how aggressively the peer-selection
# code queries storage servers when building a servermap.
MODE_CHECK = "MODE_CHECK" # query all peers
MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version
MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial
MODE_READ = "MODE_READ" # presumably: enough shares to read one version -- confirm at call sites
# NOTE(review): presumably raised when a mutation is attempted on something
# that is not mutable -- confirm at call sites. The class body is not visible
# in this view (likely a bare `pass` marker).
class NotMutableError(Exception):
class NeedMoreDataError(Exception):
    """Signals that an operation must be retried with a larger read.

    Carries the total number of bytes required (up through EOF) together
    with the offset and length of the encrypted private key inside that
    region, so the caller can fetch everything it needs in one pass.
    """
    def __init__(self, needed_bytes, encprivkey_offset, encprivkey_length):
        Exception.__init__(self)
        self.needed_bytes = needed_bytes            # byte count, up through EOF
        self.encprivkey_offset = encprivkey_offset  # where the encprivkey starts
        self.encprivkey_length = encprivkey_length  # how many bytes it spans
        # Compact human-readable summary showing only the byte count.
        # NOTE(review): the enclosing `def __str__(self):` (or __repr__)
        # header is not visible in this view -- confirm against the full file.
        return "<NeedMoreDataError (%d bytes)>" % self.needed_bytes
class UncoordinatedWriteError(Exception):
    # NOTE(review): the method header (`def __repr__(self):` or similar) for
    # the return statement below is not visible in this view.
        return "<%s -- You, oh user, tried to change a file or directory at the same time as another process was trying to change it. To avoid data loss, don't do this. Please see docs/write_coordination.html for details.>" % (self.__class__.__name__,)
# NOTE(review): presumably raised when no recoverable version of a mutable
# file can be found -- confirm at call sites. The class body is not visible
# in this view.
class UnrecoverableFileError(Exception):
class CorruptShareError(Exception):
    def __init__(self, peerid, shnum, reason):
        # Store the details in .args so generic Exception machinery
        # (formatting, pickling) sees them.
        self.args = (peerid, shnum, reason)
        # NOTE(review): the assignments of self.peerid/self.shnum/self.reason
        # and the `def __str__(self):` header appear elided in this view; the
        # code below reads self.peerid.
        # Abbreviate the peerid to its first 8 base32 characters for display.
        short_peerid = idlib.nodeid_b2a(self.peerid)[:8]
        # NOTE(review): the format string lacks a closing '>' and the argument
        # tuple continues on a line not visible in this view.
        return "<CorruptShareError peerid=%s shnum[%d]: %s" % (short_peerid,
class DictOfSets(dict):
    """A dict mapping each key to a set of values (a simple multimap)."""
    def add(self, key, value):
        # NOTE(review): the branch that extends an existing per-key set is
        # elided in this view; the line below handles the key-not-present
        # case by creating a fresh one-element set.
            self[key] = set([value])
    def discard(self, key, value):
        # Remove `value` from the key's set if present.
        # NOTE(review): guard and empty-set-cleanup lines around this call
        # are not visible in this view -- confirm against the full file.
        self[key].discard(value)
    """I cache share data, to reduce the number of round trips used during
    mutable file operations. All of the data in my cache is for a single
    storage index, but I will keep information on multiple shares (and
    multiple versions) for that storage index.

    My cache is indexed by a (verinfo, shnum) tuple.

    My cache entries contain a set of non-overlapping byteranges: (start,
    data, timestamp) tuples.
    """
        # Cache state: maps (verinfo, shnum) -> set of (start, data,
        # timestamp) byterange entries.  NOTE(review): the enclosing
        # `def __init__` header is not visible in this view.
        self.cache = DictOfSets()
    def _does_overlap(self, x_start, x_length, y_start, y_length):
        # Normalize so that 'x' names the earlier-starting range.
        # NOTE(review): a guarding comparison (presumably
        # `if x_start < y_start:`) appears elided in this view -- an
        # unconditional swap alone would not normalize ordering; confirm
        # against the full file.
        x_start, y_start = y_start, x_start
        x_length, y_length = y_length, x_length
        # Exclusive end offsets of each range; the overlap test itself
        # continues on lines not visible in this view.
        x_end = x_start + x_length
        y_end = y_start + y_length
        # this just returns a boolean. Eventually we'll want a form that
    def _inside(self, x_start, x_length, y_start, y_length):
        # Exclusive end offsets for the candidate range (x) and the container
        # range (y); the containment test (is [x_start, x_end) fully within
        # [y_start, y_end)?) follows on lines not visible in this view.
        x_end = x_start + x_length
        y_end = y_start + y_length
106 def add(self, verinfo, shnum, offset, data, timestamp):
107 index = (verinfo, shnum)
108 self.cache.add(index, (offset, data, timestamp) )
    def read(self, verinfo, shnum, offset, length):
        """Try to satisfy a read request from cache.
        Returns (data, timestamp), or (None, None) if the cache did not hold
        the requested byterange.
        """
        # TODO: join multiple fragments, instead of only returning a hit if
        # we have a fragment that contains the whole request
        index = (verinfo, shnum)
        # Scan every cached byterange recorded for this (verinfo, shnum).
        for entry in self.cache.get(index, set()):
            (e_start, e_data, e_timestamp) = entry
            # Only a fragment that fully contains the requested window is a
            # hit (see the TODO above).
            if self._inside(offset, length, e_start, len(e_data)):
                # Slice the requested window out of the cached fragment.
                want_start = offset - e_start
                want_end = offset+length - e_start
                return (e_data[want_start:want_end], e_timestamp)
        # NOTE(review): the fall-through `return (None, None)` miss path lies
        # past the end of this view -- confirm against the full file.