2 from allmydata.util import idlib
3 from allmydata.util.spans import DataSpans
# Mode markers selecting how thorough a mutable-file peer query should be.
# Passed around as opaque string constants (consumers not visible in this chunk).
MODE_CHECK = "MODE_CHECK" # query all peers
MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version
MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial creation
MODE_READ = "MODE_READ" # presumably: enough shares to read -- confirm against callers
class NotWriteableError(Exception):
    """An attempt was made to modify something that is not writeable."""
class NeedMoreDataError(Exception):
    """A share fetch did not retrieve enough data.

    Carries the number of bytes needed (up through EOF) plus the offset and
    length of the encrypted private key, presumably so the caller can issue
    a larger re-fetch that covers it.
    """
    def __init__(self, needed_bytes, encprivkey_offset, encprivkey_length):
        Exception.__init__(self)
        self.needed_bytes = needed_bytes # up through EOF
        self.encprivkey_offset = encprivkey_offset
        self.encprivkey_length = encprivkey_length

    def __str__(self):
        # NOTE(review): the 'def __str__(self):' header was missing from the
        # garbled source, leaving this return orphaned; restored here.
        return "<NeedMoreDataError (%d bytes)>" % self.needed_bytes
class UncoordinatedWriteError(Exception):
    """Another process modified the same file/directory during our write."""
    def __repr__(self):
        # NOTE(review): the 'def __repr__(self):' header was missing from the
        # garbled source; restored. Message text is preserved verbatim.
        return ("<%s -- You, oh user, tried to change a file or directory "
                "at the same time as another process was trying to change it. "
                " To avoid data loss, don't do this. Please see "
                "docs/write_coordination.rst for details.>" %
                (self.__class__.__name__,))
class UnrecoverableFileError(Exception):
    """Raised when a file is not recoverable (raisers not visible in this chunk)."""
class NotEnoughServersError(Exception):
    """Share placement failed for lack of usable servers.

    Possible causes: every server was full or returned an error, a local
    bug made all server requests fail the same way, or there were zero
    servers to begin with. The first error received (if any) is kept in
    my .first_error attribute."""

    def __init__(self, why, first_error=None):
        # Remember the triggering error before delegating to the base class;
        # both values also end up in self.args for repr()/pickling.
        self.first_error = first_error
        super(NotEnoughServersError, self).__init__(why, first_error)
class CorruptShareError(Exception):
    """A corrupt share was received: identifies the peer, share number, and reason.

    self.args is assigned explicitly so all three fields show up in the
    standard Exception machinery (repr, pickling).
    """
    def __init__(self, peerid, shnum, reason):
        self.args = (peerid, shnum, reason)
        # NOTE(review): these three attribute assignments were missing from
        # the garbled source, yet __str__ reads them; restored.
        self.peerid = peerid
        self.shnum = shnum
        self.reason = reason

    def __str__(self):
        # idlib.nodeid_b2a renders the binary peerid; keep a short prefix only.
        short_peerid = idlib.nodeid_b2a(self.peerid)[:8]
        # NOTE(review): the format tuple was truncated in the garbled source;
        # restored to match the three %-placeholders.
        return "<CorruptShareError peerid=%s shnum[%d]: %s" % (short_peerid,
                                                               self.shnum,
                                                               self.reason)
# Raised when share-parsing code meets a version field it does not recognize.
class UnknownVersionError(Exception):
    """The share we received was of a version we don't recognize."""
"""I cache share data, to reduce the number of round trips used during
mutable file operations. All of the data in my cache is for a single
storage index, but I will keep information on multiple shares for
that storage index.

I maintain a highest-seen sequence number, and will flush all entries
each time this number increases (this doesn't necessarily imply that
all entries have the same sequence number).

My cache is indexed by a (verinfo, shnum) tuple.

My cache entries are DataSpans instances, each representing a set of
non-overlapping byteranges.
"""
# also used by unit tests
def add(self, verinfo, shnum, offset, data):
    """Record a span of share data in the cache.

    verinfo[0] is treated as the version's sequence number; when a higher
    sequence number than any seen before arrives, the whole cache is
    flushed first so entries from older versions don't linger.

    NOTE(review): several interior lines were missing from the garbled
    source (seqnum extraction, the flush body, the else-branch); restored.
    """
    seqnum = verinfo[0]
    # 'is None' guard: self.seqnum starts out unset; comparing int > None
    # raises TypeError on Python 3 (on Python 2 it meant "always flush the
    # first time", which this guard preserves).
    if self.seqnum is None or seqnum > self.seqnum:
        self._clear()
        self.seqnum = seqnum

    index = (verinfo, shnum)
    if index in self.cache:
        self.cache[index].add(offset, data)
    else:
        # First data for this (verinfo, shnum): start a fresh span set.
        spans = DataSpans()
        spans.add(offset, data)
        self.cache[index] = spans
def read(self, verinfo, shnum, offset, length):
    """Try to satisfy a read request from cache.

    Returns data, or None if the cache did not hold the entire requested
    span (per the original contract; the cached entry's .get decides
    whether a partial hit counts).

    NOTE(review): the docstring terminator was missing from the garbled
    source; restored, and the implicit fall-through made an explicit
    'return None'.
    """
    # TODO: perhaps return a DataSpans object representing the fragments
    # that we have, instead of only returning a hit if we can satisfy the
    # whole request from cache.
    index = (verinfo, shnum)
    if index in self.cache:
        return self.cache[index].get(offset, length)
    return None  # no cache entry at all for this (verinfo, shnum)