def get_storage_index(self):
return self._uri._filenode_uri.storage_index
- def check(self, verify=False):
+ def check(self, monitor, verify=False):
"""Perform a file check. See IChecker.check for details."""
- return self._node.check(verify)
- def check_and_repair(self, verify=False):
- return self._node.check_and_repair(verify)
+ return self._node.check(monitor, verify)
+ def check_and_repair(self, monitor, verify=False):
+ return self._node.check_and_repair(monitor, verify)
def list(self):
"""I return a Deferred that fires with a dictionary mapping child
def add_node(self, node, childpath):
if self._repair:
- d = node.check_and_repair(self._verify)
+ d = node.check_and_repair(self.monitor, self._verify)
d.addCallback(self._results.add_check_and_repair, childpath)
else:
- d = node.check(self._verify)
+ d = node.check(self.monitor, self._verify)
d.addCallback(self._results.add_check, childpath)
d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
return d
def get_storage_index(self):
return self.u.storage_index
- def check(self, verify=False):
+ def check(self, monitor, verify=False):
storage_index = self.u.storage_index
k = self.u.needed_shares
N = self.u.total_shares
v = self.checker_class(self._client, storage_index, k, N)
return v.start()
- def check_and_repair(self, verify=False):
+ def check_and_repair(self, monitor, verify=False):
# this is a stub, to allow the deep-check tests to pass.
#raise NotImplementedError("not implemented yet")
from allmydata.checker_results import CheckAndRepairResults
def get_storage_index(self):
return None
- def check(self, verify=False):
+ def check(self, monitor, verify=False):
return defer.succeed(None)
- def check_and_repair(self, verify=False):
+ def check_and_repair(self, monitor, verify=False):
return defer.succeed(None)
def download(self, target):
download. This may be None if there is no storage index (i.e. LIT
files)."""
- def check(verify=False, repair=False):
- """Perform a file check. See IChecker.check for details.
-
- The default mode is named 'check' and simply asks each server whether
- or not it has a share, and believes the answers. If verify=True, this
- switches into the 'verify' mode, in which every byte of every share
- is retrieved, and all hashes are verified. This uses much more
- network and disk bandwidth than simple checking, especially for large
- files, but will catch disk errors.
-
- The default repair=False argument means that files which are not
- perfectly healthy will be reported, but not fixed. When repair=True,
- the node will attempt to repair the file first. The results will
- indicate the initial status of the file in either case. If repair was
- attempted, the results will indicate that too.
- """
-
def is_readonly():
"""Return True if this reference provides mutable access to the given
file or directory (i.e. if you can modify it), or False if not. Note
"""TODO: how should this work?"""
class ICheckable(Interface):
- def check(verify=False):
+ def check(monitor, verify=False):
"""Check upon my health, optionally repairing any problems.
This returns a Deferred that fires with an instance that provides
ICheckerResults, or None if the object is non-distributed (i.e. LIT
files).
+ The monitor will be checked periodically to see if the operation has
+ been cancelled. If so, no new queries will be sent, and the Deferred
+ will fire (with an OperationCancelledError) immediately.
+
Filenodes and dirnodes (which provide IFilesystemNode) are also
checkable. Instances that represent verifier-caps will be checkable
but not downloadable. Some objects (like LIT files) do not actually
failures during retrieval, or is malicious or buggy, then
verification will detect the problem, but checking will not.
- If repair=True, then a non-healthy result will cause an immediate
- repair operation, to generate and upload new shares. After repair,
- the file will be as healthy as we can make it. Details about what
- sort of repair is done will be put in the checker results. My
- Deferred will not fire until the repair is complete.
-
TODO: any problems seen during checking will be reported to the
health-manager.furl, a centralized object which is responsible for
figuring out why files are unhealthy so corrective action can be
taken.
"""
- def check_and_repair(verify=False):
+ def check_and_repair(monitor, verify=False):
"""Like check(), but if the file/directory is not healthy, attempt to
repair the damage.
+ Any non-healthy result will cause an immediate repair operation, to
+ generate and upload new shares. After repair, the file will be as
+ healthy as we can make it. Details about what sort of repair is done
+ will be put in the check-and-repair results. The Deferred will not
+ fire until the repair is complete.
+
This returns a Deferred which fires with an instance of
ICheckAndRepairResults."""
class MutableChecker:
- def __init__(self, node):
+ def __init__(self, node, monitor):
self._node = node
+ self._monitor = monitor
self.bad_shares = [] # list of (nodeid,shnum,failure)
self._storage_index = self._node.get_storage_index()
self.results = CheckerResults(self._storage_index)
def check(self, verify=False):
servermap = ServerMap()
- u = ServermapUpdater(self._node, servermap, MODE_CHECK)
+ u = ServermapUpdater(self._node, self._monitor, servermap, MODE_CHECK)
d = u.update()
d.addCallback(self._got_mapupdate_results)
if verify:
# the file is healthy if there is exactly one recoverable version, it
# has at least N distinct shares, and there are no unrecoverable
# versions: all existing shares will be for the same version.
+ self._monitor.raise_if_cancelled()
self.best_version = None
num_recoverable = len(servermap.recoverable_versions())
if num_recoverable:
d = self._do_read(ss, peerid, self._storage_index, [shnum], readv)
d.addCallback(self._got_answer, peerid, servermap)
dl.append(d)
- return defer.DeferredList(dl, fireOnOneErrback=True)
+ return defer.DeferredList(dl, fireOnOneErrback=True, consumeErrors=True)
def _do_read(self, ss, peerid, storage_index, shnums, readv):
# isolate the callRemote to a separate method, so tests can subclass
return counters
def _fill_checker_results(self, smap, r):
+ self._monitor.raise_if_cancelled()
r.set_servermap(smap.copy())
healthy = True
data = {}
class MutableCheckAndRepairer(MutableChecker):
- def __init__(self, node):
- MutableChecker.__init__(self, node)
+ def __init__(self, node, monitor):
+ MutableChecker.__init__(self, node, monitor)
self.cr_results = CheckAndRepairResults(self._storage_index)
self.cr_results.pre_repair_results = self.results
self.need_repair = False
return d
def _maybe_repair(self, res):
+ self._monitor.raise_if_cancelled()
if not self.need_repair:
self.cr_results.post_repair_results = self.results
return
from allmydata.util import hashutil, log
from allmydata.util.assertutil import precondition
from allmydata.uri import WriteableSSKFileURI
+from allmydata.monitor import Monitor
from allmydata.immutable.encode import NotEnoughSharesError
from pycryptopp.publickey import rsa
from pycryptopp.cipher.aes import AES
#################################
# ICheckable
- def check(self, verify=False):
- checker = self.checker_class(self)
+ def check(self, monitor, verify=False):
+ checker = self.checker_class(self, monitor)
return checker.check(verify)
- def check_and_repair(self, verify=False):
- checker = self.check_and_repairer_class(self)
+ def check_and_repair(self, monitor, verify=False):
+ checker = self.check_and_repairer_class(self, monitor)
return checker.check(verify)
#################################
servermap = ServerMap()
return self._update_servermap(servermap, mode)
def _update_servermap(self, servermap, mode):
- u = ServermapUpdater(self, servermap, mode)
+ u = ServermapUpdater(self, Monitor(), servermap, mode)
self._client.notify_mapupdate(u.get_status())
return u.update()
class ServermapUpdater:
- def __init__(self, filenode, servermap, mode=MODE_READ):
+ def __init__(self, filenode, monitor, servermap, mode=MODE_READ):
"""I update a servermap, locating a sufficient number of useful
shares and remembering where they are located.
"""
self._node = filenode
+ self._monitor = monitor
self._servermap = servermap
self.mode = mode
self._running = True
return self.my_uri
def get_verifier(self):
return IURI(self.my_uri).get_verifier()
- def check(self, verify=False):
+ def check(self, monitor, verify=False):
r = CheckerResults(self.storage_index)
is_bad = self.bad_shares.get(self.storage_index, None)
data = {}
r.problems = []
r.set_data(data)
return defer.succeed(r)
- def check_and_repair(self, verify=False):
+ def check_and_repair(self, monitor, verify=False):
- d = self.check(verify)
+ d = self.check(monitor, verify)
def _got(cr):
r = CheckAndRepairResults(self.storage_index)
def get_storage_index(self):
return self.storage_index
- def check(self, verify=False):
+ def check(self, monitor, verify=False):
r = CheckerResults(self.storage_index)
is_bad = self.bad_shares.get(self.storage_index, None)
data = {}
r.set_data(data)
return defer.succeed(r)
- def check_and_repair(self, verify=False):
+ def check_and_repair(self, monitor, verify=False):
- d = self.check(verify)
+ d = self.check(monitor, verify)
def _got(cr):
r = CheckAndRepairResults(self.storage_index)
INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
ExistingChildError, IDeepCheckResults, IDeepCheckAndRepairResults
from allmydata.util import hashutil, testutil
+from allmydata.monitor import Monitor
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
FakeDirectoryNode, create_chk_filenode
from allmydata.checker_results import CheckerResults, CheckAndRepairResults
def test_check(self):
d = self.client.create_empty_dirnode()
- d.addCallback(lambda dn: dn.check())
+ d.addCallback(lambda dn: dn.check(Monitor()))
def _done(res):
self.failUnless(res.is_healthy())
d.addCallback(_done)
from twisted.trial import unittest
from allmydata import uri
+from allmydata.monitor import Monitor
from allmydata.immutable import filenode, download
from allmydata.mutable.node import MutableFileNode
from allmydata.util import hashutil
u = uri.LiteralFileURI(data=DATA)
fn1 = filenode.LiteralFileNode(u, None)
- d = fn1.check()
+ d = fn1.check(Monitor())
def _check_checker_results(cr):
self.failUnlessEqual(cr, None)
d.addCallback(_check_checker_results)
- d.addCallback(lambda res: fn1.check(verify=True))
+ d.addCallback(lambda res: fn1.check(Monitor(), verify=True))
d.addCallback(_check_checker_results)
return d
from allmydata.immutable import encode, upload
from allmydata.test.common import SystemTestMixin, ShareManglingMixin
from allmydata.util import testutil
+from allmydata.monitor import Monitor
from allmydata.interfaces import IURI
from twisted.internet import defer
from twisted.trial import unittest
# The following process of leaving 8 of the shares deleted and asserting that you can't
# repair it is more to test this test code than to test the Tahoe code...
def _then_repair(unused=None):
- d2 = self.filenode.check_and_repair(verify=False)
+ d2 = self.filenode.check_and_repair(Monitor(), verify=False)
def _after_repair(checkandrepairresults):
prerepairres = checkandrepairresults.get_pre_repair_results()
postrepairres = checkandrepairresults.get_post_repair_results()
def _check1(filenode):
before_check_reads = self._count_reads()
- d2 = filenode.check(verify=False)
+ d2 = filenode.check(Monitor(), verify=False)
def _after_check(checkresults):
after_check_reads = self._count_reads()
self.failIf(after_check_reads - before_check_reads > 0, after_check_reads - before_check_reads)
d.addCallback(lambda ignore: self.replace_shares({}, storage_index=self.uri.storage_index))
def _check2(ignored):
before_check_reads = self._count_reads()
- d2 = self.filenode.check(verify=False)
+ d2 = self.filenode.check(Monitor(), verify=False)
def _after_check(checkresults):
after_check_reads = self._count_reads()
def _check1(filenode):
before_check_reads = self._count_reads()
- d2 = filenode.check(verify=True)
+ d2 = filenode.check(Monitor(), verify=True)
def _after_check(checkresults):
after_check_reads = self._count_reads()
# print "delta was ", after_check_reads - before_check_reads
def _check2(ignored):
before_check_reads = self._count_reads()
- d2 = self.filenode.check(verify=True)
+ d2 = self.filenode.check(Monitor(), verify=True)
def _after_check(checkresults):
after_check_reads = self._count_reads()
before_repair_reads = self._count_reads()
before_repair_allocates = self._count_allocates()
- d2 = filenode.check_and_repair(verify=False)
+ d2 = filenode.check_and_repair(Monitor(), verify=False)
def _after_repair(checkandrepairresults):
prerepairres = checkandrepairresults.get_pre_repair_results()
postrepairres = checkandrepairresults.get_post_repair_results()
before_repair_reads = self._count_reads()
before_repair_allocates = self._count_allocates()
- d2 = filenode.check_and_repair(verify=False)
+ d2 = filenode.check_and_repair(Monitor(), verify=False)
def _after_repair(checkandrepairresults):
prerepairres = checkandrepairresults.get_pre_repair_results()
postrepairres = checkandrepairresults.get_post_repair_results()
before_repair_reads = self._count_reads()
before_repair_allocates = self._count_allocates()
- d2 = filenode.check_and_repair(verify=False)
+ d2 = filenode.check_and_repair(Monitor(), verify=False)
def _after_repair(checkandrepairresults):
prerepairres = checkandrepairresults.get_pre_repair_results()
postrepairres = checkandrepairresults.get_post_repair_results()
from allmydata.util.fileutil import make_dirs
from allmydata.interfaces import IURI, IMutableFileURI, IUploadable, \
FileTooLargeError, IRepairResults
+from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from foolscap.eventual import eventually, fireEventually
from foolscap.logging import log
def make_servermap(self, mode=MODE_CHECK, fn=None):
if fn is None:
fn = self._fn
- smu = ServermapUpdater(fn, ServerMap(), mode)
+ smu = ServermapUpdater(fn, Monitor(), ServerMap(), mode)
d = smu.update()
return d
def update_servermap(self, oldmap, mode=MODE_CHECK):
- smu = ServermapUpdater(self._fn, oldmap, mode)
+ smu = ServermapUpdater(self._fn, Monitor(), oldmap, mode)
d = smu.update()
return d
def make_servermap(self, mode=MODE_READ, oldmap=None):
if oldmap is None:
oldmap = ServerMap()
- smu = ServermapUpdater(self._fn, oldmap, mode)
+ smu = ServermapUpdater(self._fn, Monitor(), oldmap, mode)
d = smu.update()
return d
def test_check_good(self):
- d = self._fn.check()
+ d = self._fn.check(Monitor())
d.addCallback(self.check_good, "test_check_good")
return d
def test_check_no_shares(self):
for shares in self._storage._peers.values():
shares.clear()
- d = self._fn.check()
+ d = self._fn.check(Monitor())
d.addCallback(self.check_bad, "test_check_no_shares")
return d
for shnum in shares.keys():
if shnum > 0:
del shares[shnum]
- d = self._fn.check()
+ d = self._fn.check(Monitor())
d.addCallback(self.check_bad, "test_check_not_enough_shares")
return d
def test_check_all_bad_sig(self):
corrupt(None, self._storage, 1) # bad sig
- d = self._fn.check()
+ d = self._fn.check(Monitor())
d.addCallback(self.check_bad, "test_check_all_bad_sig")
return d
def test_check_all_bad_blocks(self):
corrupt(None, self._storage, "share_data", [9]) # bad blocks
# the Checker won't notice this.. it doesn't look at actual data
- d = self._fn.check()
+ d = self._fn.check(Monitor())
d.addCallback(self.check_good, "test_check_all_bad_blocks")
return d
def test_verify_good(self):
- d = self._fn.check(verify=True)
+ d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_good, "test_verify_good")
return d
def test_verify_all_bad_sig(self):
corrupt(None, self._storage, 1) # bad sig
- d = self._fn.check(verify=True)
+ d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_bad, "test_verify_all_bad_sig")
return d
def test_verify_one_bad_sig(self):
corrupt(None, self._storage, 1, [9]) # bad sig
- d = self._fn.check(verify=True)
+ d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_bad, "test_verify_one_bad_sig")
return d
def test_verify_one_bad_block(self):
corrupt(None, self._storage, "share_data", [9]) # bad blocks
# the Verifier *will* notice this, since it examines every byte
- d = self._fn.check(verify=True)
+ d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_bad, "test_verify_one_bad_block")
d.addCallback(self.check_expected_failure,
CorruptShareError, "block hash tree failure",
def test_verify_one_bad_sharehash(self):
corrupt(None, self._storage, "share_hash_chain", [9], 5)
- d = self._fn.check(verify=True)
+ d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
d.addCallback(self.check_expected_failure,
CorruptShareError, "corrupt hashes",
def test_verify_one_bad_encprivkey(self):
corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
- d = self._fn.check(verify=True)
+ d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
d.addCallback(self.check_expected_failure,
CorruptShareError, "invalid privkey",
corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
readonly_fn = self._fn.get_readonly()
# a read-only node has no way to validate the privkey
- d = readonly_fn.check(verify=True)
+ d = readonly_fn.check(Monitor(), verify=True)
d.addCallback(self.check_good,
"test_verify_one_bad_encprivkey_uncheckable")
return d
self.old_shares = []
d = self.publish_one()
d.addCallback(self.copy_shares)
- d.addCallback(lambda res: self._fn.check())
+ d.addCallback(lambda res: self._fn.check(Monitor()))
d.addCallback(lambda check_results: self._fn.repair(check_results))
def _check_results(rres):
self.failUnless(IRepairResults.providedBy(rres))
self._set_versions({0:3,2:3,4:3,6:3,8:3,
1:4,3:4,5:4,7:4,9:4}))
d.addCallback(self.copy_shares)
- d.addCallback(lambda res: self._fn.check())
+ d.addCallback(lambda res: self._fn.check(Monitor()))
def _try_repair(check_results):
ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
def make_servermap(self, mode=MODE_READ, oldmap=None):
if oldmap is None:
oldmap = ServerMap()
- smu = ServermapUpdater(self._fn, oldmap, mode)
+ smu = ServermapUpdater(self._fn, Monitor(), oldmap, mode)
d = smu.update()
return d
d = self._fn.download_best_version()
d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
# and the checker should report problems
- d.addCallback(lambda res: self._fn.check())
+ d.addCallback(lambda res: self._fn.check(Monitor()))
d.addCallback(self.check_bad, "test_multiple_versions")
# but if everything is at version 2, that's what we should download
from allmydata.interfaces import IDirectoryNode, IFileNode, IFileURI, \
ICheckerResults, ICheckAndRepairResults, IDeepCheckResults, \
IDeepCheckAndRepairResults
-from allmydata.monitor import OperationCancelledError
+from allmydata.monitor import Monitor, OperationCancelledError
from allmydata.mutable.common import NotMutableError
from allmydata.mutable import layout as mutable_layout
from foolscap import DeadReferenceError
ut = upload.Data("too big to be literal" * 200, convergence=None)
d = self._personal_node.add_file(u"big file", ut)
- d.addCallback(lambda res: self._personal_node.check())
+ d.addCallback(lambda res: self._personal_node.check(Monitor()))
def _check_dirnode_results(r):
self.failUnless(r.is_healthy())
d.addCallback(_check_dirnode_results)
- d.addCallback(lambda res: self._personal_node.check(verify=True))
+ d.addCallback(lambda res: self._personal_node.check(Monitor(), verify=True))
d.addCallback(_check_dirnode_results)
d.addCallback(lambda res: self._personal_node.get(u"big file"))
def _got_chk_filenode(n):
self.failUnless(isinstance(n, filenode.FileNode))
- d = n.check()
+ d = n.check(Monitor())
def _check_filenode_results(r):
self.failUnless(r.is_healthy())
d.addCallback(_check_filenode_results)
- d.addCallback(lambda res: n.check(verify=True))
+ d.addCallback(lambda res: n.check(Monitor(), verify=True))
d.addCallback(_check_filenode_results)
return d
d.addCallback(_got_chk_filenode)
d.addCallback(lambda res: self._personal_node.get(u"sekrit data"))
def _got_lit_filenode(n):
self.failUnless(isinstance(n, filenode.LiteralFileNode))
- d = n.check()
+ d = n.check(Monitor())
def _check_lit_filenode_results(r):
self.failUnlessEqual(r, None)
d.addCallback(_check_lit_filenode_results)
- d.addCallback(lambda res: n.check(verify=True))
+ d.addCallback(lambda res: n.check(Monitor(), verify=True))
d.addCallback(_check_lit_filenode_results)
return d
d.addCallback(_got_lit_filenode)
def do_test_good(self, ignored):
d = defer.succeed(None)
# check the individual items
- d.addCallback(lambda ign: self.root.check())
+ d.addCallback(lambda ign: self.root.check(Monitor()))
d.addCallback(self.check_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check())
+ d.addCallback(lambda ign: self.mutable.check(Monitor()))
d.addCallback(self.check_is_healthy, self.mutable, "mutable")
- d.addCallback(lambda ign: self.large.check())
+ d.addCallback(lambda ign: self.large.check(Monitor()))
d.addCallback(self.check_is_healthy, self.large, "large")
- d.addCallback(lambda ign: self.small.check())
+ d.addCallback(lambda ign: self.small.check(Monitor()))
d.addCallback(self.failUnlessEqual, None, "small")
# and again with verify=True
- d.addCallback(lambda ign: self.root.check(verify=True))
+ d.addCallback(lambda ign: self.root.check(Monitor(), verify=True))
d.addCallback(self.check_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check(verify=True))
+ d.addCallback(lambda ign: self.mutable.check(Monitor(), verify=True))
d.addCallback(self.check_is_healthy, self.mutable, "mutable")
- d.addCallback(lambda ign: self.large.check(verify=True))
+ d.addCallback(lambda ign: self.large.check(Monitor(), verify=True))
d.addCallback(self.check_is_healthy, self.large, "large",
incomplete=True)
- d.addCallback(lambda ign: self.small.check(verify=True))
+ d.addCallback(lambda ign: self.small.check(Monitor(), verify=True))
d.addCallback(self.failUnlessEqual, None, "small")
# and check_and_repair(), which should be a nop
- d.addCallback(lambda ign: self.root.check_and_repair())
+ d.addCallback(lambda ign: self.root.check_and_repair(Monitor()))
d.addCallback(self.check_and_repair_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check_and_repair())
+ d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor()))
d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable")
- d.addCallback(lambda ign: self.large.check_and_repair())
+ d.addCallback(lambda ign: self.large.check_and_repair(Monitor()))
d.addCallback(self.check_and_repair_is_healthy, self.large, "large")
- d.addCallback(lambda ign: self.small.check_and_repair())
+ d.addCallback(lambda ign: self.small.check_and_repair(Monitor()))
d.addCallback(self.failUnlessEqual, None, "small")
# check_and_repair(verify=True)
- d.addCallback(lambda ign: self.root.check_and_repair(verify=True))
+ d.addCallback(lambda ign: self.root.check_and_repair(Monitor(), verify=True))
d.addCallback(self.check_and_repair_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check_and_repair(verify=True))
+ d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor(), verify=True))
d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable")
- d.addCallback(lambda ign: self.large.check_and_repair(verify=True))
+ d.addCallback(lambda ign: self.large.check_and_repair(Monitor(), verify=True))
d.addCallback(self.check_and_repair_is_healthy, self.large, "large",
incomplete=True)
- d.addCallback(lambda ign: self.small.check_and_repair(verify=True))
+ d.addCallback(lambda ign: self.small.check_and_repair(Monitor(), verify=True))
d.addCallback(self.failUnlessEqual, None, "small")
from allmydata.uri import from_string_dirnode
from allmydata.interfaces import IDirectoryNode, IFileNode, IMutableFileNode, \
ExistingChildError
+from allmydata.monitor import Monitor
from allmydata.web.common import text_plain, WebError, \
IClient, IOpHandleTable, NeedOperationHandleError, \
boolean_of_arg, get_arg, get_root, \
verify = boolean_of_arg(get_arg(req, "verify", "false"))
repair = boolean_of_arg(get_arg(req, "repair", "false"))
if repair:
- d = self.node.check_and_repair(verify)
+ d = self.node.check_and_repair(Monitor(), verify)
d.addCallback(lambda res: CheckAndRepairResults(res))
else:
- d = self.node.check(verify)
+ d = self.node.check(Monitor(), verify)
d.addCallback(lambda res: CheckerResults(res))
return d
from nevow.inevow import IRequest
from allmydata.interfaces import IDownloadTarget, ExistingChildError
+from allmydata.monitor import Monitor
from allmydata.immutable.upload import FileHandle
from allmydata.immutable.filenode import LiteralFileNode
from allmydata.util import log
if isinstance(self.node, LiteralFileNode):
return defer.succeed(LiteralCheckerResults())
if repair:
- d = self.node.check_and_repair(verify)
+ d = self.node.check_and_repair(Monitor(), verify)
d.addCallback(lambda res: CheckAndRepairResults(res))
else:
- d = self.node.check(verify)
+ d = self.node.check(Monitor(), verify)
d.addCallback(lambda res: CheckerResults(res))
return d