4 from zope.interface import implements
5 from twisted.internet import defer
6 from foolscap.api import fireEventually
8 from allmydata.mutable.common import NotMutableError
9 from allmydata.mutable.filenode import MutableFileNode
10 from allmydata.unknown import UnknownNode
11 from allmydata.interfaces import IMutableFileNode, IDirectoryNode,\
12 IURI, IFileNode, IMutableFileURI, IFilesystemNode, \
13 ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
14 CannotPackUnknownNodeError
15 from allmydata.check_results import DeepCheckResults, \
16 DeepCheckAndRepairResults
17 from allmydata.monitor import Monitor
18 from allmydata.util import hashutil, mathutil, base32, log
19 from allmydata.util.assertutil import _assert, precondition
20 from allmydata.util.netstring import netstring, split_netstring
21 from allmydata.uri import NewDirectoryURI, LiteralFileURI, from_string
22 from pycryptopp.cipher.aes import AES
class CachingDict(dict):
    """A dict that remembers an optional serialized form for each value.

    For every key we keep a cached serialized (string) representation so
    callers can reuse a previously-computed encoding instead of
    re-serializing the value. Storing a value through __setitem__
    invalidates that key's cached serialization.
    """
    def __init__(self, *args):
        super(CachingDict, self).__init__(*args)
        # Maps key -> cached serialized form, or None when not cached.
        # Bug fix: this was never initialized, so __setitem__ and
        # get_both_items raised AttributeError on first use.
        self.serialized = {}

    def __setitem__(self, k, v):
        # Setting a new value invalidates any cached serialization for k.
        super(CachingDict, self).__setitem__(k, v)
        self.serialized[k] = None

    def get_both_items(self, k):
        """Return (serialized_form_or_None, value) for key k."""
        return (self.serialized.setdefault(k, None),
                super(CachingDict, self).__getitem__(k))

    def set_both_items(self, key, serialized, t):
        """Store both the serialized form and the value for 'key'."""
        self.serialized[key] = serialized
        super(CachingDict, self).__setitem__(key, t)
42 def __init__(self, node, name, must_exist=True):
45 self.must_exist = True
    def modify(self, old_contents, servermap, first_time):
        # Unpack the serialized directory, drop the named child, and return
        # the re-packed contents for the mutable-file modify() machinery.
        children = self.node._unpack_contents(old_contents)
        if self.name not in children:
            if first_time and self.must_exist:
                raise NoSuchChildError(self.name)
            # NOTE(review): when the child is absent and must_exist is
            # False, control falls through to the children[self.name]
            # lookup below, which would raise KeyError -- an early-return
            # path appears to be missing here.
        # Remember the removed child so callers can retrieve it afterwards.
        self.old_child, metadata = children[self.name]
        del children[self.name]
        new_contents = self.node._pack_contents(children)
        # NOTE(review): 'new_contents' is computed but never returned --
        # a trailing 'return' statement appears to be missing.
59 def __init__(self, node, name, metadata):
62 self.metadata = metadata
    def modify(self, old_contents, servermap, first_time):
        # Swap in the new metadata for the named child, keeping its node.
        children = self.node._unpack_contents(old_contents)
        if self.name not in children:
            raise NoSuchChildError(self.name)
        children[self.name] = (children[self.name][0], self.metadata)
        new_contents = self.node._pack_contents(children)
        # NOTE(review): 'new_contents' is computed but never returned --
        # a trailing 'return' statement appears to be missing.
74 def __init__(self, node, entries=None, overwrite=True):
78 self.entries = entries
79 self.overwrite = overwrite
81 def set_node(self, name, node, metadata):
82 precondition(isinstance(name, unicode), name)
83 precondition(IFilesystemNode.providedBy(node), node)
84 self.entries.append( [name, node, metadata] )
    def modify(self, old_contents, servermap, first_time):
        # Apply every queued entry to the unpacked child table, managing
        # ctime/mtime and the 'tahoe' system-metadata sub-dict.
        children = self.node._unpack_contents(old_contents)
        # NOTE(review): 'now' is used below but never assigned here --
        # a timestamp line appears to be missing.
        for e in self.entries:
            # NOTE(review): the guard lines that unpack/validate 'e' and
            # test 'if name in children:' appear to be missing before the
            # indented block below; indentation preserved as found.
            name, child, new_metadata = e
            assert _assert(IFilesystemNode.providedBy(child), child)
            assert isinstance(name, unicode)
                if not self.overwrite:
                    raise ExistingChildError("child '%s' already exists" % name)
                metadata = children[name][1].copy()
                metadata = {"ctime": now,
            # NOTE(review): the rest of this default-metadata dict (mtime,
            # tahoe sub-dict, closing brace) appears to be missing.
            if new_metadata is not None:
                # Overwrite all metadata.
                newmd = new_metadata.copy()
                # NOTE(review): guards/else branches around the 'tahoe'
                # key handling below appear to be missing.
                if newmd.has_key('tahoe'):
                if metadata.has_key('tahoe'):
                    newmd['tahoe'] = metadata['tahoe']
                # For backwards compatibility with Tahoe < 1.4.0:
                if "ctime" not in metadata:
                    metadata["ctime"] = now
                metadata["mtime"] = now
            # update timestamps
            sysmd = metadata.get('tahoe', {})
            if not 'linkcrtime' in sysmd:
                if "ctime" in metadata:
                    # In Tahoe < 1.4.0 we used the word "ctime" to mean what Tahoe >= 1.4.0
                    # calls "linkcrtime".
                    sysmd["linkcrtime"] = metadata["ctime"]
                    # NOTE(review): an 'else:' appears to be missing above
                    # the following fallback assignment.
                    sysmd["linkcrtime"] = now
            sysmd["linkmotime"] = now
            children[name] = (child, metadata)
        new_contents = self.node._pack_contents(children)
        # NOTE(review): 'new_contents' is computed but never returned.
class NewDirectoryNode:
    """A directory node: wraps a MutableFileNode whose contents are a
    serialized table of child entries (name, read-cap, encrypted
    write-cap, metadata).

    NOTE(review): this region of the file appears to have lines missing
    (several methods lack 'def' headers, guards, or 'return' statements);
    the notes below flag the places that look truncated.
    """
    implements(IDirectoryNode, ICheckable, IDeepCheckable)
    filenode_class = MutableFileNode

    def __init__(self, client):
        self._client = client
        # Cached length of the most recently downloaded serialized contents.
        self._most_recent_size = None

        # NOTE(review): the following 'return' has no enclosing method
        # header; it looks like the body of a missing __repr__ definition.
        return "<%s %s %s>" % (self.__class__.__name__, self.is_readonly() and "RO" or "RW", hasattr(self, '_uri') and self._uri.abbrev())

    def init_from_uri(self, myuri):
        # Populate this dirnode from an existing directory URI: keep the
        # directory cap and build the backing mutable filenode from it.
        self._uri = IURI(myuri)
        self._node = self.filenode_class(self._client)
        self._node.init_from_uri(self._uri.get_filenode_uri())

    # NOTE(review): no classmethod wrapper is visible, and 'self' is used
    # without being bound -- this definition appears truncated.
    def create_with_mutablefile(cls, filenode, client):
        self._node = filenode
        return self._filenode_created(filenode)

    def create(self, keypair_generator=None, keysize=None):
        """
        Returns a deferred that eventually fires with self once the directory
        has been created (distributed across a set of storage servers).
        """
        # first we create a MutableFileNode with empty_contents, then use its
        # URI to create our own.
        self._node = self.filenode_class(self._client)
        empty_contents = self._pack_contents({})
        d = self._node.create(empty_contents, keypair_generator, keysize=keysize)
        d.addCallback(self._filenode_created)
        # NOTE(review): 'd' is built but never returned -- looks truncated.

    def _filenode_created(self, res):
        # Derive our directory URI from the freshly created filenode's URI.
        self._uri = NewDirectoryURI(IMutableFileURI(self._node.get_uri()))

        # return the size of our backing mutable file, in bytes, if we've
        # NOTE(review): the method header (presumably 'def get_size(self):')
        # for the following return appears to be missing.
        return self._most_recent_size

    def _set_size(self, data):
        # Callback helper: remember how large the serialized directory was.
        self._most_recent_size = len(data)

        # NOTE(review): the following three lines look like the body of a
        # missing read/list-style method (no 'def' header, no return).
        d = self._node.download_best_version()
        d.addCallback(self._set_size)
        d.addCallback(self._unpack_contents)

    def _encrypt_rwcap(self, rwcap):
        # Encrypt a child's write-cap so only holders of our writekey can
        # recover it; the read-cap is stored in cleartext elsewhere.
        assert isinstance(rwcap, str)
        # NOTE(review): 'IV' and 'cryptor' are used but never assigned in
        # the visible code -- their setup lines appear to be missing.
        key = hashutil.mutable_rwcap_key_hash(IV, self._node.get_writekey())
        crypttext = cryptor.process(rwcap)
        mac = hashutil.hmac(key, IV + crypttext)
        assert len(mac) == 32
        return IV + crypttext + mac
        # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still produce it for the sake of older readers.

    def _decrypt_rwcapdata(self, encwrcap):
        # Inverse of _encrypt_rwcap; layout is IV + crypttext + 32-byte MAC.
        # NOTE(review): 'IV' and 'cryptor' are used but never assigned, and
        # 'plaintext' is never returned -- lines appear to be missing.
        crypttext = encwrcap[16:-32]
        key = hashutil.mutable_rwcap_key_hash(IV, self._node.get_writekey())
        plaintext = cryptor.process(crypttext)

    def _create_node(self, rwcap, rocap):
        # Delegate child-node construction to the client's URI dispatcher.
        return self._client.create_node_from_uri(rwcap, rocap)

    def _unpack_contents(self, data):
        # the directory is serialized as a list of netstrings, one per child.
        # Each child is serialized as a list of four netstrings: (name,
        # rocap, rwcap, metadata), in which the name,rocap,metadata are in
        # cleartext. The 'name' is UTF-8 encoded. The rwcap is formatted as:
        # pack("16ss32s", iv, AES(H(writekey+iv), plaintextrwcap), mac)
        assert isinstance(data, str), (repr(data), type(data))
        # an empty directory is serialized as an empty string
        # NOTE(review): 'children' and 'position' are used below but never
        # initialized in the visible code -- lines appear to be missing.
        writeable = not self.is_readonly()
        while position < len(data):
            entries, position = split_netstring(data, 1, position)
            (name, rocap, rwcapdata, metadata_s), subpos = split_netstring(entries[0], 4)
            name = name.decode("utf-8")
            # NOTE(review): the conditionals guarding the next three lines
            # appear to be missing; indentation preserved as found.
                rwcap = self._decrypt_rwcapdata(rwcapdata)
            rwcap = None # rwcap is None or a non-empty string
                rocap = None # rocap is None or a non-empty string
            child = self._create_node(rwcap, rocap)
            metadata = simplejson.loads(metadata_s)
            assert isinstance(metadata, dict)
            children[name] = (child, metadata)
        # NOTE(review): no 'return children' is visible here.

    def _pack_contents(self, children):
        # expects children in the same format as _unpack_contents
        assert isinstance(children, dict)
        # NOTE(review): 'entries' is appended to below but its
        # initialization is not visible here.
        for name in sorted(children.keys()):
            child, metadata = children[name]
            assert isinstance(name, unicode)
            assert IFilesystemNode.providedBy(child), (name,child)
            assert isinstance(metadata, dict)
            rwcap = child.get_uri() # might be RO if the child is not writeable
            assert isinstance(rwcap, str), rwcap
            rocap = child.get_readonly_uri()
            assert isinstance(rocap, str), rocap
            # NOTE(review): compared with the four-field layout documented
            # in _unpack_contents, a netstring(rocap) field appears to be
            # missing from this entry.
            entry = "".join([netstring(name.encode("utf-8")),
                             netstring(self._encrypt_rwcap(rwcap)),
                             netstring(simplejson.dumps(metadata))])
            entries.append(netstring(entry))
        return "".join(entries)

    def is_readonly(self):
        return self._node.is_readonly()

    def is_mutable(self):
        return self._node.is_mutable()

        # NOTE(review): the method header (presumably 'def get_uri(self):')
        # for this return appears to be missing.
        return self._uri.to_string()

    def get_readonly_uri(self):
        return self._uri.get_readonly().to_string()

    def get_verify_cap(self):
        return self._uri.get_verify_cap()

    def get_repair_cap(self):
        # NOTE(review): both branch bodies appear to be missing here
        # (the if-suite is empty and there is no return).
        if self._node.is_readonly():

    def get_storage_index(self):
        return self._uri._filenode_uri.storage_index

    def check(self, monitor, verify=False, add_lease=False):
        """Perform a file check. See IChecker.check for details."""
        # Checking a directory means checking its backing mutable file.
        return self._node.check(monitor, verify, add_lease)

    def check_and_repair(self, monitor, verify=False, add_lease=False):
        return self._node.check_and_repair(monitor, verify, add_lease)

        # NOTE(review): this docstring has no enclosing method header; it
        # appears to belong to a missing 'def list(self):'.
        """I return a Deferred that fires with a dictionary mapping child
        name to a tuple of (IFileNode or IDirectoryNode, metadata)."""

    def has_child(self, name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not."""
        assert isinstance(name, unicode)
        # NOTE(review): 'd' is used but never assigned, and no return is
        # visible -- lines appear to be missing.
        d.addCallback(lambda children: children.has_key(name))

    def _get(self, children, name):
        child = children.get(name)
        # NOTE(review): the guard for this raise and the success-path
        # return appear to be missing.
            raise NoSuchChildError(name)

    def _get_with_metadata(self, children, name):
        child = children.get(name)
        # NOTE(review): same apparent truncation as _get above.
            raise NoSuchChildError(name)

        # NOTE(review): the method header (presumably 'def get(self, name):')
        # for the following body appears to be missing, along with the
        # assignment of 'd' and the final return.
        """I return a Deferred that fires with the named child node,
        which is either an IFileNode or an IDirectoryNode."""
        assert isinstance(name, unicode)
        d.addCallback(self._get, name)

    def get_child_and_metadata(self, name):
        """I return a Deferred that fires with the (node, metadata) pair for
        the named child. The node is either an IFileNode or an
        IDirectoryNode, and the metadata is a dictionary."""
        assert isinstance(name, unicode)
        # NOTE(review): 'd' is never assigned and never returned here.
        d.addCallback(self._get_with_metadata, name)

    def get_metadata_for(self, name):
        assert isinstance(name, unicode)
        # NOTE(review): 'd' is never assigned and never returned here.
        d.addCallback(lambda children: children[name][1])

    def set_metadata_for(self, name, metadata):
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        assert isinstance(metadata, dict)
        s = MetadataSetter(self, name, metadata)
        d = self._node.modify(s.modify)
        d.addCallback(lambda res: self)
        # NOTE(review): no 'return d' is visible.

    def get_child_at_path(self, path):
        """Transform a child path into an IDirectoryNode or IFileNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with IndexError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        """
        d = self.get_child_and_metadata_at_path(path)
        d.addCallback(lambda (node, metadata): node)
        # NOTE(review): no 'return d' is visible.

    def get_child_and_metadata_at_path(self, path):
        """Transform a child path into an IDirectoryNode or IFileNode and
        a metadata dictionary from the last edge that was traversed.
        """
        # NOTE(review): several guard lines in this method appear to be
        # missing (the empty-path test, the 'else:' before the split, the
        # 'for' loop header, the childname assignment, and the branch that
        # recurses when remaining_path is non-empty); indentation
        # preserved as found.
            return defer.succeed((self, {}))
        if isinstance(path, (list, tuple)):
            path = path.split("/")
            assert isinstance(p, unicode)
        remaining_path = path[1:]
            d = self.get(childname)
            d.addCallback(lambda node:
                          node.get_child_and_metadata_at_path(remaining_path))
        d = self.get_child_and_metadata(childname)

    def set_uri(self, name, child_uri, metadata=None, overwrite=True):
        """I add a child (by URI) at the specific name. I return a Deferred
        that fires with the child node when the operation finishes. I will
        replace any existing child of the same name.

        The child_uri could be for a file, or for a directory (either
        read-write or read-only, using a URI that came from get_uri() ).

        If this directory node is read-only, the Deferred will errback with a
        """
        precondition(isinstance(name, unicode), name)
        precondition(isinstance(child_uri, str), child_uri)
        child_node = self._create_node(child_uri, None)
        if isinstance(child_node, UnknownNode):
            # don't be willing to pack unknown nodes: we might accidentally
            # put some write-authority into the rocap slot because we don't
            # know how to diminish the URI they gave us. We don't even know
            # if they gave us a readcap or a writecap.
            msg = "cannot pack unknown node as child %s" % str(name)
            raise CannotPackUnknownNodeError(msg)
        d = self.set_node(name, child_node, metadata, overwrite)
        d.addCallback(lambda res: child_node)
        # NOTE(review): no 'return d' is visible.

    def set_children(self, entries, overwrite=True):
        a = Adder(self, overwrite=overwrite)
        # NOTE(review): the loop header over 'entries' for the indented
        # block below appears to be missing.
            name, child_uri, metadata = e
            assert isinstance(name, unicode)
            child_node = self._create_node(child_uri, None)
            if isinstance(child_node, UnknownNode):
                msg = "cannot pack unknown node as child %s" % str(name)
                raise CannotPackUnknownNodeError(msg)
            a.set_node(name, child_node, metadata)
        return self._node.modify(a.modify)

    def set_node(self, name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the

        If this directory node is read-only, the Deferred will errback with a
        """
        precondition(IFilesystemNode.providedBy(child), child)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        assert isinstance(name, unicode)
        assert IFilesystemNode.providedBy(child), child
        a = Adder(self, overwrite=overwrite)
        a.set_node(name, child, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: child)
        # NOTE(review): no 'return d' is visible.

    def set_nodes(self, entries, overwrite=True):
        if self.is_readonly():
            return defer.fail(NotMutableError())
        a = Adder(self, entries, overwrite=overwrite)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: None)
        # NOTE(review): no 'return d' is visible.

    def add_file(self, name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I return a
        Deferred that fires (with the IFileNode of the uploaded file) when
        the operation completes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        d = self._client.upload(uploadable)
        d.addCallback(lambda results: results.uri)
        d.addCallback(self._client.create_node_from_uri)
        d.addCallback(lambda node:
                      self.set_node(name, node, metadata, overwrite))
        # NOTE(review): no 'return d' is visible.

    def delete(self, name):
        """I remove the child at the specific name. I return a Deferred that
        fires (with the node just removed) when the operation finishes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        deleter = Deleter(self, name)
        d = self._node.modify(deleter.modify)
        d.addCallback(lambda res: deleter.old_child)
        # NOTE(review): no 'return d' is visible.

    def create_empty_directory(self, name, overwrite=True):
        """I create and attach an empty directory at the given name. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        d = self._client.create_empty_dirnode()
        # NOTE(review): the helper header (presumably 'def _created(child):')
        # for the indented block below appears to be missing, as does the
        # final 'return d'.
            entries = [(name, child, None)]
            a = Adder(self, entries, overwrite=overwrite)
            d = self._node.modify(a.modify)
            d.addCallback(lambda res: child)
        d.addCallback(_created)

    def move_child_to(self, current_child_name, new_parent,
                      new_child_name=None, overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. I return a
        Deferred that fires when the operation finishes."""
        assert isinstance(current_child_name, unicode)
        if self.is_readonly() or new_parent.is_readonly():
            return defer.fail(NotMutableError())
        if new_child_name is None:
            new_child_name = current_child_name
        assert isinstance(new_child_name, unicode)
        d = self.get(current_child_name)
        # NOTE(review): the helper wrapping this 'return' (and its
        # addCallback hookup) appears to be missing; the call is also
        # syntactically unterminated.
            return new_parent.set_node(new_child_name, child,
        d.addCallback(lambda child: self.delete(current_child_name))

    def deep_traverse(self, walker):
        """Perform a recursive walk, using this dirnode as a root, notifying
        the 'walker' instance of everything I encounter.

        I call walker.enter_directory(parent, children) once for each dirnode
        I visit, immediately after retrieving the list of children. I pass in
        the parent dirnode and the dict of childname->(childnode,metadata).
        This function should *not* traverse the children: I will do that.
        enter_directory() is most useful for the deep-stats number that
        counts how large a directory is.

        I call walker.add_node(node, path) for each node (both files and
        directories) I can reach. Most work should be done here.

        I avoid loops by keeping track of verifier-caps and refusing to call
        walker.add_node() or traverse a node that I've seen before. This
        means that any file or directory will only be given to the walker
        once. If files or directories are referenced multiple times by a
        directory structure, this may appear to under-count or miss some of

        I return a Monitor which can be used to wait for the operation to
        finish, learn about its progress, or cancel the operation.
        """
        # this is just a tree-walker, except that following each edge
        # requires a Deferred. We used to use a ConcurrencyLimiter to limit
        # fanout to 10 simultaneous operations, but the memory load of the
        # queued operations was excessive (in one case, with 330k dirnodes,
        # it caused the process to run into the 3.0GB-ish per-process 32bit
        # linux memory limit, and crashed). So we use a single big Deferred
        # chain, and do a strict depth-first traversal, one node at a time.
        # This can be slower, because we aren't pipelining directory reads,
        # but it brought the memory footprint down by roughly 50%.
        # NOTE(review): 'monitor' is used below but never constructed, and
        # it is never returned despite the docstring's promise -- lines
        # appear to be missing.
        walker.set_monitor(monitor)
        found = set([self.get_verify_cap()])
        d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
        d.addCallback(lambda ignored: walker.finish())
        d.addBoth(monitor.finish)
        d.addErrback(lambda f: None)

    def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
        # process this directory, then walk its children
        monitor.raise_if_cancelled()
        d = defer.maybeDeferred(walker.add_node, node, path)
        d.addCallback(lambda ignored: node.list())
        d.addCallback(self._deep_traverse_dirnode_children, node, path,
                      walker, monitor, found)
        # NOTE(review): no 'return d' is visible.

    def _deep_traverse_dirnode_children(self, children, parent, path,
                                        walker, monitor, found):
        monitor.raise_if_cancelled()
        d = defer.maybeDeferred(walker.enter_directory, parent, children)
        # we process file-like children first, so we can drop their FileNode
        # objects as quickly as possible. Tests suggest that a FileNode (held
        # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
        # in the nodecache) seem to consume about 2000 bytes.
        # NOTE(review): 'dirkids'/'filekids' are appended to below but never
        # initialized; several 'continue'/'else:' guard lines also appear
        # to be missing inside the loop; indentation preserved as found.
        for name, (child, metadata) in sorted(children.iteritems()):
            childpath = path + [name]
            if isinstance(child, UnknownNode):
                walker.add_node(child, childpath)
            verifier = child.get_verify_cap()
            # allow LIT files (for which verifier==None) to be processed
            if (verifier is not None) and (verifier in found):
            if IDirectoryNode.providedBy(child):
                dirkids.append( (child, childpath) )
                filekids.append( (child, childpath) )
        for i, (child, childpath) in enumerate(filekids):
            d.addCallback(lambda ignored, child=child, childpath=childpath:
                          walker.add_node(child, childpath))
            # to work around the Deferred tail-recursion problem
            # (specifically the defer.succeed flavor) requires us to avoid
            # doing more than 158 LIT files in a row. We insert a turn break
            # once every 100 files (LIT or CHK) to preserve some stack space
            # for other code. This is a different expression of the same
            # Twisted problem as in #237.
            # NOTE(review): the 'every 100 files' guard for the next line
            # appears to be missing.
                d.addCallback(lambda ignored: fireEventually())
        for (child, childpath) in dirkids:
            # NOTE(review): this recursive call is syntactically
            # unterminated -- its closing arguments and the method's
            # 'return d' appear to be missing.
            d.addCallback(lambda ignored, child=child, childpath=childpath:
                          self._deep_traverse_dirnode(child, childpath,

    def build_manifest(self):
        """Return a Monitor, with a ['status'] that will be a list of (path,
        cap) tuples, for all nodes (directories and files) reachable from
        """
        walker = ManifestWalker(self)
        return self.deep_traverse(walker)

    def start_deep_stats(self):
        # Since deep_traverse tracks verifier caps, we avoid double-counting
        # children for which we've got both a write-cap and a read-cap
        return self.deep_traverse(DeepStats(self))

    def start_deep_check(self, verify=False, add_lease=False):
        return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease))

    def start_deep_check_and_repair(self, verify=False, add_lease=False):
        return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease))
    def __init__(self, origin):
        # NOTE(review): 'self.origin', 'self.stats' and 'self.histograms'
        # are used by this class but their initializations are not visible
        # here; this counter-name list is also unterminated and its
        # 'self.stats[k] = 0' seeding loop body appears to be missing.
        for k in ["count-immutable-files",
                  "count-mutable-files",
                  "count-literal-files",
                  "size-immutable-files",
                  #"size-mutable-files",
                  "size-literal-files",
                  "largest-directory-children",
                  "largest-immutable-file",
                  #"largest-mutable-file",
        for k in ["size-files-histogram"]:
            self.histograms[k] = {} # maps (min,max) to count
        self.buckets = [ (0,0), (1,3)]
        # sqrt(10) growth factor: two size buckets per decade.
        self.root = math.sqrt(10)
676 def set_monitor(self, monitor):
677 self.monitor = monitor
678 monitor.origin_si = self.origin.get_storage_index()
679 monitor.set_status(self.get_results())
    def add_node(self, node, childpath):
        # Classify one reachable node into the running counters.
        if isinstance(node, UnknownNode):
            self.add("count-unknown")
        elif IDirectoryNode.providedBy(node):
            self.add("count-directories")
        elif IMutableFileNode.providedBy(node):
            self.add("count-files")
            self.add("count-mutable-files")
            # TODO: update the servermap, compute a size, add it to
            # size-mutable-files, max it into "largest-mutable-file"
        elif IFileNode.providedBy(node): # CHK and LIT
            self.add("count-files")
            size = node.get_size()
            self.histogram("size-files-histogram", size)
            theuri = from_string(node.get_uri())
            if isinstance(theuri, LiteralFileURI):
                self.add("count-literal-files")
                self.add("size-literal-files", size)
            # NOTE(review): an 'else:' appears to be missing above the
            # following immutable-file accounting block.
                self.add("count-immutable-files")
                self.add("size-immutable-files", size)
                self.max("largest-immutable-file", size)
704 def enter_directory(self, parent, children):
705 dirsize_bytes = parent.get_size()
706 dirsize_children = len(children)
707 self.add("size-directories", dirsize_bytes)
708 self.max("largest-directory", dirsize_bytes)
709 self.max("largest-directory-children", dirsize_children)
711 def add(self, key, value=1):
712 self.stats[key] += value
714 def max(self, key, value):
715 self.stats[key] = max(self.stats[key], value)
    def which_bucket(self, size):
        # return (min,max) such that min <= size <= max
        # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
        # (101,316), (317, 1000), etc: two per decade
        # NOTE(review): the loop header and initialization of 'i' that this
        # body relies on are not visible, nor are the return/increment at
        # the bottom -- lines appear to be missing; indentation preserved
        # as found.
            if i >= len(self.buckets):
                # Grow the bucket list on demand: the next bucket starts
                # just past the previous upper bound and extends by a
                # factor of self.root (~sqrt(10)).
                new_lower = self.buckets[i-1][1]+1
                new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
                self.buckets.append( (new_lower, new_upper) )
            maybe = self.buckets[i]
            if maybe[0] <= size <= maybe[1]:
    def histogram(self, key, size):
        # Drop one sample into the appropriate (min,max) bucket of the
        # named histogram.
        bucket = self.which_bucket(size)
        h = self.histograms[key]
        # NOTE(review): the lines that actually record the sample in
        # h[bucket] are not visible -- the method appears truncated.
    def get_results(self):
        # Copy the counters and render each histogram as a list of
        # (min, max, count) tuples.
        stats = self.stats.copy()
        for key in self.histograms:
            h = self.histograms[key]
            out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
        # NOTE(review): storing 'out' into stats and the final
        # 'return stats' are not visible; the following return also lacks
        # its method header (presumably 'def finish(self):').
        return self.get_results()
class ManifestWalker(DeepStats):
    """Deep-traversal walker that collects a manifest of (path, cap)
    tuples plus sets of storage-index strings and verify-caps, while also
    keeping the usual DeepStats counters."""

    def __init__(self, origin):
        DeepStats.__init__(self, origin)
        # NOTE(review): 'self.manifest' is appended to by add_node but its
        # initialization is not visible here.
        self.storage_index_strings = set()
        self.verifycaps = set()

    def add_node(self, node, path):
        self.manifest.append( (tuple(path), node.get_uri()) )
        si = node.get_storage_index()
        # NOTE(review): the guards for the two indented lines below
        # appear to be missing; indentation preserved as found.
            self.storage_index_strings.add(base32.b2a(si))
        v = node.get_verify_cap()
            self.verifycaps.add(v.to_string())
        # Keep the DeepStats counters up to date as well.
        return DeepStats.add_node(self, node, path)

    def get_results(self):
        stats = DeepStats.get_results(self)
        # NOTE(review): the returned dict appears truncated -- its closing
        # brace (and any further entries) are not visible below.
        return {"manifest": self.manifest,
                "verifycaps": self.verifycaps,
                "storage-index": self.storage_index_strings,
    def __init__(self, root, verify, repair, add_lease):
        root_si = root.get_storage_index()
        # One log entry marks the start of the whole deep-check run; later
        # messages are parented to it via self._lp.
        self._lp = log.msg(format="deep-check starting (%(si)s),"
                           " verify=%(verify)s, repair=%(repair)s",
                           si=base32.b2a(root_si), verify=verify, repair=repair)
        self._verify = verify
        self._repair = repair
        self._add_lease = add_lease
        # NOTE(review): the 'if repair:' / 'else:' guards that should
        # select between these two results objects appear to be missing;
        # as written the second assignment would always win.
            self._results = DeepCheckAndRepairResults(root_si)
            self._results = DeepCheckResults(root_si)
        self._stats = DeepStats(root)
794 def set_monitor(self, monitor):
795 self.monitor = monitor
796 monitor.set_status(self._results)
    def add_node(self, node, childpath):
        # NOTE(review): the 'if self._repair:' / 'else:' guards selecting
        # between check_and_repair and plain check appear to be missing,
        # as does the final 'return d'; indentation preserved as found.
            d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
            d.addCallback(self._results.add_check_and_repair, childpath)
            d = node.check(self.monitor, self._verify, self._add_lease)
            d.addCallback(self._results.add_check, childpath)
        d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
808 def enter_directory(self, parent, children):
809 return self._stats.enter_directory(parent, children)
        # NOTE(review): the method header for these two lines (presumably
        # 'def finish(self):') and its trailing return appear to be
        # missing. They log completion and fold the DeepStats counters
        # into the results object.
        log.msg("deep-check done", parent=self._lp)
        self._results.update_stats(self._stats.get_results())
817 # use client.create_dirnode() to make one of these