4 from zope.interface import implements
5 from twisted.internet import defer
6 from foolscap.api import fireEventually
8 from allmydata.mutable.common import NotMutableError
9 from allmydata.mutable.filenode import MutableFileNode
10 from allmydata.unknown import UnknownNode
11 from allmydata.interfaces import IMutableFileNode, IDirectoryNode,\
12 IFileNode, IMutableFileURI, IFilesystemNode, \
13 ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
14 CannotPackUnknownNodeError
15 from allmydata.check_results import DeepCheckResults, \
16 DeepCheckAndRepairResults
17 from allmydata.monitor import Monitor
18 from allmydata.util import hashutil, mathutil, base32, log
19 from allmydata.util.assertutil import _assert, precondition
20 from allmydata.util.netstring import netstring, split_netstring
21 from allmydata.uri import DirectoryURI, ReadonlyDirectoryURI, \
22 LiteralFileURI, from_string
23 from pycryptopp.cipher.aes import AES
24 from allmydata.util.dictutil import AuxValueDict
27 def __init__(self, node, name, must_exist=True):
30 self.must_exist = True
    def modify(self, old_contents, servermap, first_time):
        # Modifier callback (handed to MutableFileNode.modify): unpack the
        # old serialized directory, remove self.name, and repack. May be
        # re-invoked with first_time=False if the write needs a retry.
        children = self.node._unpack_contents(old_contents)
        if self.name not in children:
            if first_time and self.must_exist:
                raise NoSuchChildError(self.name)
        # remember the removed child so the caller's Deferred can fire
        # with it (see DirectoryNode.delete)
        self.old_child, metadata = children[self.name]
        del children[self.name]
        new_contents = self.node._pack_contents(children)
    def __init__(self, node, name, metadata):
        # 'metadata' completely replaces the existing metadata dict for
        # the child 'name' (see modify() below).
        self.metadata = metadata
    def modify(self, old_contents, servermap, first_time):
        # Modifier callback: swap in self.metadata for the named child,
        # keeping its node/cap entry (children[name][0]) untouched.
        children = self.node._unpack_contents(old_contents)
        if self.name not in children:
            raise NoSuchChildError(self.name)
        children[self.name] = (children[self.name][0], self.metadata)
        new_contents = self.node._pack_contents(children)
    def __init__(self, node, entries=None, overwrite=True):
        # entries: [name, node, metadata] triples to add (see set_node).
        # overwrite: True, False, or "only-files" — "only-files" refuses
        # to replace an existing *directory* child (checked in modify()).
        self.entries = entries
        self.overwrite = overwrite
    def set_node(self, name, node, metadata):
        # Queue one (name, node, metadata) triple for addition; the actual
        # packing happens later, inside the modify() retry loop.
        precondition(isinstance(name, unicode), name)
        precondition(IFilesystemNode.providedBy(node), node)
        self.entries.append( [name, node, metadata] )
    def modify(self, old_contents, servermap, first_time):
        # Modifier callback: merge the queued self.entries into the
        # unpacked children dict, honoring the overwrite policy and the
        # metadata rules below, then repack.
        children = self.node._unpack_contents(old_contents)
        for e in self.entries:
                # 3-element entry: caller supplied explicit metadata
                name, child, new_metadata = e
                # NOTE(review): 'assert _assert(...)' is redundant —
                # _assert() already raises AssertionError on failure.
                assert _assert(IFilesystemNode.providedBy(child), child)
            assert isinstance(name, unicode)
                if not self.overwrite:
                    raise ExistingChildError("child '%s' already exists" % name)
                if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
                    raise ExistingChildError("child '%s' already exists" % name)
                # existing child: start from its current metadata
                metadata = children[name][1].copy()
                # brand-new child: synthesize fresh timestamps
                metadata = {"ctime": now,
            if new_metadata is not None:
                # Overwrite all metadata.
                newmd = new_metadata.copy()
                # ... except the system-managed 'tahoe' sub-dict, which is
                # preserved from the existing entry
                if newmd.has_key('tahoe'):
                if metadata.has_key('tahoe'):
                    newmd['tahoe'] = metadata['tahoe']
                # For backwards compatibility with Tahoe < 1.4.0:
                if "ctime" not in metadata:
                    metadata["ctime"] = now
                metadata["mtime"] = now
            # maintain the system timestamps under metadata['tahoe']
            sysmd = metadata.get('tahoe', {})
            if not 'linkcrtime' in sysmd:
                if "ctime" in metadata:
                    # In Tahoe < 1.4.0 we used the word "ctime" to mean what Tahoe >= 1.4.0
                    # calls "linkcrtime".
                    sysmd["linkcrtime"] = metadata["ctime"]
                    sysmd["linkcrtime"] = now
            sysmd["linkmotime"] = now
            children[name] = (child, metadata)
        new_contents = self.node._pack_contents(children)
def _encrypt_rwcap(filenode, rwcap):
    # Encrypt a child's write-cap under a key derived from the parent
    # directory's writekey, so only write-cap holders of the parent can
    # recover children's write-caps. Wire layout: 16-byte salt +
    # AES ciphertext + 32-byte HMAC (salt length matches the [16:-32]
    # slicing in DirectoryNode._decrypt_rwcapdata).
    assert isinstance(rwcap, str)
    salt = hashutil.mutable_rwcap_salt_hash(rwcap)
    key = hashutil.mutable_rwcap_key_hash(salt, filenode.get_writekey())
    crypttext = cryptor.process(rwcap)
    mac = hashutil.hmac(key, salt + crypttext)
    assert len(mac) == 32
    return salt + crypttext + mac
    # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
    # produce it for the sake of older readers.
def pack_children(filenode, children):
    """Take a dict that maps:
         children[unicode_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxilliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time."""
    has_aux = isinstance(children, AuxValueDict)
    for name in sorted(children.keys()):
        assert isinstance(name, unicode)
            # reuse the cached pre-packed entry when available
            entry = children.get_aux(name)
            (child, metadata) = children[name]
            assert IFilesystemNode.providedBy(child), (name,child)
            assert isinstance(metadata, dict)
            rwcap = child.get_uri() # might be RO if the child is not writeable
            assert isinstance(rwcap, str), rwcap
            rocap = child.get_readonly_uri()
            assert isinstance(rocap, str), rocap
            # each child entry is a netstring of concatenated netstrings;
            # the rwcap is encrypted under the parent's writekey
            entry = "".join([netstring(name.encode("utf-8")),
                             netstring(_encrypt_rwcap(filenode, rwcap)),
                             netstring(simplejson.dumps(metadata))])
        entries.append(netstring(entry))
    return "".join(entries)
    implements(IDirectoryNode, ICheckable, IDeepCheckable)
    # the backing store is a mutable file holding the packed children
    filenode_class = MutableFileNode

    def __init__(self, filenode, nodemaker, uploader):
        # filenode: the mutable file that stores the packed directory.
        # nodemaker: creates child nodes from caps (see _create_node).
        # uploader: used by add_file() to upload new content.
        self._node = filenode
        filenode_uri = IMutableFileURI(filenode.get_uri())
        if filenode_uri.is_readonly():
            self._uri = ReadonlyDirectoryURI(filenode_uri)
            # writeable backing cap: full read-write DirectoryURI
            self._uri = DirectoryURI(filenode_uri)
        self._nodemaker = nodemaker
        self._uploader = uploader
        self._most_recent_size = None
        # __repr__: class name, RO/RW status, abbreviated URI
        return "<%s %s %s>" % (self.__class__.__name__, self.is_readonly() and "RO" or "RW", hasattr(self, '_uri') and self._uri.abbrev())
        # return the size of our backing mutable file, in bytes, if we've
        return self._most_recent_size

    def _set_size(self, data):
        # cache the byte-length of the most recently downloaded contents
        self._most_recent_size = len(data)
        # download the backing file's best version, cache its size, and
        # unpack it into a children dict
        d = self._node.download_best_version()
        d.addCallback(self._set_size)
        d.addCallback(self._unpack_contents)
    def _decrypt_rwcapdata(self, encwrcap):
        # Inverse of _encrypt_rwcap: layout is [0:16]=salt,
        # [16:-32]=AES ciphertext, [-32:]=MAC. The MAC is not verified
        # here (see the note in _encrypt_rwcap).
        crypttext = encwrcap[16:-32]
        key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
        plaintext = cryptor.process(crypttext)

    def _create_node(self, rwcap, rocap):
        # Delegate child-node creation to the nodemaker so this dirnode
        # doesn't need to know about every cap format.
        return self._nodemaker.create_from_cap(rwcap, rocap)
    def _unpack_contents(self, data):
        # the directory is serialized as a list of netstrings, one per child.
        # Each child is serialized as a list of four netstrings: (name,
        # rocap, rwcap, metadata), in which the name,rocap,metadata are in
        # cleartext. The 'name' is UTF-8 encoded. The rwcap is formatted as:
        # pack("16ss32s", iv, AES(H(writekey+iv), plaintextrwcap), mac)
        assert isinstance(data, str), (repr(data), type(data))
        # an empty directory is serialized as an empty string
            return AuxValueDict()
        # only holders of a write-cap can decrypt children's rwcaps
        writeable = not self.is_readonly()
        children = AuxValueDict()
        while position < len(data):
            entries, position = split_netstring(data, 1, position)
            (name, rocap, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
            name = name.decode("utf-8")
                rwcap = self._decrypt_rwcapdata(rwcapdata)
                rwcap = None # rwcap is None or a non-empty string
                rocap = None # rocap is None or a non-empty string
            child = self._create_node(rwcap, rocap)
            metadata = simplejson.loads(metadata_s)
            assert isinstance(metadata, dict)
            # keep the raw packed entry as aux data so pack_children can
            # reuse it without re-encrypting
            children.set_with_aux(name, (child, metadata), auxilliary=entry)
    def _pack_contents(self, children):
        # expects children in the same format as _unpack_contents
        return pack_children(self._node, children)

    # read-only status and mutability are properties of the backing
    # mutable file
    def is_readonly(self):
        return self._node.is_readonly()
    def is_mutable(self):
        return self._node.is_mutable()
        # get_uri: full (possibly read-write) directory URI string
        return self._uri.to_string()

    def get_readonly_uri(self):
        # read-only diminished form of our URI
        return self._uri.get_readonly().to_string()

    def get_verify_cap(self):
        return self._uri.get_verify_cap()

    def get_repair_cap(self):
        if self._node.is_readonly():

    def get_storage_index(self):
        # NOTE(review): reaches into the URI object's private
        # _filenode_uri attribute — a public accessor would be cleaner.
        return self._uri._filenode_uri.storage_index

    def check(self, monitor, verify=False, add_lease=False):
        """Perform a file check. See IChecker.check for details."""
        return self._node.check(monitor, verify, add_lease)
    def check_and_repair(self, monitor, verify=False, add_lease=False):
        # checks (and optionally repairs) the backing mutable file only,
        # not the children
        return self._node.check_and_repair(monitor, verify, add_lease)
        """I return a Deferred that fires with a dictionary mapping child
        name to a tuple of (IFileNode or IDirectoryNode, metadata)."""

    def has_child(self, name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not."""
        assert isinstance(name, unicode)
        d.addCallback(lambda children: children.has_key(name))
    def _get(self, children, name):
        # helper: extract just the child node from a children dict,
        # raising NoSuchChildError if absent
        child = children.get(name)
            raise NoSuchChildError(name)

    def _get_with_metadata(self, children, name):
        # helper: like _get, but keeps the (node, metadata) pair
        child = children.get(name)
            raise NoSuchChildError(name)
        """I return a Deferred that fires with the named child node,
        which is either an IFileNode or an IDirectoryNode."""
        assert isinstance(name, unicode)
        d.addCallback(self._get, name)

    def get_child_and_metadata(self, name):
        """I return a Deferred that fires with the (node, metadata) pair for
        the named child. The node is either an IFileNode or an
        IDirectoryNode, and the metadata is a dictionary."""
        assert isinstance(name, unicode)
        d.addCallback(self._get_with_metadata, name)
    def get_metadata_for(self, name):
        # Deferred -> the metadata dict of the named child
        assert isinstance(name, unicode)
        d.addCallback(lambda children: children[name][1])

    def set_metadata_for(self, name, metadata):
        # Replace the named child's metadata wholesale (via
        # MetadataSetter). Fires with self when done; errbacks with
        # NotMutableError on a read-only dirnode.
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        assert isinstance(metadata, dict)
        s = MetadataSetter(self, name, metadata)
        d = self._node.modify(s.modify)
        d.addCallback(lambda res: self)
    def get_child_at_path(self, path):
        """Transform a child path into an IDirectoryNode or IFileNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with IndexError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements.
        """
        d = self.get_child_and_metadata_at_path(path)
        d.addCallback(lambda (node, metadata): node)

    def get_child_and_metadata_at_path(self, path):
        """Transform a child path into an IDirectoryNode or IFileNode and
        a metadata dictionary from the last edge that was traversed.
        """
        # an empty path refers to this node itself (with empty metadata)
            return defer.succeed((self, {}))
        if isinstance(path, (list, tuple)):
            # a string path is split on slashes into name elements
            path = path.split("/")
        assert isinstance(p, unicode)
        remaining_path = path[1:]
            # more path to walk: recurse into the child
            d = self.get(childname)
            d.addCallback(lambda node:
                          node.get_child_and_metadata_at_path(remaining_path))
            # last element: return the child plus this edge's metadata
            d = self.get_child_and_metadata(childname)
    def set_uri(self, name, writecap, readcap, metadata=None, overwrite=True):
        """Add a child at the given name, specified by cap strings rather
        than by node. Returns a Deferred that fires with the new child
        node once it has been linked in."""
        precondition(isinstance(name, unicode), name)
        precondition(isinstance(writecap, (str,type(None))), writecap)
        precondition(isinstance(readcap, (str,type(None))), readcap)
        child_node = self._create_node(writecap, readcap)
        if isinstance(child_node, UnknownNode):
            # don't be willing to pack unknown nodes: we might accidentally
            # put some write-authority into the rocap slot because we don't
            # know how to diminish the URI they gave us. We don't even know
            # if they gave us a readcap or a writecap.
            msg = "cannot pack unknown node as child %s" % str(name)
            raise CannotPackUnknownNodeError(msg)
        d = self.set_node(name, child_node, metadata, overwrite)
        d.addCallback(lambda res: child_node)
    def set_children(self, entries, overwrite=True):
        # Add multiple children in one mutable-file modification. Each
        # entry is (writecap, readcap) or (writecap, readcap, metadata).
        # Fires with self when the operation finishes.
        a = Adder(self, overwrite=overwrite)
        for (name, e) in entries.iteritems():
            assert isinstance(name, unicode)
                # 2-tuple: no explicit metadata
                writecap, readcap = e
                writecap, readcap, metadata = e
            precondition(isinstance(writecap, (str,type(None))), writecap)
            precondition(isinstance(readcap, (str,type(None))), readcap)
            child_node = self._create_node(writecap, readcap)
            if isinstance(child_node, UnknownNode):
                # same refusal as set_uri: we can't safely pack a cap we
                # don't understand
                msg = "cannot pack unknown node as child %s" % str(name)
                raise CannotPackUnknownNodeError(msg)
            a.set_node(name, child_node, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda ign: self)
    def set_node(self, name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError.
        """
        precondition(IFilesystemNode.providedBy(child), child)

        if self.is_readonly():
            return defer.fail(NotMutableError())
        assert isinstance(name, unicode)
        assert IFilesystemNode.providedBy(child), child
        a = Adder(self, overwrite=overwrite)
        a.set_node(name, child, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: child)
    def set_nodes(self, entries, overwrite=True):
        # Bulk form of set_node: 'entries' are name/node/metadata entries
        # in the format Adder.modify expects. Fires with self when done.
        if self.is_readonly():
            return defer.fail(NotMutableError())
        a = Adder(self, entries, overwrite=overwrite)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: self)
    def add_file(self, name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I return a
        Deferred that fires (with the IFileNode of the uploaded file) when
        the operation completes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        # upload -> cap string -> node -> link it under 'name'
        d = self._uploader.upload(uploadable)
        d.addCallback(lambda results: results.uri)
        d.addCallback(self._nodemaker.create_from_cap)
        d.addCallback(lambda node:
                      self.set_node(name, node, metadata, overwrite))
    def delete(self, name):
        """I remove the child at the specific name. I return a Deferred that
        fires (with the node just removed) when the operation finishes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        deleter = Deleter(self, name)
        d = self._node.modify(deleter.modify)
        # Deleter.modify stashed the removed child on .old_child
        d.addCallback(lambda res: deleter.old_child)
    def create_subdirectory(self, name, initial_children={}, overwrite=True):
        # Create a new mutable directory and link it in under 'name'.
        # Fires with the new child dirnode.
        # NOTE(review): mutable default argument ({}); safe only if
        # create_new_mutable_directory never mutates it — confirm.
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        d = self._nodemaker.create_new_mutable_directory(initial_children)
            entries = [(name, child, None)]
            a = Adder(self, entries, overwrite=overwrite)
            d = self._node.modify(a.modify)
            d.addCallback(lambda res: child)
        d.addCallback(_created)
    def move_child_to(self, current_child_name, new_parent,
                      new_child_name=None, overwrite=True):
        """I take one of my children and move them to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. I return a
        Deferred that fires when the operation finishes."""
        assert isinstance(current_child_name, unicode)
        # both ends must be writeable: we add on one and delete on the other
        if self.is_readonly() or new_parent.is_readonly():
            return defer.fail(NotMutableError())
        if new_child_name is None:
            new_child_name = current_child_name
        assert isinstance(new_child_name, unicode)
        d = self.get(current_child_name)
            return new_parent.set_node(new_child_name, child,
        # NOTE: add-then-delete; not atomic across the two dirnodes
        d.addCallback(lambda child: self.delete(current_child_name))
    def deep_traverse(self, walker):
        """Perform a recursive walk, using this dirnode as a root, notifying
        the 'walker' instance of everything I encounter.

        I call walker.enter_directory(parent, children) once for each dirnode
        I visit, immediately after retrieving the list of children. I pass in
        the parent dirnode and the dict of childname->(childnode,metadata).
        This function should *not* traverse the children: I will do that.
        enter_directory() is most useful for the deep-stats number that
        counts how large a directory is.

        I call walker.add_node(node, path) for each node (both files and
        directories) I can reach. Most work should be done here.

        I avoid loops by keeping track of verifier-caps and refusing to call
        walker.add_node() or traverse a node that I've seen before. This
        means that any file or directory will only be given to the walker
        once. If files or directories are referenced multiple times by a
        directory structure, this may appear to under-count or miss some of

        I return a Monitor which can be used to wait for the operation to
        finish, learn about its progress, or cancel the operation.
        """

        # this is just a tree-walker, except that following each edge
        # requires a Deferred. We used to use a ConcurrencyLimiter to limit
        # fanout to 10 simultaneous operations, but the memory load of the
        # queued operations was excessive (in one case, with 330k dirnodes,
        # it caused the process to run into the 3.0GB-ish per-process 32bit
        # linux memory limit, and crashed). So we use a single big Deferred
        # chain, and do a strict depth-first traversal, one node at a time.
        # This can be slower, because we aren't pipelining directory reads,
        # but it brought the memory footprint down by roughly 50%.
        walker.set_monitor(monitor)
        # seed the visited-set with our own verify-cap so a loop back to
        # the root is not traversed again
        found = set([self.get_verify_cap()])
        d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
        d.addCallback(lambda ignored: walker.finish())
        d.addBoth(monitor.finish)
        # failures are reported through the monitor, not left on the chain
        d.addErrback(lambda f: None)
    def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
        # process this directory, then walk its children
        monitor.raise_if_cancelled()
        d = defer.maybeDeferred(walker.add_node, node, path)
        d.addCallback(lambda ignored: node.list())
        d.addCallback(self._deep_traverse_dirnode_children, node, path,
                      walker, monitor, found)
    def _deep_traverse_dirnode_children(self, children, parent, path,
                                        walker, monitor, found):
        monitor.raise_if_cancelled()
        d = defer.maybeDeferred(walker.enter_directory, parent, children)
        # we process file-like children first, so we can drop their FileNode
        # objects as quickly as possible. Tests suggest that a FileNode (held
        # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
        # in the nodecache) seem to consume about 2000 bytes.
        for name, (child, childpath) in ():
            pass
    def build_manifest(self):
        """Return a Monitor, with a ['status'] that will be a list of (path,
        cap) tuples, for all nodes (directories and files) reachable from
        """
        walker = ManifestWalker(self)
        return self.deep_traverse(walker)

    def start_deep_stats(self):
        # Since deep_traverse tracks verifier caps, we avoid double-counting
        # children for which we've got both a write-cap and a read-cap
        return self.deep_traverse(DeepStats(self))

    def start_deep_check(self, verify=False, add_lease=False):
        # check every reachable node (no repair); returns a Monitor
        return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease))

    def start_deep_check_and_repair(self, verify=False, add_lease=False):
        # like start_deep_check, but also attempts repair
        return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease))
    def __init__(self, origin):
        # 'origin' is the root dirnode of the traversal; its storage
        # index is recorded on the monitor in set_monitor().
        for k in ["count-immutable-files",
                  "count-mutable-files",
                  "count-literal-files",
                  "size-immutable-files",
                  #"size-mutable-files",
                  "size-literal-files",
                  "largest-directory-children",
                  "largest-immutable-file",
                  #"largest-mutable-file",
        for k in ["size-files-histogram"]:
            self.histograms[k] = {} # maps (min,max) to count
        # histogram buckets grow on demand in which_bucket(); two buckets
        # per decade (root = sqrt(10))
        self.buckets = [ (0,0), (1,3)]
        self.root = math.sqrt(10)

    def set_monitor(self, monitor):
        # attach the Monitor: record the origin's storage index and
        # expose live results via monitor.set_status()
        self.monitor = monitor
        monitor.origin_si = self.origin.get_storage_index()
        monitor.set_status(self.get_results())
    def add_node(self, node, childpath):
        # classify one reachable node into the counters
        if isinstance(node, UnknownNode):
            self.add("count-unknown")
        elif IDirectoryNode.providedBy(node):
            self.add("count-directories")
        elif IMutableFileNode.providedBy(node):
            self.add("count-files")
            self.add("count-mutable-files")
            # TODO: update the servermap, compute a size, add it to
            # size-mutable-files, max it into "largest-mutable-file"
        elif IFileNode.providedBy(node): # CHK and LIT
            self.add("count-files")
            size = node.get_size()
            self.histogram("size-files-histogram", size)
            theuri = from_string(node.get_uri())
            if isinstance(theuri, LiteralFileURI):
                self.add("count-literal-files")
                self.add("size-literal-files", size)
                # non-LIT immutable (CHK) file
                self.add("count-immutable-files")
                self.add("size-immutable-files", size)
                self.max("largest-immutable-file", size)
    def enter_directory(self, parent, children):
        # per-directory stats: serialized size and number of children
        dirsize_bytes = parent.get_size()
        dirsize_children = len(children)
        self.add("size-directories", dirsize_bytes)
        self.max("largest-directory", dirsize_bytes)
        self.max("largest-directory-children", dirsize_children)

    def add(self, key, value=1):
        # accumulate into a named counter
        self.stats[key] += value

    def max(self, key, value):
        # keep the running maximum for a named stat (the bare max() call
        # inside the body still resolves to the builtin)
        self.stats[key] = max(self.stats[key], value)
    def which_bucket(self, size):
        # return (min,max) such that min <= size <= max
        # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
        # (101,316), (317, 1000), etc: two per decade
            if i >= len(self.buckets):
                # extend the bucket list on demand: next bucket starts just
                # past the previous one and ends at the next power of root
                new_lower = self.buckets[i-1][1]+1
                new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
                self.buckets.append( (new_lower, new_upper) )
            maybe = self.buckets[i]
            if maybe[0] <= size <= maybe[1]:

    def histogram(self, key, size):
        # bump the histogram bucket that contains 'size'
        bucket = self.which_bucket(size)
        h = self.histograms[key]
    def get_results(self):
        # return a copy of the stats, with each histogram flattened into
        # (min, max, count) triples
        stats = self.stats.copy()
        for key in self.histograms:
            h = self.histograms[key]
            out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
        # finish(): the walker's final result is just the stats dict
        return self.get_results()
class ManifestWalker(DeepStats):
    # A deep-traversal walker that accumulates a manifest of (path, cap)
    # tuples plus sets of storage-index strings and verify-cap strings,
    # while also gathering the usual DeepStats counters.
    def __init__(self, origin):
        DeepStats.__init__(self, origin)
        self.storage_index_strings = set()
        self.verifycaps = set()

    def add_node(self, node, path):
        self.manifest.append( (tuple(path), node.get_uri()) )
        si = node.get_storage_index()
            self.storage_index_strings.add(base32.b2a(si))
        v = node.get_verify_cap()
            self.verifycaps.add(v.to_string())
        # also feed the node into the DeepStats counters
        return DeepStats.add_node(self, node, path)

    def get_results(self):
        stats = DeepStats.get_results(self)
        return {"manifest": self.manifest,
                "verifycaps": self.verifycaps,
                "storage-index": self.storage_index_strings,
    def __init__(self, root, verify, repair, add_lease):
        # root: starting dirnode; verify/repair/add_lease are passed
        # through to each node's check()/check_and_repair().
        root_si = root.get_storage_index()
        self._lp = log.msg(format="deep-check starting (%(si)s),"
                           " verify=%(verify)s, repair=%(repair)s",
                           si=base32.b2a(root_si), verify=verify, repair=repair)
        self._verify = verify
        self._repair = repair
        self._add_lease = add_lease
            self._results = DeepCheckAndRepairResults(root_si)
            self._results = DeepCheckResults(root_si)
        self._stats = DeepStats(root)

    def set_monitor(self, monitor):
        # expose the (live) results object as the monitor's status
        self.monitor = monitor
        monitor.set_status(self._results)

    def add_node(self, node, childpath):
        # check (and optionally repair) one node, record the outcome, and
        # feed the node into the stats counters
            d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
            d.addCallback(self._results.add_check_and_repair, childpath)
            d = node.check(self.monitor, self._verify, self._add_lease)
            d.addCallback(self._results.add_check, childpath)
        d.addCallback(lambda ignored: self._stats.add_node(node, childpath))

    def enter_directory(self, parent, children):
        return self._stats.enter_directory(parent, children)

        # finish(): fold the accumulated stats into the results object
        log.msg("deep-check done", parent=self._lp)
        self._results.update_stats(self._stats.get_results())
794 # use client.create_dirnode() to make one of these