4 from zope.interface import implements
5 from twisted.internet import defer
6 from foolscap.api import fireEventually
8 from allmydata.mutable.common import NotMutableError
9 from allmydata.mutable.filenode import MutableFileNode
10 from allmydata.unknown import UnknownNode
11 from allmydata.interfaces import IMutableFileNode, IDirectoryNode,\
12 IURI, IFileNode, IMutableFileURI, IFilesystemNode, \
13 ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
14 CannotPackUnknownNodeError
15 from allmydata.check_results import DeepCheckResults, \
16 DeepCheckAndRepairResults
17 from allmydata.monitor import Monitor
18 from allmydata.util import hashutil, mathutil, base32, log
19 from allmydata.util.assertutil import _assert, precondition
20 from allmydata.util.netstring import netstring, split_netstring
21 from allmydata.uri import DirectoryURI, LiteralFileURI, from_string
22 from pycryptopp.cipher.aes import AES
# CachingDict: a dict subclass that remembers, per key, a cached serialized
# form alongside the live value.  Used by _pack_contents/_unpack_contents to
# avoid re-serializing unchanged directory entries.
# NOTE(review): this listing is elided — original lines 23, 27-28, 32 and 36
# are missing; the initialization of self.serialized (presumably an empty
# dict in __init__) is not visible here — confirm against the full source.
24 class CachingDict(dict):
25 def __init__(self, *args):
26 super(CachingDict, self).__init__(*args)
# A plain __setitem__ invalidates the cached serialization for that key
# (resets it to None), since the stored value may no longer match it.
29 def __setitem__(self, k, v):
30 super(CachingDict, self).__setitem__(k, v)
31 self.serialized[k] = None
# Return a (cached-serialized-or-None, live-value) pair for key k.
33 def get_both_items(self, k):
34 return (self.serialized.setdefault(k, None),
35 super(CachingDict, self).__getitem__(k))
# Store both the serialized form and the live value for a key, bypassing
# the cache invalidation that __setitem__ would perform.
37 def set_both_items(self, key, serialized, t):
38 self.serialized[key] = serialized
39 super(CachingDict, self).__setitem__(key, t)
# Deleter: a modifier object for MutableFileNode.modify(); its modify()
# removes the named child from the packed directory contents.
# NOTE(review): the enclosing class header and original lines 43-44
# (presumably self.node/self.name assignments), 51-52 (the branch taken when
# the child is absent and must_exist is false) and 56-57 (the return of
# new_contents) are missing from this elided listing.
42 def __init__(self, node, name, must_exist=True):
# NOTE(review): the must_exist parameter appears to be ignored here —
# self.must_exist is hard-coded to True regardless of the argument.
# Possible bug; cannot confirm from this elided view.
45 self.must_exist = True
46 def modify(self, old_contents, servermap, first_time):
47 children = self.node._unpack_contents(old_contents)
48 if self.name not in children:
49 if first_time and self.must_exist:
50 raise NoSuchChildError(self.name)
# Remember the removed child so the caller can report it, then repack.
53 self.old_child, metadata = children[self.name]
54 del children[self.name]
55 new_contents = self.node._pack_contents(children)
# MetadataSetter: a modifier object for MutableFileNode.modify(); its
# modify() replaces the metadata of an existing child, keeping the child
# node itself unchanged.
# NOTE(review): the enclosing class header, original lines 60-61 (presumably
# self.node/self.name assignments) and the trailing return of new_contents
# are missing from this elided listing.
59 def __init__(self, node, name, metadata):
62 self.metadata = metadata
64 def modify(self, old_contents, servermap, first_time):
65 children = self.node._unpack_contents(old_contents)
66 if self.name not in children:
67 raise NoSuchChildError(self.name)
# Keep the child node (tuple index 0), swap in the new metadata.
68 children[self.name] = (children[self.name][0], self.metadata)
69 new_contents = self.node._pack_contents(children)
# Adder: a modifier object for MutableFileNode.modify(); its modify() adds
# (or overwrites, per the 'overwrite' policy) a batch of (name, child,
# metadata) entries in the packed directory contents.
# NOTE(review): the enclosing class header and many interior lines (e.g.
# 75-77, 88, 90-94, 98, 105, 107-113, 117-118, 120, 123-125, 130-131, 138,
# 141, 144-145) are missing from this elided listing — in particular the
# default for entries when None, the 'now = time.time()'-style assignment,
# several else: branches, and the final return are not visible.
74 def __init__(self, node, entries=None, overwrite=True):
78 self.entries = entries
79 self.overwrite = overwrite
# Queue one entry to be added; name must be unicode and node must provide
# IFilesystemNode.
81 def set_node(self, name, node, metadata):
82 precondition(isinstance(name, unicode), name)
83 precondition(IFilesystemNode.providedBy(node), node)
84 self.entries.append( [name, node, metadata] )
86 def modify(self, old_contents, servermap, first_time):
87 children = self.node._unpack_contents(old_contents)
89 for e in self.entries:
95 name, child, new_metadata = e
# NOTE(review): 'assert _assert(...)' double-wraps the check — _assert
# already raises on failure, so the outer assert is redundant (and
# _assert's return value being truthy is what the assert tests).
# Cannot confirm intent from this elided view.
96 assert _assert(IFilesystemNode.providedBy(child), child)
97 assert isinstance(name, unicode)
# Overwrite policy: False forbids replacing any existing child;
# "only-files" forbids replacing an existing *directory* child.
99 if not self.overwrite:
100 raise ExistingChildError("child '%s' already exists" % name)
102 if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
103 raise ExistingChildError("child '%s' already exists" % name)
# Existing child: start from a copy of its current metadata.
104 metadata = children[name][1].copy()
# New child (elided else-branch): start fresh metadata with ctime.
106 metadata = {"ctime": now,
114 if new_metadata is not None:
115 # Overwrite all metadata.
116 newmd = new_metadata.copy()
# Preserve the existing 'tahoe' sysmd subtree: callers may not
# overwrite Tahoe-managed metadata.
119 if newmd.has_key('tahoe'):
121 if metadata.has_key('tahoe'):
122 newmd['tahoe'] = metadata['tahoe']
126 # For backwards compatibility with Tahoe < 1.4.0:
127 if "ctime" not in metadata:
128 metadata["ctime"] = now
129 metadata["mtime"] = now
# Maintain the Tahoe-managed link-creation/modification times.
132 sysmd = metadata.get('tahoe', {})
133 if not 'linkcrtime' in sysmd:
134 if "ctime" in metadata:
135 # In Tahoe < 1.4.0 we used the word "ctime" to mean what Tahoe >= 1.4.0
136 # calls "linkcrtime".
137 sysmd["linkcrtime"] = metadata["ctime"]
139 sysmd["linkcrtime"] = now
140 sysmd["linkmotime"] = now
142 children[name] = (child, metadata)
143 new_contents = self.node._pack_contents(children)
# Directory node built on top of a MutableFileNode: the directory's children
# are serialized into the mutable file's contents.
# NOTE(review): the enclosing class statement (original line ~146) and many
# interior lines are missing from this elided listing; several 'def' lines
# (e.g. for __repr__, get_size, list) and 'return d' lines are not visible.
147 implements(IDirectoryNode, ICheckable, IDeepCheckable)
148 filenode_class = MutableFileNode
150 def __init__(self, client):
151 self._client = client
152 self._most_recent_size = None
# __repr__ body (its 'def' line, original ~154, is elided): shows class
# name, RO/RW status, and abbreviated URI if one has been set.
155 return "<%s %s %s>" % (self.__class__.__name__, self.is_readonly() and "RO" or "RW", hasattr(self, '_uri') and self._uri.abbrev())
# Populate this dirnode from an existing directory URI: wrap a filenode
# around the URI's inner mutable-file cap.
156 def init_from_uri(self, myuri):
157 self._uri = IURI(myuri)
158 self._node = self.filenode_class(self._client)
159 self._node.init_from_uri(self._uri.get_filenode_uri())
# Alternate constructor from an already-created mutable filenode.
# NOTE(review): line 164 (presumably 'self = cls(client)') is elided.
163 def create_with_mutablefile(cls, filenode, client):
165 self._node = filenode
166 return self._filenode_created(filenode)
168 def create(self, keypair_generator=None, keysize=None):
170 Returns a deferred that eventually fires with self once the directory
171 has been created (distributed across a set of storage servers).
173 # first we create a MutableFileNode with empty_contents, then use its
174 # URI to create our own.
175 self._node = self.filenode_class(self._client)
176 empty_contents = self._pack_contents(CachingDict())
177 d = self._node.create(empty_contents, keypair_generator, keysize=keysize)
178 d.addCallback(self._filenode_created)
# Derive our directory URI from the underlying mutable file's URI.
180 def _filenode_created(self, res):
181 self._uri = DirectoryURI(IMutableFileURI(self._node.get_uri()))
185 # return the size of our backing mutable file, in bytes, if we've
187 return self._most_recent_size
# Callback that records the size of the downloaded serialized contents.
189 def _set_size(self, data):
190 self._most_recent_size = len(data)
# list() body (its 'def' line is elided): download the best version,
# record its size, and unpack the children dict.
194 d = self._node.download_best_version()
195 d.addCallback(self._set_size)
196 d.addCallback(self._unpack_contents)
# Encrypt a write-cap for storage in a directory entry:
# salt + AES(key, rwcap) + HMAC, where key is derived from our writekey.
# NOTE(review): line 203 (construction of 'cryptor', presumably AES(key))
# is elided.
199 def _encrypt_rwcap(self, rwcap):
200 assert isinstance(rwcap, str)
201 salt = hashutil.mutable_rwcap_salt_hash(rwcap)
202 key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
204 crypttext = cryptor.process(rwcap)
205 mac = hashutil.hmac(key, salt + crypttext)
206 assert len(mac) == 32
207 return salt + crypttext + mac
208 # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
209 # produce it for the sake of older readers.
# Inverse of _encrypt_rwcap: strip the 16-byte salt and 32-byte MAC,
# decrypt the middle.  NOTE(review): lines 212 (salt extraction), 215
# (cryptor construction) and 217-218 (return) are elided.
211 def _decrypt_rwcapdata(self, encwrcap):
213 crypttext = encwrcap[16:-32]
214 key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
216 plaintext = cryptor.process(crypttext)
# Delegate node construction to the client (handles files, dirs,
# and unknown caps).
219 def _create_node(self, rwcap, rocap):
220 return self._client.create_node_from_uri(rwcap, rocap)
222 def _unpack_contents(self, data):
223 # the directory is serialized as a list of netstrings, one per child.
224 # Each child is serialized as a list of four netstrings: (name,
225 # rocap, rwcap, metadata), in which the name,rocap,metadata are in
226 # cleartext. The 'name' is UTF-8 encoded. The rwcap is formatted as:
227 # pack("16ss32s", iv, AES(H(writekey+iv), plaintextrwcap), mac)
228 assert isinstance(data, str), (repr(data), type(data))
229 # an empty directory is serialized as an empty string
# NOTE(review): lines 230-231 (empty-string early return), 234 (position
# initialization), 237 (entry extraction), and several conditionals
# (240-241, 243, 245) are elided from this listing.  'simplejson' is used
# below but its import is not visible in the elided import block.
232 writeable = not self.is_readonly()
233 children = CachingDict()
235 while position < len(data):
236 entries, position = split_netstring(data, 1, position)
238 (name, rocap, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
239 name = name.decode("utf-8")
# Only holders of the write-cap can decrypt the rwcap slot.
242 rwcap = self._decrypt_rwcapdata(rwcapdata)
244 rwcap = None # rwcap is None or a non-empty string
246 rocap = None # rocap is None or a non-empty string
247 child = self._create_node(rwcap, rocap)
248 metadata = simplejson.loads(metadata_s)
249 assert isinstance(metadata, dict)
# Cache the raw serialized entry alongside the live (child, metadata)
# tuple so _pack_contents can reuse it for unchanged children.
250 children.set_both_items(name, entry, (child, metadata))
253 def _pack_contents(self, children):
254 # expects children in the same format as _unpack_contents
255 assert isinstance(children, CachingDict)
# NOTE(review): lines 256 (entries list init), 259 (cached-entry reuse
# branch), 265-266, 269-270 (read-only/unknown-cap handling), and 273
# (the rocap netstring) are elided.
257 for name in sorted(children.keys()):
258 entry, metadata = children.get_both_items(name)
# No cached serialization: rebuild the entry from scratch.
260 child, metadata = metadata
261 assert isinstance(name, unicode)
262 assert IFilesystemNode.providedBy(child), (name,child)
263 assert isinstance(metadata, dict)
264 rwcap = child.get_uri() # might be RO if the child is not writeable
267 assert isinstance(rwcap, str), rwcap
268 rocap = child.get_readonly_uri()
271 assert isinstance(rocap, str), rocap
272 entry = "".join([netstring(name.encode("utf-8")),
274 netstring(self._encrypt_rwcap(rwcap)),
275 netstring(simplejson.dumps(metadata))])
276 entries.append(netstring(entry))
277 return "".join(entries)
# Simple delegating accessors; most one-line 'return d' statements and a
# few 'def' lines are elided from this listing.
279 def is_readonly(self):
280 return self._node.is_readonly()
281 def is_mutable(self):
282 return self._node.is_mutable()
# get_uri body (its 'def' line, original ~284, is elided).
285 return self._uri.to_string()
287 def get_readonly_uri(self):
288 return self._uri.get_readonly().to_string()
290 def get_verify_cap(self):
291 return self._uri.get_verify_cap()
# NOTE(review): lines 295-297 (the two return branches of get_repair_cap)
# are elided.
293 def get_repair_cap(self):
294 if self._node.is_readonly():
298 def get_storage_index(self):
299 return self._uri._filenode_uri.storage_index
301 def check(self, monitor, verify=False, add_lease=False):
302 """Perform a file check. See IChecker.check for details."""
303 return self._node.check(monitor, verify, add_lease)
304 def check_and_repair(self, monitor, verify=False, add_lease=False):
305 return self._node.check_and_repair(monitor, verify, add_lease)
# list() docstring fragment (its 'def' line, original ~307, is elided).
308 """I return a Deferred that fires with a dictionary mapping child
309 name to a tuple of (IFileNode or IDirectoryNode, metadata)."""
312 def has_child(self, name):
313 """I return a Deferred that fires with a boolean, True if there
314 exists a child of the given name, False if not."""
315 assert isinstance(name, unicode)
# NOTE(review): line 316 (presumably 'd = self.list()') and the trailing
# 'return d' are elided.
317 d.addCallback(lambda children: children.has_key(name))
# Callback helpers that look up one child in the unpacked children dict;
# their else/return branches (322, 324-325, 328, 330-331) are elided.
320 def _get(self, children, name):
321 child = children.get(name)
323 raise NoSuchChildError(name)
326 def _get_with_metadata(self, children, name):
327 child = children.get(name)
329 raise NoSuchChildError(name)
# get(name) body (its 'def' line, original ~332, is elided).
333 """I return a Deferred that fires with the named child node,
334 which is either an IFileNode or an IDirectoryNode."""
335 assert isinstance(name, unicode)
337 d.addCallback(self._get, name)
340 def get_child_and_metadata(self, name):
341 """I return a Deferred that fires with the (node, metadata) pair for
342 the named child. The node is either an IFileNode or an
343 IDirectoryNode, and the metadata is a dictionary."""
344 assert isinstance(name, unicode)
346 d.addCallback(self._get_with_metadata, name)
349 def get_metadata_for(self, name):
350 assert isinstance(name, unicode)
352 d.addCallback(lambda children: children[name][1])
355 def set_metadata_for(self, name, metadata):
356 assert isinstance(name, unicode)
357 if self.is_readonly():
358 return defer.fail(NotMutableError())
359 assert isinstance(metadata, dict)
# Delegate the read-modify-write cycle to MetadataSetter via the
# underlying mutable file's modify() machinery.
360 s = MetadataSetter(self, name, metadata)
361 d = self._node.modify(s.modify)
362 d.addCallback(lambda res: self)
365 def get_child_at_path(self, path):
366 """Transform a child path into an IDirectoryNode or IFileNode.
368 I perform a recursive series of 'get' operations to find the named
369 descendant node. I return a Deferred that fires with the node, or
370 errbacks with IndexError if the node could not be found.
372 The path can be either a single string (slash-separated) or a list of
# NOTE(review): the rest of the docstring and the trailing 'return d'
# are elided from this listing.
375 d = self.get_child_and_metadata_at_path(path)
376 d.addCallback(lambda (node, metadata): node)
379 def get_child_and_metadata_at_path(self, path):
380 """Transform a child path into an IDirectoryNode or IFileNode and
381 a metadata dictionary from the last edge that was traversed.
# Empty path resolves to this node with empty metadata.
# NOTE(review): lines 382-384, 387-388, 390, 392, 394, 398, 400-401
# (path normalization details, childname assignment, returns) are elided.
385 return defer.succeed((self, {}))
386 if isinstance(path, (list, tuple)):
389 path = path.split("/")
391 assert isinstance(p, unicode)
# Multi-component path: recurse into the first child with the rest.
393 remaining_path = path[1:]
395 d = self.get(childname)
396 d.addCallback(lambda node:
397 node.get_child_and_metadata_at_path(remaining_path))
# Single component: resolve it directly on this node.
399 d = self.get_child_and_metadata(childname)
402 def set_uri(self, name, child_uri, metadata=None, overwrite=True):
403 """I add a child (by URI) at the specific name. I return a Deferred
404 that fires with the child node when the operation finishes. I will
405 replace any existing child of the same name.
407 The child_uri could be for a file, or for a directory (either
408 read-write or read-only, using a URI that came from get_uri() ).
410 If this directory node is read-only, the Deferred will errback with a
412 precondition(isinstance(name, unicode), name)
413 precondition(isinstance(child_uri, str), child_uri)
414 child_node = self._create_node(child_uri, None)
415 if isinstance(child_node, UnknownNode):
416 # don't be willing to pack unknown nodes: we might accidentally
417 # put some write-authority into the rocap slot because we don't
418 # know how to diminish the URI they gave us. We don't even know
419 # if they gave us a readcap or a writecap.
420 msg = "cannot pack unknown node as child %s" % str(name)
421 raise CannotPackUnknownNodeError(msg)
422 d = self.set_node(name, child_node, metadata, overwrite)
423 d.addCallback(lambda res: child_node)
# Batch variant of set_uri: queue each (name, uri, metadata) entry on an
# Adder and apply them in one modify() pass.  NOTE(review): lines 427,
# 429-435 (precondition/loop details) are elided.
426 def set_children(self, entries, overwrite=True):
428 a = Adder(self, overwrite=overwrite)
436 name, child_uri, metadata = e
437 assert isinstance(name, unicode)
438 child_node = self._create_node(child_uri, None)
439 if isinstance(child_node, UnknownNode):
440 msg = "cannot pack unknown node as child %s" % str(name)
441 raise CannotPackUnknownNodeError(msg)
442 a.set_node(name, child_node, metadata)
443 return self._node.modify(a.modify)
445 def set_node(self, name, child, metadata=None, overwrite=True):
446 """I add a child at the specific name. I return a Deferred that fires
447 when the operation finishes. This Deferred will fire with the child
448 node that was just added. I will replace any existing child of the
451 If this directory node is read-only, the Deferred will errback with a
454 precondition(IFilesystemNode.providedBy(child), child)
456 if self.is_readonly():
457 return defer.fail(NotMutableError())
458 assert isinstance(name, unicode)
459 assert IFilesystemNode.providedBy(child), child
460 a = Adder(self, overwrite=overwrite)
461 a.set_node(name, child, metadata)
462 d = self._node.modify(a.modify)
463 d.addCallback(lambda res: child)
# Batch variant: entries are already (name, node, metadata) tuples.
466 def set_nodes(self, entries, overwrite=True):
467 if self.is_readonly():
468 return defer.fail(NotMutableError())
469 a = Adder(self, entries, overwrite=overwrite)
470 d = self._node.modify(a.modify)
471 d.addCallback(lambda res: None)
475 def add_file(self, name, uploadable, metadata=None, overwrite=True):
476 """I upload a file (using the given IUploadable), then attach the
477 resulting FileNode to the directory at the given name. I return a
478 Deferred that fires (with the IFileNode of the uploaded file) when
479 the operation completes."""
480 assert isinstance(name, unicode)
481 if self.is_readonly():
482 return defer.fail(NotMutableError())
483 d = self._client.upload(uploadable)
484 d.addCallback(lambda results: results.uri)
485 d.addCallback(self._client.create_node_from_uri)
486 d.addCallback(lambda node:
487 self.set_node(name, node, metadata, overwrite))
490 def delete(self, name):
491 """I remove the child at the specific name. I return a Deferred that
492 fires (with the node just removed) when the operation finishes."""
493 assert isinstance(name, unicode)
494 if self.is_readonly():
495 return defer.fail(NotMutableError())
496 deleter = Deleter(self, name)
497 d = self._node.modify(deleter.modify)
498 d.addCallback(lambda res: deleter.old_child)
501 def create_empty_directory(self, name, overwrite=True):
502 """I create and attach an empty directory at the given name. I return
503 a Deferred that fires (with the new directory node) when the
504 operation finishes."""
505 assert isinstance(name, unicode)
506 if self.is_readonly():
507 return defer.fail(NotMutableError())
508 d = self._client.create_empty_dirnode()
# _created callback (its 'def' line, original ~509, is elided): attach
# the freshly-created directory under 'name', then return it.
510 entries = [(name, child, None)]
511 a = Adder(self, entries, overwrite=overwrite)
512 d = self._node.modify(a.modify)
513 d.addCallback(lambda res: child)
515 d.addCallback(_created)
518 def move_child_to(self, current_child_name, new_parent,
519 new_child_name=None, overwrite=True):
520 """I take one of my children and move them to a new parent. The child
521 is referenced by name. On the new parent, the child will live under
522 'new_child_name', which defaults to 'current_child_name'. I return a
523 Deferred that fires when the operation finishes."""
524 assert isinstance(current_child_name, unicode)
525 if self.is_readonly() or new_parent.is_readonly():
526 return defer.fail(NotMutableError())
527 if new_child_name is None:
528 new_child_name = current_child_name
529 assert isinstance(new_child_name, unicode)
530 d = self.get(current_child_name)
# Attach to the new parent first, then delete from this directory —
# so a failure midway leaves the child reachable in at least one place.
# NOTE(review): lines 531, 533-534, 536-538 (the _attach callback
# wrapper and returns) are elided.
532 return new_parent.set_node(new_child_name, child,
535 d.addCallback(lambda child: self.delete(current_child_name))
539 def deep_traverse(self, walker):
540 """Perform a recursive walk, using this dirnode as a root, notifying
541 the 'walker' instance of everything I encounter.
543 I call walker.enter_directory(parent, children) once for each dirnode
544 I visit, immediately after retrieving the list of children. I pass in
545 the parent dirnode and the dict of childname->(childnode,metadata).
546 This function should *not* traverse the children: I will do that.
547 enter_directory() is most useful for the deep-stats number that
548 counts how large a directory is.
550 I call walker.add_node(node, path) for each node (both files and
551 directories) I can reach. Most work should be done here.
553 I avoid loops by keeping track of verifier-caps and refusing to call
554 walker.add_node() or traverse a node that I've seen before. This
555 means that any file or directory will only be given to the walker
556 once. If files or directories are referenced multiple times by a
557 directory structure, this may appear to under-count or miss some of
560 I return a Monitor which can be used to wait for the operation to
561 finish, learn about its progress, or cancel the operation.
564 # this is just a tree-walker, except that following each edge
565 # requires a Deferred. We used to use a ConcurrencyLimiter to limit
566 # fanout to 10 simultaneous operations, but the memory load of the
567 # queued operations was excessive (in one case, with 330k dirnodes,
568 # it caused the process to run into the 3.0GB-ish per-process 32bit
569 # linux memory limit, and crashed). So we use a single big Deferred
570 # chain, and do a strict depth-first traversal, one node at a time.
571 # This can be slower, because we aren't pipelining directory reads,
572 # but it brought the memory footprint down by roughly 50%.
# NOTE(review): lines 573-574 (Monitor construction), 576 and 582-583
# (the return of the monitor) are elided from this listing.
575 walker.set_monitor(monitor)
# 'found' is the loop-prevention set of verify-caps seen so far.
577 found = set([self.get_verify_cap()])
578 d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
579 d.addCallback(lambda ignored: walker.finish())
580 d.addBoth(monitor.finish)
# Swallow any final error: results/errors are reported via the monitor.
581 d.addErrback(lambda f: None)
585 def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
586 # process this directory, then walk its children
587 monitor.raise_if_cancelled()
588 d = defer.maybeDeferred(walker.add_node, node, path)
589 d.addCallback(lambda ignored: node.list())
590 d.addCallback(self._deep_traverse_dirnode_children, node, path,
591 walker, monitor, found)
594 def _deep_traverse_dirnode_children(self, children, parent, path,
595 walker, monitor, found):
596 monitor.raise_if_cancelled()
597 d = defer.maybeDeferred(walker.enter_directory, parent, children)
598 # we process file-like children first, so we can drop their FileNode
599 # objects as quickly as possible. Tests suggest that a FileNode (held
600 # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
601 # in the nodecache) seem to consume about 2000 bytes.
# NOTE(review): lines 602-603 (dirkids/filekids list init), 608, 612-613,
# 616 ('continue'/found.add/'else:' lines), 627, and 632-636 (turn-break
# condition and final return) are elided from this listing.
604 for name, (child, metadata) in sorted(children.iteritems()):
605 childpath = path + [name]
606 if isinstance(child, UnknownNode):
607 walker.add_node(child, childpath)
609 verifier = child.get_verify_cap()
610 # allow LIT files (for which verifier==None) to be processed
611 if (verifier is not None) and (verifier in found):
614 if IDirectoryNode.providedBy(child):
615 dirkids.append( (child, childpath) )
617 filekids.append( (child, childpath) )
618 for i, (child, childpath) in enumerate(filekids):
619 d.addCallback(lambda ignored, child=child, childpath=childpath:
620 walker.add_node(child, childpath))
621 # to work around the Deferred tail-recursion problem
622 # (specifically the defer.succeed flavor) requires us to avoid
623 # doing more than 158 LIT files in a row. We insert a turn break
624 # once every 100 files (LIT or CHK) to preserve some stack space
625 # for other code. This is a different expression of the same
626 # Twisted problem as in #237.
628 d.addCallback(lambda ignored: fireEventually())
629 for (child, childpath) in dirkids:
630 d.addCallback(lambda ignored, child=child, childpath=childpath:
631 self._deep_traverse_dirnode(child, childpath,
637 def build_manifest(self):
638 """Return a Monitor, with a ['status'] that will be a list of (path,
639 cap) tuples, for all nodes (directories and files) reachable from
641 walker = ManifestWalker(self)
642 return self.deep_traverse(walker)
644 def start_deep_stats(self):
645 # Since deep_traverse tracks verifier caps, we avoid double-counting
646 # children for which we've got both a write-cap and a read-cap
647 return self.deep_traverse(DeepStats(self))
649 def start_deep_check(self, verify=False, add_lease=False):
650 return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease))
652 def start_deep_check_and_repair(self, verify=False, add_lease=False):
653 return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease))
# DeepStats: a deep-traversal walker that accumulates counts, sizes, and a
# size histogram over every reachable node.
# NOTE(review): the enclosing class header (original ~655-657) and many
# interior lines (659-660 stats/histograms init, 664-666, 670-671, 675-677
# remaining counter names, 682, 687, 706, 710, 717, 720, 723, 728-730, 732,
# 738-740, 744-747, 753-757, 759 returns/else-branches) are elided.
658 def __init__(self, origin):
# Initialize every known counter key to zero (list is partially elided).
661 for k in ["count-immutable-files",
662 "count-mutable-files",
663 "count-literal-files",
667 "size-immutable-files",
668 #"size-mutable-files",
669 "size-literal-files",
672 "largest-directory-children",
673 "largest-immutable-file",
674 #"largest-mutable-file",
678 for k in ["size-files-histogram"]:
679 self.histograms[k] = {} # maps (min,max) to count
# Histogram buckets grow lazily: two per decade, spaced by sqrt(10).
680 self.buckets = [ (0,0), (1,3)]
681 self.root = math.sqrt(10)
683 def set_monitor(self, monitor):
684 self.monitor = monitor
685 monitor.origin_si = self.origin.get_storage_index()
686 monitor.set_status(self.get_results())
# Classify one node and bump the relevant counters/sizes.
688 def add_node(self, node, childpath):
689 if isinstance(node, UnknownNode):
690 self.add("count-unknown")
691 elif IDirectoryNode.providedBy(node):
692 self.add("count-directories")
693 elif IMutableFileNode.providedBy(node):
694 self.add("count-files")
695 self.add("count-mutable-files")
696 # TODO: update the servermap, compute a size, add it to
697 # size-mutable-files, max it into "largest-mutable-file"
698 elif IFileNode.providedBy(node): # CHK and LIT
699 self.add("count-files")
700 size = node.get_size()
701 self.histogram("size-files-histogram", size)
702 theuri = from_string(node.get_uri())
703 if isinstance(theuri, LiteralFileURI):
704 self.add("count-literal-files")
705 self.add("size-literal-files", size)
# else-branch (elided): CHK immutable file.
707 self.add("count-immutable-files")
708 self.add("size-immutable-files", size)
709 self.max("largest-immutable-file", size)
711 def enter_directory(self, parent, children):
712 dirsize_bytes = parent.get_size()
713 dirsize_children = len(children)
714 self.add("size-directories", dirsize_bytes)
715 self.max("largest-directory", dirsize_bytes)
716 self.max("largest-directory-children", dirsize_children)
718 def add(self, key, value=1):
719 self.stats[key] += value
721 def max(self, key, value):
722 self.stats[key] = max(self.stats[key], value)
724 def which_bucket(self, size):
725 # return (min,max) such that min <= size <= max
726 # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
727 # (101,316), (317, 1000), etc: two per decade
# Extend self.buckets on demand until one covers 'size'; each new
# bucket starts just past the previous upper bound.
731 if i >= len(self.buckets):
733 new_lower = self.buckets[i-1][1]+1
734 new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
735 self.buckets.append( (new_lower, new_upper) )
736 maybe = self.buckets[i]
737 if maybe[0] <= size <= maybe[1]:
741 def histogram(self, key, size):
742 bucket = self.which_bucket(size)
743 h = self.histograms[key]
# Snapshot counters plus histograms rendered as (min, max, count) rows.
748 def get_results(self):
749 stats = self.stats.copy()
750 for key in self.histograms:
751 h = self.histograms[key]
752 out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
# finish() body (its 'def' line is elided): results are the stats dict.
758 return self.get_results()
# ManifestWalker: DeepStats subclass that additionally records, for every
# node visited, its (path, cap) pair plus storage-index and verify-cap sets.
# NOTE(review): lines 763 (self.manifest init), 766, 770, 773, 776 and
# 782+ (guards around si/verifycap being None, remaining result keys) are
# elided from this listing.
760 class ManifestWalker(DeepStats):
761 def __init__(self, origin):
762 DeepStats.__init__(self, origin)
764 self.storage_index_strings = set()
765 self.verifycaps = set()
767 def add_node(self, node, path):
768 self.manifest.append( (tuple(path), node.get_uri()) )
769 si = node.get_storage_index()
771 self.storage_index_strings.add(base32.b2a(si))
772 v = node.get_verify_cap()
774 self.verifycaps.add(v.to_string())
# Still delegate to DeepStats so counters are maintained too.
775 return DeepStats.add_node(self, node, path)
777 def get_results(self):
778 stats = DeepStats.get_results(self)
779 return {"manifest": self.manifest,
780 "verifycaps": self.verifycaps,
781 "storage-index": self.storage_index_strings,
# DeepChecker: a deep-traversal walker that checks (and optionally repairs)
# every reachable node, accumulating results and DeepStats.
# NOTE(review): the enclosing class header (original ~783-786) and lines
# 795, 797 (the if/else around which results class to use), 806, 809
# (the repair/no-repair branch lines), 813-814, 817-818, 821-823 (returns
# and the finish() 'def' line) are elided from this listing.
787 def __init__(self, root, verify, repair, add_lease):
788 root_si = root.get_storage_index()
789 self._lp = log.msg(format="deep-check starting (%(si)s),"
790 " verify=%(verify)s, repair=%(repair)s",
791 si=base32.b2a(root_si), verify=verify, repair=repair)
792 self._verify = verify
793 self._repair = repair
794 self._add_lease = add_lease
# repair=True uses DeepCheckAndRepairResults, otherwise DeepCheckResults
# (the conditional lines between these two assignments are elided).
796 self._results = DeepCheckAndRepairResults(root_si)
798 self._results = DeepCheckResults(root_si)
799 self._stats = DeepStats(root)
801 def set_monitor(self, monitor):
802 self.monitor = monitor
803 monitor.set_status(self._results)
# Per-node: run check-and-repair or plain check, record the result, and
# feed the node into the stats accumulator.
805 def add_node(self, node, childpath):
807 d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
808 d.addCallback(self._results.add_check_and_repair, childpath)
810 d = node.check(self.monitor, self._verify, self._add_lease)
811 d.addCallback(self._results.add_check, childpath)
812 d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
815 def enter_directory(self, parent, children):
816 return self._stats.enter_directory(parent, children)
# finish() body (its 'def' line is elided): log completion, fold the
# stats into the results object.
819 log.msg("deep-check done", parent=self._lp)
820 self._results.update_stats(self._stats.get_results())
824 # use client.create_dirnode() to make one of these