4 from zope.interface import implements
5 from twisted.internet import defer
6 from foolscap.api import fireEventually
8 from allmydata.mutable.common import NotMutableError
9 from allmydata.mutable.filenode import MutableFileNode
10 from allmydata.unknown import UnknownNode
11 from allmydata.interfaces import IMutableFileNode, IDirectoryNode,\
12 IFileNode, IMutableFileURI, IFilesystemNode, \
13 ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
14 CannotPackUnknownNodeError
15 from allmydata.check_results import DeepCheckResults, \
16 DeepCheckAndRepairResults
17 from allmydata.monitor import Monitor
18 from allmydata.util import hashutil, mathutil, base32, log
19 from allmydata.util.assertutil import precondition
20 from allmydata.util.netstring import netstring, split_netstring
21 from allmydata.uri import DirectoryURI, ReadonlyDirectoryURI, \
22 LiteralFileURI, from_string
23 from pycryptopp.cipher.aes import AES
24 from allmydata.util.dictutil import AuxValueDict
def __init__(self, node, name, must_exist=True):
    """Modifier (for MutableFileNode.modify) that removes child `name`
    from `node`'s packed contents.

    BUG FIX: the constructor stored the literal True instead of the
    `must_exist` parameter (so must_exist=False was silently ignored),
    and never stored node/name, both of which modify() reads.
    """
    self.node = node
    self.name = name
    self.must_exist = must_exist
def modify(self, old_contents, servermap, first_time):
    """Unpack old_contents, delete self.name, and return the repacked
    contents; remembers the removed child in self.old_child.

    NOTE(review): this listing appears elided here -- the branch taken
    when the child is absent but not required (recording old_child and
    returning early) and the final `return new_contents` are not visible.
    """
    children = self.node._unpack_contents(old_contents)
    if self.name not in children:
        # only complain on the first modify() pass, and only if required
        if first_time and self.must_exist:
            raise NoSuchChildError(self.name)
    # remember the removed child so callers can retrieve it after the delete
    self.old_child, metadata = children[self.name]
    del children[self.name]
    new_contents = self.node._pack_contents(children)
def __init__(self, node, name, metadata):
    # Modifier that replaces the metadata of child `name` on `node`.
    # NOTE(review): the self.node/self.name assignments appear elided
    # from this listing; modify() below reads both.
    self.metadata = metadata
def modify(self, old_contents, servermap, first_time):
    """Replace the metadata of child self.name, keeping its node/cap."""
    children = self.node._unpack_contents(old_contents)
    if self.name not in children:
        raise NoSuchChildError(self.name)
    # keep the child node (element 0), swap in the new metadata dict
    children[self.name] = (children[self.name][0], self.metadata)
    new_contents = self.node._pack_contents(children)
    # NOTE(review): `return new_contents` appears elided from this listing.
def __init__(self, node, entries=None, overwrite=True):
    # Modifier that adds/overwrites multiple children on `node`.
    # NOTE(review): the self.node assignment and the `entries = {}`
    # default handling appear elided from this listing.
    precondition(isinstance(entries, dict), entries)
    self.entries = entries
    # overwrite may be True, False, or "only-files" (see modify below)
    self.overwrite = overwrite
def set_node(self, name, node, metadata):
    """Stage one (node, metadata) pair for insertion under `name` when
    modify() is eventually run against the directory contents."""
    precondition(isinstance(name, unicode), name)
    precondition(IFilesystemNode.providedBy(node), node)
    entry = (node, metadata)
    self.entries[name] = entry
def modify(self, old_contents, servermap, first_time):
    """Merge self.entries into the unpacked children, honoring the
    overwrite policy and maintaining the timestamp metadata.

    NOTE(review): this listing is heavily elided in this method -- the
    `if name in children:` guard around the overwrite checks, the
    continuation of the new-metadata dict literal, the else-branches,
    and the final `return new_contents` are not visible here.
    """
    children = self.node._unpack_contents(old_contents)
    for (name, (child, new_metadata)) in self.entries.iteritems():
        precondition(isinstance(name, unicode), name)
        precondition(IFilesystemNode.providedBy(child), child)
        # overwrite policy: False forbids replacement; "only-files"
        # forbids replacing an existing *directory* child
        if not self.overwrite:
            raise ExistingChildError("child '%s' already exists" % name)
        if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
            raise ExistingChildError("child '%s' already exists" % name)
        metadata = children[name][1].copy()
        # NOTE(review): dict literal truncated in this listing
        metadata = {"ctime": now,
        if new_metadata is not None:
            # Overwrite all metadata.
            newmd = new_metadata.copy()
            # but preserve the directory-managed 'tahoe' sub-dict
            if newmd.has_key('tahoe'):
                if metadata.has_key('tahoe'):
                    newmd['tahoe'] = metadata['tahoe']
        # For backwards compatibility with Tahoe < 1.4.0:
        if "ctime" not in metadata:
            metadata["ctime"] = now
        metadata["mtime"] = now
        # 'tahoe' sub-dict holds directory-managed link timestamps
        sysmd = metadata.get('tahoe', {})
        if not 'linkcrtime' in sysmd:
            if "ctime" in metadata:
                # In Tahoe < 1.4.0 we used the word "ctime" to mean what Tahoe >= 1.4.0
                # calls "linkcrtime".
                sysmd["linkcrtime"] = metadata["ctime"]
            # NOTE(review): the `else:` introducing this line appears elided
            sysmd["linkcrtime"] = now
        sysmd["linkmotime"] = now
        children[name] = (child, metadata)
    new_contents = self.node._pack_contents(children)
def _encrypt_rwcap(filenode, rwcap):
    """Encrypt a child's rwcap with a key derived from the dirnode's
    writekey. Output layout: salt(16) + AES(key, rwcap) + mac(32)
    (widths grounded in _decrypt_rwcapdata's slicing).

    NOTE(review): the creation of `cryptor` (presumably AES(key)) appears
    elided from this listing.
    """
    assert isinstance(rwcap, str)
    salt = hashutil.mutable_rwcap_salt_hash(rwcap)
    key = hashutil.mutable_rwcap_key_hash(salt, filenode.get_writekey())
    crypttext = cryptor.process(rwcap)
    mac = hashutil.hmac(key, salt + crypttext)
    assert len(mac) == 32
    return salt + crypttext + mac
    # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
    # produce it for the sake of older readers.
class MustBeDeepImmutable(Exception):
    """You tried to add a non-deep-immutable node to a deep-immutable
    directory."""
def pack_children(filenode, children, deep_immutable=False):
    """Take a dict that maps:
        children[unicode_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxilliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutable exception if not.
    """
    # NOTE(review): `entries = []` and the guards selecting between the
    # aux-cache fast path and full repacking appear elided from this listing.
    has_aux = isinstance(children, AuxValueDict)
    for name in sorted(children.keys()):
        assert isinstance(name, unicode)
        (child, metadata) = children[name]
        if deep_immutable and child.is_mutable():
            # TODO: consider adding IFileSystemNode.is_deep_immutable()
            raise MustBeDeepImmutable("child '%s' is mutable" % (name,))
        # reuse the cached pre-packed entry when available (AuxValueDict)
        entry = children.get_aux(name)
        assert IFilesystemNode.providedBy(child), (name,child)
        assert isinstance(metadata, dict)
        rwcap = child.get_uri() # might be RO if the child is not writeable
        assert isinstance(rwcap, str), rwcap
        rocap = child.get_readonly_uri()
        assert isinstance(rocap, str), rocap
        # each entry is netstrings: name, [rocap -- that element appears
        # elided from this listing], encrypted rwcap, JSON metadata
        entry = "".join([netstring(name.encode("utf-8")),
                         netstring(_encrypt_rwcap(filenode, rwcap)),
                         netstring(simplejson.dumps(metadata))])
        entries.append(netstring(entry))
    return "".join(entries)
# class-level declarations for the directory node class (the
# `class DirectoryNode:` header appears elided from this listing)
implements(IDirectoryNode, ICheckable, IDeepCheckable)
filenode_class = MutableFileNode
def __init__(self, filenode, nodemaker, uploader):
    """Wrap a mutable file as a directory node.

    filenode: the backing mutable file; nodemaker: creates child nodes
    from caps; uploader: used by add_file().

    BUG FIX: the read-only URI assignment was unconditionally clobbered
    by the read-write assignment on the next line; restore the `else:`
    so read-only filenodes get a ReadonlyDirectoryURI.
    """
    self._node = filenode
    filenode_uri = IMutableFileURI(filenode.get_uri())
    if filenode_uri.is_readonly():
        self._uri = ReadonlyDirectoryURI(filenode_uri)
    else:
        self._uri = DirectoryURI(filenode_uri)
    self._nodemaker = nodemaker
    self._uploader = uploader
    # cached length of the last downloaded contents (see _set_size)
    self._most_recent_size = None
# __repr__ body (the `def __repr__(self):` line appears elided from this
# listing). Shows RW/RO status and the abbreviated URI; the hasattr guard
# keeps repr() safe even if __init__ has not finished.
return "<%s %s %s>" % (self.__class__.__name__, self.is_readonly() and "RO" or "RW", hasattr(self, '_uri') and self._uri.abbrev())
# get_size remnants (the `def get_size(self):` line appears elided from
# this listing)
# return the size of our backing mutable file, in bytes, if we've
return self._most_recent_size
def _set_size(self, data):
    """Record the size of the freshly downloaded contents and pass the
    data through unchanged.

    BUG FIX: this callback sits between download_best_version() and
    _unpack_contents() in _read()'s Deferred chain, so it must return
    `data`; returning None would hand None to _unpack_contents, which
    asserts isinstance(data, str).
    """
    self._most_recent_size = len(data)
    return data
# _read remnants (the `def _read(self):` line and the trailing `return d`
# appear elided from this listing): download current contents, cache the
# size, and unpack into a children dict.
d = self._node.download_best_version()
d.addCallback(self._set_size)
d.addCallback(self._unpack_contents)
def _decrypt_rwcapdata(self, encwrcap):
    """Decrypt one child's encrypted rwcap. Layout: salt(16) +
    crypttext + mac(32), per the slicing below.

    NOTE(review): the `salt = encwrcap[:16]`, the AES cryptor creation,
    and `return plaintext` lines appear elided from this listing.
    """
    crypttext = encwrcap[16:-32]
    key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
    plaintext = cryptor.process(crypttext)
def _create_node(self, rwcap, rocap):
    """Build a filesystem node for a child from its (writecap, readcap)
    pair, delegating to the nodemaker."""
    child = self._nodemaker.create_from_cap(rwcap, rocap)
    return child
def _unpack_contents(self, data):
    # the directory is serialized as a list of netstrings, one per child.
    # Each child is serialized as a list of four netstrings: (name,
    # rocap, rwcap, metadata), in which the name,rocap,metadata are in
    # cleartext. The 'name' is UTF-8 encoded. The rwcap is formatted as:
    # pack("16ss32s", iv, AES(H(writekey+iv), plaintextrwcap), mac)
    assert isinstance(data, str), (repr(data), type(data))
    # an empty directory is serialized as an empty string
    # NOTE(review): the empty-string guard introducing this return appears
    # elided from this listing.
    return AuxValueDict()
    writeable = not self.is_readonly()
    children = AuxValueDict()
    # NOTE(review): `position = 0` appears elided.
    while position < len(data):
        entries, position = split_netstring(data, 1, position)
        # NOTE(review): `entry = entries[0]` appears elided.
        (name, rocap, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
        name = name.decode("utf-8")
        # NOTE(review): the if/else guards (writeable, empty-cap checks)
        # around the next three assignments appear elided from this listing.
        rwcap = self._decrypt_rwcapdata(rwcapdata)
        rwcap = None # rwcap is None or a non-empty string
        rocap = None # rocap is None or a non-empty string
        child = self._create_node(rwcap, rocap)
        metadata = simplejson.loads(metadata_s)
        assert isinstance(metadata, dict)
        # keep the raw entry as aux data so pack_children can reuse it
        children.set_with_aux(name, (child, metadata), auxilliary=entry)
    # NOTE(review): `return children` appears elided.
def _pack_contents(self, children):
    """Serialize a children dict (same format as _unpack_contents returns)
    into the backing-file string."""
    packed = pack_children(self._node, children)
    return packed
def is_readonly(self):
    """True when we hold only read authority over the backing file."""
    backing = self._node
    return backing.is_readonly()
def is_mutable(self):
    """Delegate mutability to the backing mutable file node."""
    backing = self._node
    return backing.is_mutable()
# get_uri body (the `def get_uri(self):` line appears elided from this
# listing): return this directory's cap as a string.
return self._uri.to_string()
def get_readonly_uri(self):
    """Return the diminished, read-only cap for this directory as a
    string."""
    ro = self._uri.get_readonly()
    return ro.to_string()
def get_verify_cap(self):
    """Return the verify cap object for this directory's URI."""
    uri = self._uri
    return uri.get_verify_cap()
def get_repair_cap(self):
    """Return a cap sufficient to repair this directory.

    NOTE(review): the branch bodies appear elided from this listing
    (presumably read-only nodes return None and writable ones return the
    full URI -- confirm against the complete source).
    """
    if self._node.is_readonly():
def get_storage_index(self):
    """Return the storage index of the backing mutable file's URI."""
    filenode_uri = self._uri._filenode_uri
    return filenode_uri.storage_index
def check(self, monitor, verify=False, add_lease=False):
    """Perform a file check on the backing mutable file. See
    IChecker.check for details."""
    backing = self._node
    return backing.check(monitor, verify, add_lease)
def check_and_repair(self, monitor, verify=False, add_lease=False):
    """Check the backing mutable file and repair it if damaged."""
    backing = self._node
    return backing.check_and_repair(monitor, verify, add_lease)
# list() remnants (the `def list(self):` line and the `return self._read()`
# body appear elided from this listing).
"""I return a Deferred that fires with a dictionary mapping child
name to a tuple of (IFileNode or IDirectoryNode, metadata)."""
def has_child(self, name):
    """I return a Deferred that fires with a boolean, True if there
    exists a child of the given name, False if not."""
    assert isinstance(name, unicode)
    # NOTE(review): `d = self._read()` and `return d` appear elided from
    # this listing.
    d.addCallback(lambda children: children.has_key(name))
def _get(self, children, name):
    # helper for get(): extract just the node for `name`, or raise
    child = children.get(name)
    # NOTE(review): the `if child is None:` guard and `return child[0]`
    # appear elided from this listing.
    raise NoSuchChildError(name)
def _get_with_metadata(self, children, name):
    # helper for get_child_and_metadata(): extract (node, metadata)
    child = children.get(name)
    # NOTE(review): the `if child is None:` guard and `return child`
    # appear elided from this listing.
    raise NoSuchChildError(name)
# get(name) remnants (the `def get(self, name):` line, `d = self._read()`,
# and `return d` appear elided from this listing).
"""I return a Deferred that fires with the named child node,
which is either an IFileNode or an IDirectoryNode."""
assert isinstance(name, unicode)
d.addCallback(self._get, name)
def get_child_and_metadata(self, name):
    """I return a Deferred that fires with the (node, metadata) pair for
    the named child. The node is either an IFileNode or an
    IDirectoryNode, and the metadata is a dictionary."""
    assert isinstance(name, unicode)
    # NOTE(review): `d = self._read()` and `return d` appear elided from
    # this listing.
    d.addCallback(self._get_with_metadata, name)
def get_metadata_for(self, name):
    """Fire with the metadata dict stored alongside the named child.

    NOTE(review): `d = self._read()` and `return d` appear elided from
    this listing.
    """
    assert isinstance(name, unicode)
    d.addCallback(lambda children: children[name][1])
def set_metadata_for(self, name, metadata):
    """Replace the named child's metadata dict; fires with self.

    NOTE(review): `return d` appears elided from this listing.
    """
    assert isinstance(name, unicode)
    if self.is_readonly():
        return defer.fail(NotMutableError())
    assert isinstance(metadata, dict)
    s = MetadataSetter(self, name, metadata)
    d = self._node.modify(s.modify)
    d.addCallback(lambda res: self)
def get_child_at_path(self, path):
    """Transform a child path into an IDirectoryNode or IFileNode.

    I perform a recursive series of 'get' operations to find the named
    descendant node. I return a Deferred that fires with the node, or
    errbacks with IndexError if the node could not be found.

    The path can be either a single string (slash-separated) or a list of
    path-name elements.
    """
    # NOTE(review): the final `return d` appears elided from this listing.
    d = self.get_child_and_metadata_at_path(path)
    d.addCallback(lambda (node, metadata): node)
def get_child_and_metadata_at_path(self, path):
    """Transform a child path into an IDirectoryNode or IFileNode and
    a metadata dictionary from the last edge that was traversed.
    """
    # NOTE(review): heavily elided in this listing -- the empty-path
    # guard introducing the early return, the else-branch of the
    # isinstance test, the childname/remaining-path extraction, the
    # single-element terminating branch, and the final `return d` are
    # not visible here.
    return defer.succeed((self, {}))
    if isinstance(path, (list, tuple)):
        path = path.split("/")
    assert isinstance(p, unicode)
    remaining_path = path[1:]
    # recursive case: fetch the next child and recurse into it
    d = self.get(childname)
    d.addCallback(lambda node:
                  node.get_child_and_metadata_at_path(remaining_path))
    # terminating case: last path element, return (node, metadata)
    d = self.get_child_and_metadata(childname)
def set_uri(self, name, writecap, readcap, metadata=None, overwrite=True):
    """Attach a child given its caps (strings or None); the Deferred
    fires with the new child node.

    NOTE(review): the final `return d` appears elided from this listing.
    """
    precondition(isinstance(name, unicode), name)
    precondition(isinstance(writecap, (str,type(None))), writecap)
    precondition(isinstance(readcap, (str,type(None))), readcap)
    child_node = self._create_node(writecap, readcap)
    if isinstance(child_node, UnknownNode):
        # don't be willing to pack unknown nodes: we might accidentally
        # put some write-authority into the rocap slot because we don't
        # know how to diminish the URI they gave us. We don't even know
        # if they gave us a readcap or a writecap.
        msg = "cannot pack unknown node as child %s" % str(name)
        raise CannotPackUnknownNodeError(msg)
    d = self.set_node(name, child_node, metadata, overwrite)
    d.addCallback(lambda res: child_node)
def set_children(self, entries, overwrite=True):
    """Add multiple children in one modify; entries maps name to either
    (writecap, readcap) or (writecap, readcap, metadata).

    NOTE(review): elided in this listing -- the read-only guard, the
    tuple-length dispatch (only one of the two unpackings below runs per
    entry, with metadata defaulting to None), and the final `return d`.
    """
    a = Adder(self, overwrite=overwrite)
    for (name, e) in entries.iteritems():
        assert isinstance(name, unicode)
        writecap, readcap = e
        writecap, readcap, metadata = e
        precondition(isinstance(writecap, (str,type(None))), writecap)
        precondition(isinstance(readcap, (str,type(None))), readcap)
        child_node = self._create_node(writecap, readcap)
        if isinstance(child_node, UnknownNode):
            # same caution as set_uri: refuse to pack unknown caps
            msg = "cannot pack unknown node as child %s" % str(name)
            raise CannotPackUnknownNodeError(msg)
        a.set_node(name, child_node, metadata)
    d = self._node.modify(a.modify)
    d.addCallback(lambda ign: self)
def set_node(self, name, child, metadata=None, overwrite=True):
    """I add a child at the specific name. I return a Deferred that fires
    when the operation finishes. This Deferred will fire with the child
    node that was just added. I will replace any existing child of the
    same name.

    If this directory node is read-only, the Deferred will errback with a
    NotMutableError."""
    precondition(IFilesystemNode.providedBy(child), child)
    if self.is_readonly():
        return defer.fail(NotMutableError())
    assert isinstance(name, unicode)
    assert IFilesystemNode.providedBy(child), child
    a = Adder(self, overwrite=overwrite)
    a.set_node(name, child, metadata)
    d = self._node.modify(a.modify)
    d.addCallback(lambda res: child)
    # NOTE(review): `return d` appears elided from this listing.
def set_nodes(self, entries, overwrite=True):
    """Add multiple (node, metadata) children in a single modify; the
    Deferred fires with self.

    NOTE(review): `return d` appears elided from this listing.
    """
    precondition(isinstance(entries, dict), entries)
    if self.is_readonly():
        return defer.fail(NotMutableError())
    a = Adder(self, entries, overwrite=overwrite)
    d = self._node.modify(a.modify)
    d.addCallback(lambda res: self)
def add_file(self, name, uploadable, metadata=None, overwrite=True):
    """I upload a file (using the given IUploadable), then attach the
    resulting FileNode to the directory at the given name. I return a
    Deferred that fires (with the IFileNode of the uploaded file) when
    the operation completes."""
    assert isinstance(name, unicode)
    if self.is_readonly():
        return defer.fail(NotMutableError())
    # upload first, then turn the resulting cap into a node and link it
    d = self._uploader.upload(uploadable)
    d.addCallback(lambda results: results.uri)
    d.addCallback(self._nodemaker.create_from_cap)
    d.addCallback(lambda node:
                  self.set_node(name, node, metadata, overwrite))
    # NOTE(review): `return d` appears elided from this listing.
def delete(self, name):
    """I remove the child at the specific name. I return a Deferred that
    fires (with the node just removed) when the operation finishes."""
    assert isinstance(name, unicode)
    if self.is_readonly():
        return defer.fail(NotMutableError())
    deleter = Deleter(self, name)
    d = self._node.modify(deleter.modify)
    # Deleter.modify stashed the removed child for us to return
    d.addCallback(lambda res: deleter.old_child)
    # NOTE(review): `return d` appears elided from this listing.
def create_subdirectory(self, name, initial_children={}, overwrite=True):
    """Create a new mutable directory and attach it under `name`; the
    Deferred fires with the new child dirnode.

    NOTE(review): mutable default arg `initial_children={}` is shared
    across calls (harmless only if never mutated); also the
    `def _created(child):` wrapper line and both `return d` lines appear
    elided from this listing -- the middle section below is that
    callback's body.
    """
    assert isinstance(name, unicode)
    if self.is_readonly():
        return defer.fail(NotMutableError())
    d = self._nodemaker.create_new_mutable_directory(initial_children)
    entries = {name: (child, None)}
    a = Adder(self, entries, overwrite=overwrite)
    d = self._node.modify(a.modify)
    d.addCallback(lambda res: child)
    d.addCallback(_created)
def move_child_to(self, current_child_name, new_parent,
                  new_child_name=None, overwrite=True):
    """I take one of my children and move them to a new parent. The child
    is referenced by name. On the new parent, the child will live under
    'new_child_name', which defaults to 'current_child_name'. I return a
    Deferred that fires when the operation finishes."""
    assert isinstance(current_child_name, unicode)
    if self.is_readonly() or new_parent.is_readonly():
        return defer.fail(NotMutableError())
    if new_child_name is None:
        new_child_name = current_child_name
    assert isinstance(new_child_name, unicode)
    d = self.get(current_child_name)
    # NOTE(review): the `def _got_child(child):` wrapper, the rest of the
    # truncated set_node call, and the final `return d` appear elided
    # from this listing.
    return new_parent.set_node(new_child_name, child,
    d.addCallback(lambda child: self.delete(current_child_name))
def deep_traverse(self, walker):
    """Perform a recursive walk, using this dirnode as a root, notifying
    the 'walker' instance of everything I encounter.

    I call walker.enter_directory(parent, children) once for each dirnode
    I visit, immediately after retrieving the list of children. I pass in
    the parent dirnode and the dict of childname->(childnode,metadata).
    This function should *not* traverse the children: I will do that.
    enter_directory() is most useful for the deep-stats number that
    counts how large a directory is.

    I call walker.add_node(node, path) for each node (both files and
    directories) I can reach. Most work should be done here.

    I avoid loops by keeping track of verifier-caps and refusing to call
    walker.add_node() or traverse a node that I've seen before. This
    means that any file or directory will only be given to the walker
    once. If files or directories are referenced multiple times by a
    directory structure, this may appear to under-count or miss some of
    them.

    I return a Monitor which can be used to wait for the operation to
    finish, learn about its progress, or cancel the operation.
    """
    # this is just a tree-walker, except that following each edge
    # requires a Deferred. We used to use a ConcurrencyLimiter to limit
    # fanout to 10 simultaneous operations, but the memory load of the
    # queued operations was excessive (in one case, with 330k dirnodes,
    # it caused the process to run into the 3.0GB-ish per-process 32bit
    # linux memory limit, and crashed). So we use a single big Deferred
    # chain, and do a strict depth-first traversal, one node at a time.
    # This can be slower, because we aren't pipelining directory reads,
    # but it brought the memory footprint down by roughly 50%.
    # NOTE(review): `monitor = Monitor()` creation and the final
    # `return monitor` appear elided from this listing.
    walker.set_monitor(monitor)
    # seed the visited-set with our own verify cap to avoid self-loops
    found = set([self.get_verify_cap()])
    d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
    d.addCallback(lambda ignored: walker.finish())
    d.addBoth(monitor.finish)
    # swallow errors after monitor.finish has recorded them
    d.addErrback(lambda f: None)
def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
    # process this directory, then walk its children
    # NOTE(review): the final `return d` appears elided from this listing.
    monitor.raise_if_cancelled()
    d = defer.maybeDeferred(walker.add_node, node, path)
    d.addCallback(lambda ignored: node.list())
    d.addCallback(self._deep_traverse_dirnode_children, node, path,
                  walker, monitor, found)
def _deep_traverse_dirnode_children(self, children, parent, path,
                                    walker, monitor, found):
    """Notify the walker of this directory, then visit files first and
    recurse into subdirectories.

    NOTE(review): elided in this listing -- the dirkids/filekids list
    initialization, the `continue` statements after the UnknownNode and
    already-seen checks, `found.add(verifier)`, the `else:` before the
    filekids append, the 100-file turn-break condition, the remaining
    arguments of the recursive _deep_traverse_dirnode call, and the
    final `return d`.
    """
    monitor.raise_if_cancelled()
    d = defer.maybeDeferred(walker.enter_directory, parent, children)
    # we process file-like children first, so we can drop their FileNode
    # objects as quickly as possible. Tests suggest that a FileNode (held
    # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
    # in the nodecache) seem to consume about 2000 bytes.
    for name, (child, metadata) in sorted(children.iteritems()):
        childpath = path + [name]
        if isinstance(child, UnknownNode):
            walker.add_node(child, childpath)
        verifier = child.get_verify_cap()
        # allow LIT files (for which verifier==None) to be processed
        if (verifier is not None) and (verifier in found):
        if IDirectoryNode.providedBy(child):
            dirkids.append( (child, childpath) )
            filekids.append( (child, childpath) )
    for i, (child, childpath) in enumerate(filekids):
        d.addCallback(lambda ignored, child=child, childpath=childpath:
                      walker.add_node(child, childpath))
        # to work around the Deferred tail-recursion problem
        # (specifically the defer.succeed flavor) requires us to avoid
        # doing more than 158 LIT files in a row. We insert a turn break
        # once every 100 files (LIT or CHK) to preserve some stack space
        # for other code. This is a different expression of the same
        # Twisted problem as in #237.
        d.addCallback(lambda ignored: fireEventually())
    for (child, childpath) in dirkids:
        d.addCallback(lambda ignored, child=child, childpath=childpath:
                      self._deep_traverse_dirnode(child, childpath,
def build_manifest(self):
    """Return a Monitor, with a ['status'] that will be a list of (path,
    cap) tuples, for all nodes (directories and files) reachable from
    this one."""
    walker = ManifestWalker(self)
    return self.deep_traverse(walker)
def start_deep_stats(self):
    """Start a deep-stats crawl from this directory; returns a Monitor."""
    # Since deep_traverse tracks verifier caps, we avoid double-counting
    # children for which we've got both a write-cap and a read-cap
    walker = DeepStats(self)
    return self.deep_traverse(walker)
def start_deep_check(self, verify=False, add_lease=False):
    """Deep-check every reachable node (no repair); returns a Monitor."""
    checker = DeepChecker(self, verify, repair=False, add_lease=add_lease)
    return self.deep_traverse(checker)
def start_deep_check_and_repair(self, verify=False, add_lease=False):
    """Deep-check every reachable node, repairing damaged ones; returns a
    Monitor."""
    checker = DeepChecker(self, verify, repair=True, add_lease=add_lease)
    return self.deep_traverse(checker)
def __init__(self, origin):
    """Accumulate statistics over a deep traversal rooted at `origin`.

    NOTE(review): heavily elided in this listing -- the self.origin and
    self.stats/self.histograms initialization, several counter names,
    the closing of the first list (with its `self.stats[k] = 0` body),
    are not visible here.
    """
    for k in ["count-immutable-files",
              "count-mutable-files",
              "count-literal-files",
              "size-immutable-files",
              #"size-mutable-files",
              "size-literal-files",
              "largest-directory-children",
              "largest-immutable-file",
              #"largest-mutable-file",
    for k in ["size-files-histogram"]:
        self.histograms[k] = {} # maps (min,max) to count
    # histogram buckets grow roughly half a decade per step (see
    # which_bucket); start with the two smallest
    self.buckets = [ (0,0), (1,3)]
    self.root = math.sqrt(10)
def set_monitor(self, monitor):
    """Attach the Monitor driving this crawl: record the origin's storage
    index on it and seed its status with our (initially empty) results."""
    self.monitor = monitor
    origin_si = self.origin.get_storage_index()
    monitor.origin_si = origin_si
    initial = self.get_results()
    monitor.set_status(initial)
def add_node(self, node, childpath):
    """Classify one reachable node and bump the matching counters."""
    if isinstance(node, UnknownNode):
        self.add("count-unknown")
    elif IDirectoryNode.providedBy(node):
        self.add("count-directories")
    elif IMutableFileNode.providedBy(node):
        self.add("count-files")
        self.add("count-mutable-files")
        # TODO: update the servermap, compute a size, add it to
        # size-mutable-files, max it into "largest-mutable-file"
    elif IFileNode.providedBy(node): # CHK and LIT
        self.add("count-files")
        size = node.get_size()
        self.histogram("size-files-histogram", size)
        theuri = from_string(node.get_uri())
        if isinstance(theuri, LiteralFileURI):
            self.add("count-literal-files")
            self.add("size-literal-files", size)
        # NOTE(review): the `else:` introducing the immutable (CHK) branch
        # appears elided from this listing; the next three lines belong to
        # that branch.
            self.add("count-immutable-files")
            self.add("size-immutable-files", size)
            self.max("largest-immutable-file", size)
def enter_directory(self, parent, children):
    """Record one visited directory: its serialized size in bytes and its
    child count, tracking the largest of each seen so far."""
    dirsize_bytes = parent.get_size()
    dirsize_children = len(children)
    self.add("size-directories", dirsize_bytes)
    self.max("largest-directory", dirsize_bytes)
    self.max("largest-directory-children", dirsize_children)
def add(self, key, value=1):
    """Increment the named counter by `value` (default 1)."""
    current = self.stats[key]
    self.stats[key] = current + value
def max(self, key, value):
    """Raise stats[key] to `value` if it is larger (running maximum).

    Note: this method shadows the builtin name `max` on the class."""
    if value > self.stats[key]:
        self.stats[key] = value
def which_bucket(self, size):
    # return (min,max) such that min <= size <= max
    # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
    # (101,316), (317, 1000), etc: two per decade
    # NOTE(review): the loop setup (`i = 0` / `while True:`), the
    # `return maybe` on a hit, and the index increment appear elided
    # from this listing; the lines below are that loop's body.
        if i >= len(self.buckets):
            # extend the bucket list on demand: each new bucket starts
            # just past the previous upper bound and grows by sqrt(10)
            new_lower = self.buckets[i-1][1]+1
            new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
            self.buckets.append( (new_lower, new_upper) )
        maybe = self.buckets[i]
        if maybe[0] <= size <= maybe[1]:
def histogram(self, key, size):
    """Bump the count of the histogram bucket that `size` falls into.

    NOTE(review): the bucket-initialization and increment lines appear
    elided from this listing.
    """
    bucket = self.which_bucket(size)
    h = self.histograms[key]
def get_results(self):
    """Return a copy of the stats dict with each histogram flattened into
    (min, max, count) triples.

    NOTE(review): the sort/assignment of `out` and the final
    `return stats` appear elided from this listing.
    """
    stats = self.stats.copy()
    for key in self.histograms:
        h = self.histograms[key]
        out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
# finish() body (the `def finish(self):` line appears elided from this
# listing): the final walker callback simply reports the accumulated
# stats.
return self.get_results()
class ManifestWalker(DeepStats):
    """Deep-traversal walker that, in addition to DeepStats counters,
    collects a manifest of (path, cap) tuples plus the sets of verify
    caps and storage-index strings seen."""

    def __init__(self, origin):
        DeepStats.__init__(self, origin)
        # NOTE(review): `self.manifest = []` appears elided from this
        # listing.
        self.storage_index_strings = set()
        self.verifycaps = set()

    def add_node(self, node, path):
        self.manifest.append( (tuple(path), node.get_uri()) )
        si = node.get_storage_index()
        # NOTE(review): the guards around the two adds below (nodes such
        # as LIT files may lack an SI or verify cap) appear elided from
        # this listing.
        self.storage_index_strings.add(base32.b2a(si))
        v = node.get_verify_cap()
        self.verifycaps.add(v.to_string())
        # still feed the node to DeepStats so the counters are maintained
        return DeepStats.add_node(self, node, path)

    def get_results(self):
        stats = DeepStats.get_results(self)
        # NOTE(review): the closing "stats" entry and brace of this dict
        # appear elided from this listing.
        return {"manifest": self.manifest,
                "verifycaps": self.verifycaps,
                "storage-index": self.storage_index_strings,
def __init__(self, root, verify, repair, add_lease):
    """Walker that deep-checks (and optionally repairs) every node
    reachable from `root`.

    BUG FIX: the two results-object assignments were unconditional, so
    the check-and-repair results object was always clobbered by the
    plain check results; restore the `if repair:` branch.
    """
    root_si = root.get_storage_index()
    self._lp = log.msg(format="deep-check starting (%(si)s),"
                       " verify=%(verify)s, repair=%(repair)s",
                       si=base32.b2a(root_si), verify=verify, repair=repair)
    self._verify = verify
    self._repair = repair
    self._add_lease = add_lease
    if repair:
        self._results = DeepCheckAndRepairResults(root_si)
    else:
        self._results = DeepCheckResults(root_si)
    self._stats = DeepStats(root)
def set_monitor(self, monitor):
    """Attach the Monitor for this crawl and publish the (initially
    empty) results object as its status."""
    self.monitor = monitor
    results = self._results
    monitor.set_status(results)
def add_node(self, node, childpath):
    """Check (or check-and-repair) one node, record the outcome in the
    results object, and feed the node to the stats aggregator.

    BUG FIX: both pipelines previously ran unconditionally (the second
    `d = ...` clobbered the first, and check_and_repair was invoked even
    in plain-check mode) and the Deferred was never returned; restore
    the `if self._repair:` branch and the `return d` so deep_traverse
    can sequence on it.
    """
    if self._repair:
        d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
        d.addCallback(self._results.add_check_and_repair, childpath)
    else:
        d = node.check(self.monitor, self._verify, self._add_lease)
        d.addCallback(self._results.add_check, childpath)
    d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
    return d
def enter_directory(self, parent, children):
    """Forward directory-visit notifications to the stats aggregator."""
    stats = self._stats
    return stats.enter_directory(parent, children)
# finish() remnants (the `def finish(self):` line and the final
# `return self._results` appear elided from this listing): log
# completion and fold the aggregated stats into the results object.
log.msg("deep-check done", parent=self._lp)
self._results.update_stats(self._stats.get_results())
802 # use client.create_dirnode() to make one of these