4 from zope.interface import implements
5 from twisted.internet import defer
6 from foolscap.api import fireEventually
8 from allmydata.mutable.common import NotMutableError
9 from allmydata.mutable.filenode import MutableFileNode
10 from allmydata.unknown import UnknownNode
11 from allmydata.interfaces import IMutableFileNode, IDirectoryNode,\
12 IFileNode, IFilesystemNode, \
13 ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
14 CannotPackUnknownNodeError
15 from allmydata.check_results import DeepCheckResults, \
16 DeepCheckAndRepairResults
17 from allmydata.monitor import Monitor
18 from allmydata.util import hashutil, mathutil, base32, log
19 from allmydata.util.assertutil import precondition
20 from allmydata.util.netstring import netstring, split_netstring
21 from allmydata.uri import LiteralFileURI, from_string, wrap_dirnode_cap
22 from pycryptopp.cipher.aes import AES
23 from allmydata.util.dictutil import AuxValueDict
26 def __init__(self, node, name, must_exist=True):
29 self.must_exist = True
30 def modify(self, old_contents, servermap, first_time):
31 children = self.node._unpack_contents(old_contents)
32 if self.name not in children:
33 if first_time and self.must_exist:
34 raise NoSuchChildError(self.name)
37 self.old_child, metadata = children[self.name]
38 del children[self.name]
39 new_contents = self.node._pack_contents(children)
43 def __init__(self, node, name, metadata):
46 self.metadata = metadata
48 def modify(self, old_contents, servermap, first_time):
49 children = self.node._unpack_contents(old_contents)
50 if self.name not in children:
51 raise NoSuchChildError(self.name)
52 children[self.name] = (children[self.name][0], self.metadata)
53 new_contents = self.node._pack_contents(children)
58 def __init__(self, node, entries=None, overwrite=True):
62 precondition(isinstance(entries, dict), entries)
63 self.entries = entries
64 self.overwrite = overwrite
66 def set_node(self, name, node, metadata):
67 precondition(isinstance(name, unicode), name)
68 precondition(IFilesystemNode.providedBy(node), node)
69 self.entries[name] = (node, metadata)
71 def modify(self, old_contents, servermap, first_time):
72 children = self.node._unpack_contents(old_contents)
74 for (name, (child, new_metadata)) in self.entries.iteritems():
75 precondition(isinstance(name, unicode), name)
76 precondition(IFilesystemNode.providedBy(child), child)
78 if not self.overwrite:
79 raise ExistingChildError("child '%s' already exists" % name)
81 if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
82 raise ExistingChildError("child '%s' already exists" % name)
83 metadata = children[name][1].copy()
85 metadata = {"ctime": now,
93 if new_metadata is not None:
94 # Overwrite all metadata.
95 newmd = new_metadata.copy()
98 if newmd.has_key('tahoe'):
100 if metadata.has_key('tahoe'):
101 newmd['tahoe'] = metadata['tahoe']
105 # For backwards compatibility with Tahoe < 1.4.0:
106 if "ctime" not in metadata:
107 metadata["ctime"] = now
108 metadata["mtime"] = now
111 sysmd = metadata.get('tahoe', {})
112 if not 'linkcrtime' in sysmd:
113 if "ctime" in metadata:
114 # In Tahoe < 1.4.0 we used the word "ctime" to mean what Tahoe >= 1.4.0
115 # calls "linkcrtime".
116 sysmd["linkcrtime"] = metadata["ctime"]
118 sysmd["linkcrtime"] = now
119 sysmd["linkmotime"] = now
121 children[name] = (child, metadata)
122 new_contents = self.node._pack_contents(children)
125 def _encrypt_rwcap(filenode, rwcap):
126 assert isinstance(rwcap, str)
127 writekey = filenode.get_writekey()
130 salt = hashutil.mutable_rwcap_salt_hash(rwcap)
131 key = hashutil.mutable_rwcap_key_hash(salt, writekey)
133 crypttext = cryptor.process(rwcap)
134 mac = hashutil.hmac(key, salt + crypttext)
135 assert len(mac) == 32
136 return salt + crypttext + mac
137 # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
138 # produce it for the sake of older readers.
140 class MustBeDeepImmutable(Exception):
141 """You tried to add a non-deep-immutable node to a deep-immutable
144 def pack_children(filenode, children, deep_immutable=False):
145 """Take a dict that maps:
146 children[unicode_name] = (IFileSystemNode, metadata_dict)
147 and pack it into a single string, for use as the contents of the backing
148 file. This is the same format as is returned by _unpack_contents. I also
149 accept an AuxValueDict, in which case I'll use the auxilliary cached data
150 as the pre-packed entry, which is faster than re-packing everything each
153 If deep_immutable is True, I will require that all my children are deeply
154 immutable, and will raise a MustBeDeepImmutable exception if not.
157 has_aux = isinstance(children, AuxValueDict)
159 for name in sorted(children.keys()):
160 assert isinstance(name, unicode)
162 (child, metadata) = children[name]
163 if deep_immutable and child.is_mutable():
164 # TODO: consider adding IFileSystemNode.is_deep_immutable()
165 raise MustBeDeepImmutable("child '%s' is mutable" % (name,))
167 entry = children.get_aux(name)
169 assert IFilesystemNode.providedBy(child), (name,child)
170 assert isinstance(metadata, dict)
171 rwcap = child.get_uri() # might be RO if the child is not writeable
174 assert isinstance(rwcap, str), rwcap
175 rocap = child.get_readonly_uri()
178 assert isinstance(rocap, str), rocap
179 entry = "".join([netstring(name.encode("utf-8")),
181 netstring(_encrypt_rwcap(filenode, rwcap)),
182 netstring(simplejson.dumps(metadata))])
183 entries.append(netstring(entry))
184 return "".join(entries)
187 implements(IDirectoryNode, ICheckable, IDeepCheckable)
188 filenode_class = MutableFileNode
190 def __init__(self, filenode, nodemaker, uploader):
191 assert IFileNode.providedBy(filenode), filenode
192 assert not IDirectoryNode.providedBy(filenode), filenode
193 self._node = filenode
194 filenode_cap = filenode.get_cap()
195 self._uri = wrap_dirnode_cap(filenode_cap)
196 self._nodemaker = nodemaker
197 self._uploader = uploader
198 self._most_recent_size = None
201 return "<%s %s-%s %s>" % (self.__class__.__name__,
202 self.is_readonly() and "RO" or "RW",
203 self.is_mutable() and "MUT" or "IMM",
204 hasattr(self, '_uri') and self._uri.abbrev())
207 # return the size of our backing mutable file, in bytes, if we've
209 if not self._node.is_mutable():
210 # TODO?: consider using IMutableFileNode.providedBy(self._node)
211 return self._node.get_size()
212 return self._most_recent_size
214 def _set_size(self, data):
215 self._most_recent_size = len(data)
219 if self._node.is_mutable():
220 # use the IMutableFileNode API.
221 d = self._node.download_best_version()
222 d.addCallback(self._set_size)
224 d = self._node.download_to_data()
225 d.addCallback(self._unpack_contents)
228 def _decrypt_rwcapdata(self, encwrcap):
230 crypttext = encwrcap[16:-32]
231 key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
233 plaintext = cryptor.process(crypttext)
236 def _create_node(self, rwcap, rocap):
237 return self._nodemaker.create_from_cap(rwcap, rocap)
239 def _unpack_contents(self, data):
240 # the directory is serialized as a list of netstrings, one per child.
241 # Each child is serialized as a list of four netstrings: (name,
242 # rocap, rwcap, metadata), in which the name,rocap,metadata are in
243 # cleartext. The 'name' is UTF-8 encoded. The rwcap is formatted as:
244 # pack("16ss32s", iv, AES(H(writekey+iv), plaintextrwcap), mac)
245 assert isinstance(data, str), (repr(data), type(data))
246 # an empty directory is serialized as an empty string
248 return AuxValueDict()
249 writeable = not self.is_readonly()
250 children = AuxValueDict()
252 while position < len(data):
253 entries, position = split_netstring(data, 1, position)
255 (name, rocap, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
256 name = name.decode("utf-8")
259 rwcap = self._decrypt_rwcapdata(rwcapdata)
261 rwcap = None # rwcap is None or a non-empty string
263 rocap = None # rocap is None or a non-empty string
264 child = self._create_node(rwcap, rocap)
265 metadata = simplejson.loads(metadata_s)
266 assert isinstance(metadata, dict)
267 children.set_with_aux(name, (child, metadata), auxilliary=entry)
    def _pack_contents(self, children):
        # expects children in the same format as _unpack_contents returns
        # (a dict mapping unicode name -> (node, metadata)); delegates to the
        # module-level pack_children, passing our backing filenode so rwcaps
        # can be encrypted with its writekey.
        return pack_children(self._node, children)
274 def is_readonly(self):
275 return self._node.is_readonly()
276 def is_mutable(self):
277 return self._node.is_mutable()
280 return self._uri.to_string()
282 def get_readonly_uri(self):
283 return self._uri.get_readonly().to_string()
287 def get_readcap(self):
288 return self._uri.get_readonly()
289 def get_verify_cap(self):
290 return self._uri.get_verify_cap()
291 def get_repair_cap(self):
292 if self._node.is_readonly():
293 return None # readonly (mutable) dirnodes are not yet repairable
296 def get_storage_index(self):
297 return self._uri._filenode_uri.storage_index
299 def check(self, monitor, verify=False, add_lease=False):
300 """Perform a file check. See IChecker.check for details."""
301 return self._node.check(monitor, verify, add_lease)
302 def check_and_repair(self, monitor, verify=False, add_lease=False):
303 return self._node.check_and_repair(monitor, verify, add_lease)
306 """I return a Deferred that fires with a dictionary mapping child
307 name to a tuple of (IFileNode or IDirectoryNode, metadata)."""
310 def has_child(self, name):
311 """I return a Deferred that fires with a boolean, True if there
312 exists a child of the given name, False if not."""
313 assert isinstance(name, unicode)
315 d.addCallback(lambda children: children.has_key(name))
318 def _get(self, children, name):
319 child = children.get(name)
321 raise NoSuchChildError(name)
324 def _get_with_metadata(self, children, name):
325 child = children.get(name)
327 raise NoSuchChildError(name)
331 """I return a Deferred that fires with the named child node,
332 which is either an IFileNode or an IDirectoryNode."""
333 assert isinstance(name, unicode)
335 d.addCallback(self._get, name)
338 def get_child_and_metadata(self, name):
339 """I return a Deferred that fires with the (node, metadata) pair for
340 the named child. The node is either an IFileNode or an
341 IDirectoryNode, and the metadata is a dictionary."""
342 assert isinstance(name, unicode)
344 d.addCallback(self._get_with_metadata, name)
347 def get_metadata_for(self, name):
348 assert isinstance(name, unicode)
350 d.addCallback(lambda children: children[name][1])
353 def set_metadata_for(self, name, metadata):
354 assert isinstance(name, unicode)
355 if self.is_readonly():
356 return defer.fail(NotMutableError())
357 assert isinstance(metadata, dict)
358 s = MetadataSetter(self, name, metadata)
359 d = self._node.modify(s.modify)
360 d.addCallback(lambda res: self)
363 def get_child_at_path(self, path):
364 """Transform a child path into an IDirectoryNode or IFileNode.
366 I perform a recursive series of 'get' operations to find the named
367 descendant node. I return a Deferred that fires with the node, or
368 errbacks with IndexError if the node could not be found.
370 The path can be either a single string (slash-separated) or a list of
373 d = self.get_child_and_metadata_at_path(path)
374 d.addCallback(lambda (node, metadata): node)
377 def get_child_and_metadata_at_path(self, path):
378 """Transform a child path into an IDirectoryNode or IFileNode and
379 a metadata dictionary from the last edge that was traversed.
383 return defer.succeed((self, {}))
384 if isinstance(path, (list, tuple)):
387 path = path.split("/")
389 assert isinstance(p, unicode)
391 remaining_path = path[1:]
393 d = self.get(childname)
394 d.addCallback(lambda node:
395 node.get_child_and_metadata_at_path(remaining_path))
397 d = self.get_child_and_metadata(childname)
400 def set_uri(self, name, writecap, readcap, metadata=None, overwrite=True):
401 precondition(isinstance(name, unicode), name)
402 precondition(isinstance(writecap, (str,type(None))), writecap)
403 precondition(isinstance(readcap, (str,type(None))), readcap)
404 child_node = self._create_node(writecap, readcap)
405 if isinstance(child_node, UnknownNode):
406 # don't be willing to pack unknown nodes: we might accidentally
407 # put some write-authority into the rocap slot because we don't
408 # know how to diminish the URI they gave us. We don't even know
409 # if they gave us a readcap or a writecap.
410 msg = "cannot pack unknown node as child %s" % str(name)
411 raise CannotPackUnknownNodeError(msg)
412 d = self.set_node(name, child_node, metadata, overwrite)
413 d.addCallback(lambda res: child_node)
416 def set_children(self, entries, overwrite=True):
418 a = Adder(self, overwrite=overwrite)
420 for (name, e) in entries.iteritems():
421 assert isinstance(name, unicode)
423 writecap, readcap = e
427 writecap, readcap, metadata = e
428 precondition(isinstance(writecap, (str,type(None))), writecap)
429 precondition(isinstance(readcap, (str,type(None))), readcap)
430 child_node = self._create_node(writecap, readcap)
431 if isinstance(child_node, UnknownNode):
432 msg = "cannot pack unknown node as child %s" % str(name)
433 raise CannotPackUnknownNodeError(msg)
434 a.set_node(name, child_node, metadata)
435 d = self._node.modify(a.modify)
436 d.addCallback(lambda ign: self)
439 def set_node(self, name, child, metadata=None, overwrite=True):
440 """I add a child at the specific name. I return a Deferred that fires
441 when the operation finishes. This Deferred will fire with the child
442 node that was just added. I will replace any existing child of the
445 If this directory node is read-only, the Deferred will errback with a
448 precondition(IFilesystemNode.providedBy(child), child)
450 if self.is_readonly():
451 return defer.fail(NotMutableError())
452 assert isinstance(name, unicode)
453 assert IFilesystemNode.providedBy(child), child
454 a = Adder(self, overwrite=overwrite)
455 a.set_node(name, child, metadata)
456 d = self._node.modify(a.modify)
457 d.addCallback(lambda res: child)
460 def set_nodes(self, entries, overwrite=True):
461 precondition(isinstance(entries, dict), entries)
462 if self.is_readonly():
463 return defer.fail(NotMutableError())
464 a = Adder(self, entries, overwrite=overwrite)
465 d = self._node.modify(a.modify)
466 d.addCallback(lambda res: self)
470 def add_file(self, name, uploadable, metadata=None, overwrite=True):
471 """I upload a file (using the given IUploadable), then attach the
472 resulting FileNode to the directory at the given name. I return a
473 Deferred that fires (with the IFileNode of the uploaded file) when
474 the operation completes."""
475 assert isinstance(name, unicode)
476 if self.is_readonly():
477 return defer.fail(NotMutableError())
478 d = self._uploader.upload(uploadable)
479 d.addCallback(lambda results: results.uri)
480 d.addCallback(self._nodemaker.create_from_cap)
481 d.addCallback(lambda node:
482 self.set_node(name, node, metadata, overwrite))
485 def delete(self, name):
486 """I remove the child at the specific name. I return a Deferred that
487 fires (with the node just removed) when the operation finishes."""
488 assert isinstance(name, unicode)
489 if self.is_readonly():
490 return defer.fail(NotMutableError())
491 deleter = Deleter(self, name)
492 d = self._node.modify(deleter.modify)
493 d.addCallback(lambda res: deleter.old_child)
496 def create_subdirectory(self, name, initial_children={}, overwrite=True,
498 assert isinstance(name, unicode)
499 if self.is_readonly():
500 return defer.fail(NotMutableError())
502 d = self._nodemaker.create_new_mutable_directory(initial_children)
504 d = self._nodemaker.create_immutable_directory(initial_children)
506 entries = {name: (child, None)}
507 a = Adder(self, entries, overwrite=overwrite)
508 d = self._node.modify(a.modify)
509 d.addCallback(lambda res: child)
511 d.addCallback(_created)
514 def move_child_to(self, current_child_name, new_parent,
515 new_child_name=None, overwrite=True):
516 """I take one of my children and move them to a new parent. The child
517 is referenced by name. On the new parent, the child will live under
518 'new_child_name', which defaults to 'current_child_name'. I return a
519 Deferred that fires when the operation finishes."""
520 assert isinstance(current_child_name, unicode)
521 if self.is_readonly() or new_parent.is_readonly():
522 return defer.fail(NotMutableError())
523 if new_child_name is None:
524 new_child_name = current_child_name
525 assert isinstance(new_child_name, unicode)
526 d = self.get(current_child_name)
528 return new_parent.set_node(new_child_name, child,
531 d.addCallback(lambda child: self.delete(current_child_name))
535 def deep_traverse(self, walker):
536 """Perform a recursive walk, using this dirnode as a root, notifying
537 the 'walker' instance of everything I encounter.
539 I call walker.enter_directory(parent, children) once for each dirnode
540 I visit, immediately after retrieving the list of children. I pass in
541 the parent dirnode and the dict of childname->(childnode,metadata).
542 This function should *not* traverse the children: I will do that.
543 enter_directory() is most useful for the deep-stats number that
544 counts how large a directory is.
546 I call walker.add_node(node, path) for each node (both files and
547 directories) I can reach. Most work should be done here.
549 I avoid loops by keeping track of verifier-caps and refusing to call
550 walker.add_node() or traverse a node that I've seen before. This
551 means that any file or directory will only be given to the walker
552 once. If files or directories are referenced multiple times by a
553 directory structure, this may appear to under-count or miss some of
556 I return a Monitor which can be used to wait for the operation to
557 finish, learn about its progress, or cancel the operation.
560 # this is just a tree-walker, except that following each edge
561 # requires a Deferred. We used to use a ConcurrencyLimiter to limit
562 # fanout to 10 simultaneous operations, but the memory load of the
563 # queued operations was excessive (in one case, with 330k dirnodes,
564 # it caused the process to run into the 3.0GB-ish per-process 32bit
565 # linux memory limit, and crashed). So we use a single big Deferred
566 # chain, and do a strict depth-first traversal, one node at a time.
567 # This can be slower, because we aren't pipelining directory reads,
568 # but it brought the memory footprint down by roughly 50%.
571 walker.set_monitor(monitor)
573 found = set([self.get_verify_cap()])
574 d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
575 d.addCallback(lambda ignored: walker.finish())
576 d.addBoth(monitor.finish)
577 d.addErrback(lambda f: None)
581 def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
582 # process this directory, then walk its children
583 monitor.raise_if_cancelled()
584 d = defer.maybeDeferred(walker.add_node, node, path)
585 d.addCallback(lambda ignored: node.list())
586 d.addCallback(self._deep_traverse_dirnode_children, node, path,
587 walker, monitor, found)
590 def _deep_traverse_dirnode_children(self, children, parent, path,
591 walker, monitor, found):
592 monitor.raise_if_cancelled()
593 d = defer.maybeDeferred(walker.enter_directory, parent, children)
594 # we process file-like children first, so we can drop their FileNode
595 # objects as quickly as possible. Tests suggest that a FileNode (held
596 # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
597 # in the nodecache) seem to consume about 2000 bytes.
600 for name, (child, metadata) in sorted(children.iteritems()):
601 childpath = path + [name]
602 if isinstance(child, UnknownNode):
603 walker.add_node(child, childpath)
605 verifier = child.get_verify_cap()
606 # allow LIT files (for which verifier==None) to be processed
607 if (verifier is not None) and (verifier in found):
610 if IDirectoryNode.providedBy(child):
611 dirkids.append( (child, childpath) )
613 filekids.append( (child, childpath) )
614 for i, (child, childpath) in enumerate(filekids):
615 d.addCallback(lambda ignored, child=child, childpath=childpath:
616 walker.add_node(child, childpath))
617 # to work around the Deferred tail-recursion problem
618 # (specifically the defer.succeed flavor) requires us to avoid
619 # doing more than 158 LIT files in a row. We insert a turn break
620 # once every 100 files (LIT or CHK) to preserve some stack space
621 # for other code. This is a different expression of the same
622 # Twisted problem as in #237.
624 d.addCallback(lambda ignored: fireEventually())
625 for (child, childpath) in dirkids:
626 d.addCallback(lambda ignored, child=child, childpath=childpath:
627 self._deep_traverse_dirnode(child, childpath,
633 def build_manifest(self):
634 """Return a Monitor, with a ['status'] that will be a list of (path,
635 cap) tuples, for all nodes (directories and files) reachable from
637 walker = ManifestWalker(self)
638 return self.deep_traverse(walker)
640 def start_deep_stats(self):
641 # Since deep_traverse tracks verifier caps, we avoid double-counting
642 # children for which we've got both a write-cap and a read-cap
643 return self.deep_traverse(DeepStats(self))
645 def start_deep_check(self, verify=False, add_lease=False):
646 return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease))
648 def start_deep_check_and_repair(self, verify=False, add_lease=False):
649 return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease))
654 def __init__(self, origin):
657 for k in ["count-immutable-files",
658 "count-mutable-files",
659 "count-literal-files",
663 "size-immutable-files",
664 #"size-mutable-files",
665 "size-literal-files",
668 "largest-directory-children",
669 "largest-immutable-file",
670 #"largest-mutable-file",
674 for k in ["size-files-histogram"]:
675 self.histograms[k] = {} # maps (min,max) to count
676 self.buckets = [ (0,0), (1,3)]
677 self.root = math.sqrt(10)
679 def set_monitor(self, monitor):
680 self.monitor = monitor
681 monitor.origin_si = self.origin.get_storage_index()
682 monitor.set_status(self.get_results())
684 def add_node(self, node, childpath):
685 if isinstance(node, UnknownNode):
686 self.add("count-unknown")
687 elif IDirectoryNode.providedBy(node):
688 self.add("count-directories")
689 elif IMutableFileNode.providedBy(node):
690 self.add("count-files")
691 self.add("count-mutable-files")
692 # TODO: update the servermap, compute a size, add it to
693 # size-mutable-files, max it into "largest-mutable-file"
694 elif IFileNode.providedBy(node): # CHK and LIT
695 self.add("count-files")
696 size = node.get_size()
697 self.histogram("size-files-histogram", size)
698 theuri = from_string(node.get_uri())
699 if isinstance(theuri, LiteralFileURI):
700 self.add("count-literal-files")
701 self.add("size-literal-files", size)
703 self.add("count-immutable-files")
704 self.add("size-immutable-files", size)
705 self.max("largest-immutable-file", size)
707 def enter_directory(self, parent, children):
708 dirsize_bytes = parent.get_size()
709 dirsize_children = len(children)
710 self.add("size-directories", dirsize_bytes)
711 self.max("largest-directory", dirsize_bytes)
712 self.max("largest-directory-children", dirsize_children)
714 def add(self, key, value=1):
715 self.stats[key] += value
717 def max(self, key, value):
718 self.stats[key] = max(self.stats[key], value)
720 def which_bucket(self, size):
721 # return (min,max) such that min <= size <= max
722 # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
723 # (101,316), (317, 1000), etc: two per decade
727 if i >= len(self.buckets):
729 new_lower = self.buckets[i-1][1]+1
730 new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
731 self.buckets.append( (new_lower, new_upper) )
732 maybe = self.buckets[i]
733 if maybe[0] <= size <= maybe[1]:
737 def histogram(self, key, size):
738 bucket = self.which_bucket(size)
739 h = self.histograms[key]
744 def get_results(self):
745 stats = self.stats.copy()
746 for key in self.histograms:
747 h = self.histograms[key]
748 out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
754 return self.get_results()
756 class ManifestWalker(DeepStats):
757 def __init__(self, origin):
758 DeepStats.__init__(self, origin)
760 self.storage_index_strings = set()
761 self.verifycaps = set()
763 def add_node(self, node, path):
764 self.manifest.append( (tuple(path), node.get_uri()) )
765 si = node.get_storage_index()
767 self.storage_index_strings.add(base32.b2a(si))
768 v = node.get_verify_cap()
770 self.verifycaps.add(v.to_string())
771 return DeepStats.add_node(self, node, path)
773 def get_results(self):
774 stats = DeepStats.get_results(self)
775 return {"manifest": self.manifest,
776 "verifycaps": self.verifycaps,
777 "storage-index": self.storage_index_strings,
783 def __init__(self, root, verify, repair, add_lease):
784 root_si = root.get_storage_index()
785 self._lp = log.msg(format="deep-check starting (%(si)s),"
786 " verify=%(verify)s, repair=%(repair)s",
787 si=base32.b2a(root_si), verify=verify, repair=repair)
788 self._verify = verify
789 self._repair = repair
790 self._add_lease = add_lease
792 self._results = DeepCheckAndRepairResults(root_si)
794 self._results = DeepCheckResults(root_si)
795 self._stats = DeepStats(root)
797 def set_monitor(self, monitor):
798 self.monitor = monitor
799 monitor.set_status(self._results)
801 def add_node(self, node, childpath):
803 d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
804 d.addCallback(self._results.add_check_and_repair, childpath)
806 d = node.check(self.monitor, self._verify, self._add_lease)
807 d.addCallback(self._results.add_check, childpath)
808 d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
811 def enter_directory(self, parent, children):
812 return self._stats.enter_directory(parent, children)
815 log.msg("deep-check done", parent=self._lp)
816 self._results.update_stats(self._stats.get_results())
820 # use client.create_dirnode() to make one of these