import time, math

from zope.interface import implements
from twisted.internet import defer
from foolscap.api import fireEventually
import simplejson
from allmydata.mutable.common import NotMutableError
from allmydata.mutable.filenode import MutableFileNode
from allmydata.unknown import UnknownNode
from allmydata.interfaces import IMutableFileNode, IDirectoryNode,\
     IFileNode, IMutableFileURI, IFilesystemNode, \
     ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
     CannotPackUnknownNodeError
from allmydata.check_results import DeepCheckResults, \
     DeepCheckAndRepairResults
from allmydata.monitor import Monitor
from allmydata.util import hashutil, mathutil, base32, log
from allmydata.util.assertutil import _assert, precondition
from allmydata.util.netstring import netstring, split_netstring
from allmydata.uri import DirectoryURI, ReadonlyDirectoryURI, \
     LiteralFileURI, from_string
from pycryptopp.cipher.aes import AES
from allmydata.util.dictutil import AuxValueDict
27 def __init__(self, node, name, must_exist=True):
30 self.must_exist = True
31 def modify(self, old_contents, servermap, first_time):
32 children = self.node._unpack_contents(old_contents)
33 if self.name not in children:
34 if first_time and self.must_exist:
35 raise NoSuchChildError(self.name)
38 self.old_child, metadata = children[self.name]
39 del children[self.name]
40 new_contents = self.node._pack_contents(children)
44 def __init__(self, node, name, metadata):
47 self.metadata = metadata
49 def modify(self, old_contents, servermap, first_time):
50 children = self.node._unpack_contents(old_contents)
51 if self.name not in children:
52 raise NoSuchChildError(self.name)
53 children[self.name] = (children[self.name][0], self.metadata)
54 new_contents = self.node._pack_contents(children)
59 def __init__(self, node, entries=None, overwrite=True):
63 self.entries = entries
64 self.overwrite = overwrite
66 def set_node(self, name, node, metadata):
67 precondition(isinstance(name, unicode), name)
68 precondition(IFilesystemNode.providedBy(node), node)
69 self.entries.append( [name, node, metadata] )
71 def modify(self, old_contents, servermap, first_time):
72 children = self.node._unpack_contents(old_contents)
74 for e in self.entries:
80 name, child, new_metadata = e
81 assert _assert(IFilesystemNode.providedBy(child), child)
82 assert isinstance(name, unicode)
84 if not self.overwrite:
85 raise ExistingChildError("child '%s' already exists" % name)
87 if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
88 raise ExistingChildError("child '%s' already exists" % name)
89 metadata = children[name][1].copy()
91 metadata = {"ctime": now,
99 if new_metadata is not None:
100 # Overwrite all metadata.
101 newmd = new_metadata.copy()
104 if newmd.has_key('tahoe'):
106 if metadata.has_key('tahoe'):
107 newmd['tahoe'] = metadata['tahoe']
111 # For backwards compatibility with Tahoe < 1.4.0:
112 if "ctime" not in metadata:
113 metadata["ctime"] = now
114 metadata["mtime"] = now
117 sysmd = metadata.get('tahoe', {})
118 if not 'linkcrtime' in sysmd:
119 if "ctime" in metadata:
120 # In Tahoe < 1.4.0 we used the word "ctime" to mean what Tahoe >= 1.4.0
121 # calls "linkcrtime".
122 sysmd["linkcrtime"] = metadata["ctime"]
124 sysmd["linkcrtime"] = now
125 sysmd["linkmotime"] = now
127 children[name] = (child, metadata)
128 new_contents = self.node._pack_contents(children)
132 implements(IDirectoryNode, ICheckable, IDeepCheckable)
133 filenode_class = MutableFileNode
135 def __init__(self, filenode, nodemaker, uploader):
136 self._node = filenode
137 filenode_uri = IMutableFileURI(filenode.get_uri())
138 if filenode_uri.is_readonly():
139 self._uri = ReadonlyDirectoryURI(filenode_uri)
141 self._uri = DirectoryURI(filenode_uri)
142 self._nodemaker = nodemaker
143 self._uploader = uploader
144 self._most_recent_size = None
147 return "<%s %s %s>" % (self.__class__.__name__, self.is_readonly() and "RO" or "RW", hasattr(self, '_uri') and self._uri.abbrev())
150 # return the size of our backing mutable file, in bytes, if we've
152 return self._most_recent_size
154 def _set_size(self, data):
155 self._most_recent_size = len(data)
159 d = self._node.download_best_version()
160 d.addCallback(self._set_size)
161 d.addCallback(self._unpack_contents)
164 def _encrypt_rwcap(self, rwcap):
165 assert isinstance(rwcap, str)
166 salt = hashutil.mutable_rwcap_salt_hash(rwcap)
167 key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
169 crypttext = cryptor.process(rwcap)
170 mac = hashutil.hmac(key, salt + crypttext)
171 assert len(mac) == 32
172 return salt + crypttext + mac
173 # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
174 # produce it for the sake of older readers.
176 def _decrypt_rwcapdata(self, encwrcap):
178 crypttext = encwrcap[16:-32]
179 key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
181 plaintext = cryptor.process(crypttext)
184 def _create_node(self, rwcap, rocap):
185 return self._nodemaker.create_from_cap(rwcap, rocap)
187 def _unpack_contents(self, data):
188 # the directory is serialized as a list of netstrings, one per child.
189 # Each child is serialized as a list of four netstrings: (name,
190 # rocap, rwcap, metadata), in which the name,rocap,metadata are in
191 # cleartext. The 'name' is UTF-8 encoded. The rwcap is formatted as:
192 # pack("16ss32s", iv, AES(H(writekey+iv), plaintextrwcap), mac)
193 assert isinstance(data, str), (repr(data), type(data))
194 # an empty directory is serialized as an empty string
196 return AuxValueDict()
197 writeable = not self.is_readonly()
198 children = AuxValueDict()
200 while position < len(data):
201 entries, position = split_netstring(data, 1, position)
203 (name, rocap, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
204 name = name.decode("utf-8")
207 rwcap = self._decrypt_rwcapdata(rwcapdata)
209 rwcap = None # rwcap is None or a non-empty string
211 rocap = None # rocap is None or a non-empty string
212 child = self._create_node(rwcap, rocap)
213 metadata = simplejson.loads(metadata_s)
214 assert isinstance(metadata, dict)
215 children.set_with_aux(name, (child, metadata), auxilliary=entry)
218 def _pack_contents(self, children):
219 # expects children in the same format as _unpack_contents
220 has_aux = isinstance(children, AuxValueDict)
222 for name in sorted(children.keys()):
223 assert isinstance(name, unicode)
226 entry = children.get_aux(name)
228 child, metadata = children.get(name)
229 assert IFilesystemNode.providedBy(child), (name,child)
230 assert isinstance(metadata, dict)
231 rwcap = child.get_uri() # might be RO if the child is not writeable
234 assert isinstance(rwcap, str), rwcap
235 rocap = child.get_readonly_uri()
238 assert isinstance(rocap, str), rocap
239 entry = "".join([netstring(name.encode("utf-8")),
241 netstring(self._encrypt_rwcap(rwcap)),
242 netstring(simplejson.dumps(metadata))])
243 entries.append(netstring(entry))
244 return "".join(entries)
246 def is_readonly(self):
247 return self._node.is_readonly()
248 def is_mutable(self):
249 return self._node.is_mutable()
252 return self._uri.to_string()
254 def get_readonly_uri(self):
255 return self._uri.get_readonly().to_string()
257 def get_verify_cap(self):
258 return self._uri.get_verify_cap()
260 def get_repair_cap(self):
261 if self._node.is_readonly():
265 def get_storage_index(self):
266 return self._uri._filenode_uri.storage_index
268 def check(self, monitor, verify=False, add_lease=False):
269 """Perform a file check. See IChecker.check for details."""
270 return self._node.check(monitor, verify, add_lease)
271 def check_and_repair(self, monitor, verify=False, add_lease=False):
272 return self._node.check_and_repair(monitor, verify, add_lease)
275 """I return a Deferred that fires with a dictionary mapping child
276 name to a tuple of (IFileNode or IDirectoryNode, metadata)."""
279 def has_child(self, name):
280 """I return a Deferred that fires with a boolean, True if there
281 exists a child of the given name, False if not."""
282 assert isinstance(name, unicode)
284 d.addCallback(lambda children: children.has_key(name))
287 def _get(self, children, name):
288 child = children.get(name)
290 raise NoSuchChildError(name)
293 def _get_with_metadata(self, children, name):
294 child = children.get(name)
296 raise NoSuchChildError(name)
300 """I return a Deferred that fires with the named child node,
301 which is either an IFileNode or an IDirectoryNode."""
302 assert isinstance(name, unicode)
304 d.addCallback(self._get, name)
307 def get_child_and_metadata(self, name):
308 """I return a Deferred that fires with the (node, metadata) pair for
309 the named child. The node is either an IFileNode or an
310 IDirectoryNode, and the metadata is a dictionary."""
311 assert isinstance(name, unicode)
313 d.addCallback(self._get_with_metadata, name)
316 def get_metadata_for(self, name):
317 assert isinstance(name, unicode)
319 d.addCallback(lambda children: children[name][1])
322 def set_metadata_for(self, name, metadata):
323 assert isinstance(name, unicode)
324 if self.is_readonly():
325 return defer.fail(NotMutableError())
326 assert isinstance(metadata, dict)
327 s = MetadataSetter(self, name, metadata)
328 d = self._node.modify(s.modify)
329 d.addCallback(lambda res: self)
332 def get_child_at_path(self, path):
333 """Transform a child path into an IDirectoryNode or IFileNode.
335 I perform a recursive series of 'get' operations to find the named
336 descendant node. I return a Deferred that fires with the node, or
337 errbacks with IndexError if the node could not be found.
339 The path can be either a single string (slash-separated) or a list of
342 d = self.get_child_and_metadata_at_path(path)
343 d.addCallback(lambda (node, metadata): node)
346 def get_child_and_metadata_at_path(self, path):
347 """Transform a child path into an IDirectoryNode or IFileNode and
348 a metadata dictionary from the last edge that was traversed.
352 return defer.succeed((self, {}))
353 if isinstance(path, (list, tuple)):
356 path = path.split("/")
358 assert isinstance(p, unicode)
360 remaining_path = path[1:]
362 d = self.get(childname)
363 d.addCallback(lambda node:
364 node.get_child_and_metadata_at_path(remaining_path))
366 d = self.get_child_and_metadata(childname)
369 def set_uri(self, name, writecap, readcap, metadata=None, overwrite=True):
370 precondition(isinstance(name, unicode), name)
371 precondition(isinstance(writecap, (str,type(None))), writecap)
372 precondition(isinstance(readcap, (str,type(None))), readcap)
373 child_node = self._create_node(writecap, readcap)
374 if isinstance(child_node, UnknownNode):
375 # don't be willing to pack unknown nodes: we might accidentally
376 # put some write-authority into the rocap slot because we don't
377 # know how to diminish the URI they gave us. We don't even know
378 # if they gave us a readcap or a writecap.
379 msg = "cannot pack unknown node as child %s" % str(name)
380 raise CannotPackUnknownNodeError(msg)
381 d = self.set_node(name, child_node, metadata, overwrite)
382 d.addCallback(lambda res: child_node)
385 def set_children(self, entries, overwrite=True):
387 a = Adder(self, overwrite=overwrite)
389 for (name, e) in entries.iteritems():
390 assert isinstance(name, unicode)
392 writecap, readcap = e
396 writecap, readcap, metadata = e
397 precondition(isinstance(writecap, (str,type(None))), writecap)
398 precondition(isinstance(readcap, (str,type(None))), readcap)
399 child_node = self._create_node(writecap, readcap)
400 if isinstance(child_node, UnknownNode):
401 msg = "cannot pack unknown node as child %s" % str(name)
402 raise CannotPackUnknownNodeError(msg)
403 a.set_node(name, child_node, metadata)
404 d = self._node.modify(a.modify)
405 d.addCallback(lambda ign: self)
408 def set_node(self, name, child, metadata=None, overwrite=True):
409 """I add a child at the specific name. I return a Deferred that fires
410 when the operation finishes. This Deferred will fire with the child
411 node that was just added. I will replace any existing child of the
414 If this directory node is read-only, the Deferred will errback with a
417 precondition(IFilesystemNode.providedBy(child), child)
419 if self.is_readonly():
420 return defer.fail(NotMutableError())
421 assert isinstance(name, unicode)
422 assert IFilesystemNode.providedBy(child), child
423 a = Adder(self, overwrite=overwrite)
424 a.set_node(name, child, metadata)
425 d = self._node.modify(a.modify)
426 d.addCallback(lambda res: child)
429 def set_nodes(self, entries, overwrite=True):
430 if self.is_readonly():
431 return defer.fail(NotMutableError())
432 a = Adder(self, entries, overwrite=overwrite)
433 d = self._node.modify(a.modify)
434 d.addCallback(lambda res: self)
438 def add_file(self, name, uploadable, metadata=None, overwrite=True):
439 """I upload a file (using the given IUploadable), then attach the
440 resulting FileNode to the directory at the given name. I return a
441 Deferred that fires (with the IFileNode of the uploaded file) when
442 the operation completes."""
443 assert isinstance(name, unicode)
444 if self.is_readonly():
445 return defer.fail(NotMutableError())
446 d = self._uploader.upload(uploadable)
447 d.addCallback(lambda results: results.uri)
448 d.addCallback(self._nodemaker.create_from_cap)
449 d.addCallback(lambda node:
450 self.set_node(name, node, metadata, overwrite))
453 def delete(self, name):
454 """I remove the child at the specific name. I return a Deferred that
455 fires (with the node just removed) when the operation finishes."""
456 assert isinstance(name, unicode)
457 if self.is_readonly():
458 return defer.fail(NotMutableError())
459 deleter = Deleter(self, name)
460 d = self._node.modify(deleter.modify)
461 d.addCallback(lambda res: deleter.old_child)
464 def create_subdirectory(self, name, initial_children={}, overwrite=True):
465 assert isinstance(name, unicode)
466 if self.is_readonly():
467 return defer.fail(NotMutableError())
468 d = self._nodemaker.create_new_mutable_directory(initial_children)
470 entries = [(name, child, None)]
471 a = Adder(self, entries, overwrite=overwrite)
472 d = self._node.modify(a.modify)
473 d.addCallback(lambda res: child)
475 d.addCallback(_created)
478 def move_child_to(self, current_child_name, new_parent,
479 new_child_name=None, overwrite=True):
480 """I take one of my children and move them to a new parent. The child
481 is referenced by name. On the new parent, the child will live under
482 'new_child_name', which defaults to 'current_child_name'. I return a
483 Deferred that fires when the operation finishes."""
484 assert isinstance(current_child_name, unicode)
485 if self.is_readonly() or new_parent.is_readonly():
486 return defer.fail(NotMutableError())
487 if new_child_name is None:
488 new_child_name = current_child_name
489 assert isinstance(new_child_name, unicode)
490 d = self.get(current_child_name)
492 return new_parent.set_node(new_child_name, child,
495 d.addCallback(lambda child: self.delete(current_child_name))
499 def deep_traverse(self, walker):
500 """Perform a recursive walk, using this dirnode as a root, notifying
501 the 'walker' instance of everything I encounter.
503 I call walker.enter_directory(parent, children) once for each dirnode
504 I visit, immediately after retrieving the list of children. I pass in
505 the parent dirnode and the dict of childname->(childnode,metadata).
506 This function should *not* traverse the children: I will do that.
507 enter_directory() is most useful for the deep-stats number that
508 counts how large a directory is.
510 I call walker.add_node(node, path) for each node (both files and
511 directories) I can reach. Most work should be done here.
513 I avoid loops by keeping track of verifier-caps and refusing to call
514 walker.add_node() or traverse a node that I've seen before. This
515 means that any file or directory will only be given to the walker
516 once. If files or directories are referenced multiple times by a
517 directory structure, this may appear to under-count or miss some of
520 I return a Monitor which can be used to wait for the operation to
521 finish, learn about its progress, or cancel the operation.
524 # this is just a tree-walker, except that following each edge
525 # requires a Deferred. We used to use a ConcurrencyLimiter to limit
526 # fanout to 10 simultaneous operations, but the memory load of the
527 # queued operations was excessive (in one case, with 330k dirnodes,
528 # it caused the process to run into the 3.0GB-ish per-process 32bit
529 # linux memory limit, and crashed). So we use a single big Deferred
530 # chain, and do a strict depth-first traversal, one node at a time.
531 # This can be slower, because we aren't pipelining directory reads,
532 # but it brought the memory footprint down by roughly 50%.
535 walker.set_monitor(monitor)
537 found = set([self.get_verify_cap()])
538 d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
539 d.addCallback(lambda ignored: walker.finish())
540 d.addBoth(monitor.finish)
541 d.addErrback(lambda f: None)
545 def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
546 # process this directory, then walk its children
547 monitor.raise_if_cancelled()
548 d = defer.maybeDeferred(walker.add_node, node, path)
549 d.addCallback(lambda ignored: node.list())
550 d.addCallback(self._deep_traverse_dirnode_children, node, path,
551 walker, monitor, found)
554 def _deep_traverse_dirnode_children(self, children, parent, path,
555 walker, monitor, found):
556 monitor.raise_if_cancelled()
557 d = defer.maybeDeferred(walker.enter_directory, parent, children)
558 # we process file-like children first, so we can drop their FileNode
559 # objects as quickly as possible. Tests suggest that a FileNode (held
560 # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
561 # in the nodecache) seem to consume about 2000 bytes.
564 for name, (child, metadata) in sorted(children.iteritems()):
565 childpath = path + [name]
566 if isinstance(child, UnknownNode):
567 walker.add_node(child, childpath)
569 verifier = child.get_verify_cap()
570 # allow LIT files (for which verifier==None) to be processed
571 if (verifier is not None) and (verifier in found):
574 if IDirectoryNode.providedBy(child):
575 dirkids.append( (child, childpath) )
577 filekids.append( (child, childpath) )
578 for i, (child, childpath) in enumerate(filekids):
579 d.addCallback(lambda ignored, child=child, childpath=childpath:
580 walker.add_node(child, childpath))
581 # to work around the Deferred tail-recursion problem
582 # (specifically the defer.succeed flavor) requires us to avoid
583 # doing more than 158 LIT files in a row. We insert a turn break
584 # once every 100 files (LIT or CHK) to preserve some stack space
585 # for other code. This is a different expression of the same
586 # Twisted problem as in #237.
588 d.addCallback(lambda ignored: fireEventually())
589 for (child, childpath) in dirkids:
590 d.addCallback(lambda ignored, child=child, childpath=childpath:
591 self._deep_traverse_dirnode(child, childpath,
597 def build_manifest(self):
598 """Return a Monitor, with a ['status'] that will be a list of (path,
599 cap) tuples, for all nodes (directories and files) reachable from
601 walker = ManifestWalker(self)
602 return self.deep_traverse(walker)
604 def start_deep_stats(self):
605 # Since deep_traverse tracks verifier caps, we avoid double-counting
606 # children for which we've got both a write-cap and a read-cap
607 return self.deep_traverse(DeepStats(self))
609 def start_deep_check(self, verify=False, add_lease=False):
610 return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease))
612 def start_deep_check_and_repair(self, verify=False, add_lease=False):
613 return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease))
618 def __init__(self, origin):
621 for k in ["count-immutable-files",
622 "count-mutable-files",
623 "count-literal-files",
627 "size-immutable-files",
628 #"size-mutable-files",
629 "size-literal-files",
632 "largest-directory-children",
633 "largest-immutable-file",
634 #"largest-mutable-file",
638 for k in ["size-files-histogram"]:
639 self.histograms[k] = {} # maps (min,max) to count
640 self.buckets = [ (0,0), (1,3)]
641 self.root = math.sqrt(10)
643 def set_monitor(self, monitor):
644 self.monitor = monitor
645 monitor.origin_si = self.origin.get_storage_index()
646 monitor.set_status(self.get_results())
648 def add_node(self, node, childpath):
649 if isinstance(node, UnknownNode):
650 self.add("count-unknown")
651 elif IDirectoryNode.providedBy(node):
652 self.add("count-directories")
653 elif IMutableFileNode.providedBy(node):
654 self.add("count-files")
655 self.add("count-mutable-files")
656 # TODO: update the servermap, compute a size, add it to
657 # size-mutable-files, max it into "largest-mutable-file"
658 elif IFileNode.providedBy(node): # CHK and LIT
659 self.add("count-files")
660 size = node.get_size()
661 self.histogram("size-files-histogram", size)
662 theuri = from_string(node.get_uri())
663 if isinstance(theuri, LiteralFileURI):
664 self.add("count-literal-files")
665 self.add("size-literal-files", size)
667 self.add("count-immutable-files")
668 self.add("size-immutable-files", size)
669 self.max("largest-immutable-file", size)
671 def enter_directory(self, parent, children):
672 dirsize_bytes = parent.get_size()
673 dirsize_children = len(children)
674 self.add("size-directories", dirsize_bytes)
675 self.max("largest-directory", dirsize_bytes)
676 self.max("largest-directory-children", dirsize_children)
678 def add(self, key, value=1):
679 self.stats[key] += value
681 def max(self, key, value):
682 self.stats[key] = max(self.stats[key], value)
684 def which_bucket(self, size):
685 # return (min,max) such that min <= size <= max
686 # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
687 # (101,316), (317, 1000), etc: two per decade
691 if i >= len(self.buckets):
693 new_lower = self.buckets[i-1][1]+1
694 new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
695 self.buckets.append( (new_lower, new_upper) )
696 maybe = self.buckets[i]
697 if maybe[0] <= size <= maybe[1]:
701 def histogram(self, key, size):
702 bucket = self.which_bucket(size)
703 h = self.histograms[key]
708 def get_results(self):
709 stats = self.stats.copy()
710 for key in self.histograms:
711 h = self.histograms[key]
712 out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
718 return self.get_results()
class ManifestWalker(DeepStats):
    """DeepStats subclass that additionally collects a (path, cap)
    manifest, the set of verify-caps, and the set of storage-index
    strings for every reachable node."""

    def __init__(self, origin):
        DeepStats.__init__(self, origin)
        self.manifest = []
        self.storage_index_strings = set()
        self.verifycaps = set()

    def add_node(self, node, path):
        self.manifest.append( (tuple(path), node.get_uri()) )
        si = node.get_storage_index()
        if si:
            self.storage_index_strings.add(base32.b2a(si))
        v = node.get_verify_cap()
        if v:
            self.verifycaps.add(v.to_string())
        return DeepStats.add_node(self, node, path)

    def get_results(self):
        stats = DeepStats.get_results(self)
        return {"manifest": self.manifest,
                "verifycaps": self.verifycaps,
                "storage-index": self.storage_index_strings,
                "stats": stats,
                }
747 def __init__(self, root, verify, repair, add_lease):
748 root_si = root.get_storage_index()
749 self._lp = log.msg(format="deep-check starting (%(si)s),"
750 " verify=%(verify)s, repair=%(repair)s",
751 si=base32.b2a(root_si), verify=verify, repair=repair)
752 self._verify = verify
753 self._repair = repair
754 self._add_lease = add_lease
756 self._results = DeepCheckAndRepairResults(root_si)
758 self._results = DeepCheckResults(root_si)
759 self._stats = DeepStats(root)
761 def set_monitor(self, monitor):
762 self.monitor = monitor
763 monitor.set_status(self._results)
765 def add_node(self, node, childpath):
767 d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
768 d.addCallback(self._results.add_check_and_repair, childpath)
770 d = node.check(self.monitor, self._verify, self._add_lease)
771 d.addCallback(self._results.add_check, childpath)
772 d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
775 def enter_directory(self, parent, children):
776 return self._stats.enter_directory(parent, children)
779 log.msg("deep-check done", parent=self._lp)
780 self._results.update_stats(self._stats.get_results())
784 # use client.create_dirnode() to make one of these