# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import struct, time, os
from twisted.python import usage
class DumpOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-share SHARE_FILENAME"

    optFlags = [
        ["offsets", None, "Display a table of section offsets"],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename
def dump_share(options):
    from allmydata import storage

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % options['filename']

    f = open(options['filename'], "rb")
    # mutable containers start with a fixed 32-byte magic string
    prefix = f.read(32)
    f.close()
    if prefix == storage.MutableShareFile.MAGIC:
        return dump_mutable_share(options)
    # otherwise assume it's immutable
    return dump_immutable_share(options)
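
# Note: the dump/describe code below reaches into private attributes and
# methods of the storage classes (the leading-underscore names). That is a
# deliberate trade-off for debug-only tools, which need direct access to the
# on-disk layout rather than the public share-access API.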
def dump_immutable_share(options):
    from allmydata import uri, storage
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy

    out = options.stdout
    f = storage.ShareFile(options['filename'])
    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None)
    offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
    seek = offsets['uri_extension']
    length = struct.unpack(">L", f.read_share_data(seek, 4))[0]
    seek += 4 # skip the 4-byte length prefix; the UEB itself follows
    UEB_data = f.read_share_data(seek, length)
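    # The URI extension block (UEB) is a serialized dictionary of encoding
    # parameters and hash-tree roots; unpack_extension_readable() renders the
    # values as printable strings for display.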
    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])
    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
        u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                   unpacked["needed_shares"],
                                   unpacked["total_shares"], unpacked["size"])
        verify_cap = u.to_string()
        print >>out, "%20s: %s" % ("verify-cap", verify_cap)
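        # (the verify-cap packs everything a verifier needs: storage index,
        # UEB hash, k/N, and size, without granting read access to the file)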
    sizes = {}
    sizes['data'] = bp._data_size
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])
    if options['offsets']:
        print >>out
        print >>out, " Section Offsets:"
        # the table's offsets are relative to the start of the share data, so
        # add f._data_offset to turn them into absolute file positions
        print >>out, "%20s: %s" % ("share data", f._data_offset)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k,k)
            offset = f._data_offset + offsets[k]
            print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
        print >>out, "%20s: %s" % ("leases", f._lease_offset)
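        # (f._lease_offset marks the end of the share data; lease records are
        # appended to the container after that point)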
    # display lease information too
    print >>out
    leases = list(f.iter_leases())
    if leases:
        for i,lease in enumerate(leases):
            when = format_expiration_time(lease.expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when)
    else:
        print >>out, " No leases."

    print >>out
    return 0
def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains / (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains / 3600)
    return when
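
# For example, an expiration_time 90000 seconds in the future is rendered as
# "90000s (1 days)", since 90000 > 24*3600.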
def dump_mutable_share(options):
    from allmydata import storage
    from allmydata.util import base32, idlib

    out = options.stdout
    m = storage.MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))
    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    if f.read(1) == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, "  ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, "  expires in %s" % when
            print >>out, "  renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, "  cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, "  secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)

    return 0
def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI

    offset = m.DATA_OFFSET

    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    # 2000 bytes is usually enough to cover the SDMF header fields
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError, e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)
    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)
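    # unpack_share() yields the full share contents, while unpack_header()
    # is only needed here for its offsets table; the ig_ prefix marks fields
    # we deliberately ignore.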
    print >>out, " SDMF contents:"
    print >>out, " seqnum: %d" % seqnum
    print >>out, " root_hash: %s" % base32.b2a(root_hash)
    print >>out, " IV: %s" % base32.b2a(IV)
    print >>out, " required_shares: %d" % k
    print >>out, " total_shares: %d" % N
    print >>out, " segsize: %d" % segsize
    print >>out, " datalen: %d" % datalen
    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
        u = SSKVerifierURI(storage_index, fingerprint)
        verify_cap = u.to_string()
        print >>out, " verify-cap:", verify_cap
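        # (an SSK verify-cap carries only the storage index and the pubkey
        # fingerprint: enough to locate and verify shares, but no read access)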
    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"
        def printoffset(name, value, shift=0):
            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data", "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k,k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        # the +4 presumably skips the 4-byte count at the start of the
        # extra-lease area, pointing at the first extra lease itself
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

    print >>out
class DumpCapOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-cap [options] FILECAP"
    optParameters = [
        ["nodeid", "n",
         None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None,
         "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None,
         "client's base directory, from which a -c secret will be read"],
        ]
    def parseArgs(self, cap):
        self.cap = cap

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""
        return t
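
# dump_cap accepts either a bare cap string or a full webapi URL of the form
# http://host:port/uri/CAP; the URL form is unwrapped (and unquoted) before
# the cap is parsed.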
def dump_cap(options):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    out = options.stdout
    cap = options.cap
    nodeid = None
    if options['nodeid']:
        nodeid = b32decode(options['nodeid'].upper())
    secret = None
    if options['client-secret']:
        secret = base32.a2b(options['client-secret'])
    elif options['client-dir']:
        secretfile = os.path.join(options['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "r").read().strip())
        except EnvironmentError:
            pass

    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out)
def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)
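
# The lease secrets form a derivation chain: the client's base secret yields
# per-client renewal/cancel secrets, each of which is hashed with the storage
# index to get a per-file secret, which in turn is hashed with the server's
# nodeid to get the per-lease secret actually sent to that server.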
def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import storage, uri
    from allmydata.util import base32, hashutil

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        _dump_secrets(u.storage_index, secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", u.data

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
        print >>out
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.storage_index, secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.NewDirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        # directory URIs wrap an underlying filenode URI; recurse without
        # repeating the header
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyNewDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.NewDirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    else:
        print >>out, "unknown cap type"
class FindSharesOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.."
    def parseArgs(self, storage_index_s, *nodedirs):
        self.si_s = storage_index_s
        self.nodedirs = nodedirs
    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""
        return t
def find_shares(options):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata import storage

    out = options.stdout
    sharedir = storage.storage_index_to_dir(storage.si_a2b(options.si_s))
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in os.listdir(d):
                print >>out, os.path.join(d, shnum)

    return 0
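
# Shares live at a predictable path under each node:
#   NODEDIR/storage/shares/<prefix>/<base32 storage index>/<share number>
# storage.storage_index_to_dir() computes the "<prefix>/<storage index>"
# part, so locating shares reduces to one directory check per node.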
class CatalogSharesOptions(usage.Options):
    def parseArgs(self, *nodedirs):
        self.nodedirs = nodedirs
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe debug catalog-shares NODEDIRS.."

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful to automate the process.
"""
        return t
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri, storage
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == storage.MutableShareFile.MAGIC:
        # mutable share
        m = storage.MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        leases = list(m._enumerate_leases(f))
        expiration_time = min( [lease[1].expiration_time
                                for lease in leases] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, abs_sharefile)
        else:
            print >>out, "UNKNOWN mutable %s" % (abs_sharefile,)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable share, container version 1
        sf = storage.ShareFile(abs_sharefile)
        # use a ReadBucketProxy to parse the bucket and find the uri extension
        bp = ReadBucketProxy(None)
        offsets = bp._parse_offsets(sf.read_share_data(0, 0x24))
        seek = offsets['uri_extension']
        length = struct.unpack(">L", sf.read_share_data(seek, 4))[0]
        seek += 4 # skip the 4-byte length prefix
        UEB_data = sf.read_share_data(seek, length)
        expiration_time = min( [lease.expiration_time
                                for lease in sf.iter_leases()] )
        expiration = max(0, expiration_time - now)

        unpacked = uri.unpack_extension_readable(UEB_data)
        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   abs_sharefile)

    else:
        print >>out, "UNKNOWN really-unknown %s" % (abs_sharefile,)

    f.close()
def catalog_shares(options):
    out = options.stdout
    now = time.time()
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares")
        try:
            abbrevs = os.listdir(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            continue
        for abbrevdir in abbrevs:
            if abbrevdir == "incoming":
                continue
            abbrevdir = os.path.join(d, abbrevdir)
            for si_s in os.listdir(abbrevdir):
                si_dir = os.path.join(abbrevdir, si_s)
                for shnum_s in os.listdir(si_dir):
                    abs_sharefile = os.path.join(si_dir, shnum_s)
                    abs_sharefile = os.path.abspath(abs_sharefile)
                    assert os.path.isfile(abs_sharefile)
                    describe_share(abs_sharefile, si_s, shnum_s, now, out)
    return 0
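
# Typical usage is to concatenate the output from every storage server and
# sort it, so that all shares for the same storage index land on adjacent
# lines and can be compared directly.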
class CorruptShareOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug corrupt-share SHARE_FILENAME"

    optParameters = [
        ["offset", "o", "block-random", "Which bit to flip."],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Corrupt the given share by flipping a bit. This will cause a
verifying/downloading client to log an integrity-check failure incident, and
downloads will proceed with a different share.

The --offset parameter controls which bit should be flipped. The default is
to flip a single random bit of the block data.

 tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

Obviously, this command should not be used in normal operation.
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename
def corrupt_share(options):
    import random
    from allmydata import storage
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"

    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        # flip the chosen bit within the chosen byte
        d = chr(ord(d) ^ (0x01 << bit))
        f.seek(offset)
        f.write(d)
        f.close()

    # first, what kind of share is it?
    f = open(fn, "rb")
    prefix = f.read(32)
    f.close()
    if prefix == storage.MutableShareFile.MAGIC:
        # mutable
        m = storage.MutableShareFile(fn)
        f = open(fn, "rb")
        f.seek(m.DATA_OFFSET)
        data = f.read(2000)
        f.close()
        # make sure this slot contains an SDMF share
        assert data[0] == "\x00", "non-SDMF mutable shares not supported"

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
        flip_bit(start, end)
    else:
        # otherwise assume it's immutable
        f = storage.ShareFile(fn)
        bp = ReadBucketProxy(None)
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]
        flip_bit(start, end)
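
# Both branches flip a bit inside the share's block-data region (between the
# "share_data"/"data" offset and the start of the next section), so the
# corruption lands in data covered by the block hash tree and should be
# detected by any verifying download.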
class ReplOptions(usage.Options):
    pass

def repl(options):
    import code
    return code.interact()
class DebugCommand(usage.Options):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
        ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
        ["corrupt-share", None, CorruptShareOptions, "Corrupt a share"],
        ["repl", None, ReplOptions, "Open a python interpreter"],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe debug SUBCOMMAND"
    def getUsage(self, width=None):
        #t = usage.Options.getUsage(self, width)
        t = """Subcommands:
    tahoe debug dump-share      Unpack and display the contents of a share
    tahoe debug dump-cap        Unpack a read-cap or write-cap
    tahoe debug find-shares     Locate sharefiles in node directories
    tahoe debug catalog-shares  Describe all shares in node dirs
    tahoe debug corrupt-share   Corrupt a share by flipping a bit.

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t
742 "dump-share": dump_share,
743 "dump-cap": dump_cap,
744 "find-shares": find_shares,
745 "catalog-shares": catalog_shares,
746 "corrupt-share": corrupt_share,
751 def do_debug(options):
752 so = options.subOptions
753 so.stdout = options.stdout
754 so.stderr = options.stderr
755 f = subDispatch[options.subCommand]
760 ["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list"],