# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import struct, time, os
from twisted.python import usage, failure
from twisted.internet import defer
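
# Each "tahoe debug" subcommand below pairs a usage.Options subclass (which
# parses arguments and carries the --help text) with a worker function that
# writes its report to options.stdout.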

class DumpOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-share SHARE_FILENAME"

    optFlags = [
        ["offsets", None, "Display a table of section offsets"],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename

def dump_share(options):
    from allmydata import storage

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % options['filename']

    f = open(options['filename'], "rb")
    prefix = f.read(32)
    f.close()
    if prefix == storage.MutableShareFile.MAGIC:
        return dump_mutable_share(options)
    # otherwise assume it's immutable
    return dump_immutable_share(options)
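
# A share file either starts with MutableShareFile.MAGIC (a mutable slot) or
# is assumed to hold an immutable bucket; dump_share() dispatches on that
# prefix, and the two dump_* functions below handle the two layouts.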

def dump_immutable_share(options):
    from allmydata import uri, storage
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy

    out = options.stdout

    f = storage.ShareFile(options['filename'])
    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None)
    offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
    print >>out, "%20s: %d" % ("version", bp._version)
    seek = offsets['uri_extension']
    length = struct.unpack(bp._fieldstruct,
                           f.read_share_data(seek, bp._fieldsize))[0]
    seek += bp._fieldsize
    UEB_data = f.read_share_data(seek, length)
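
    # The URI extension block (UEB) is a serialized dictionary of encoding
    # parameters and hash-tree roots; unpack_extension_readable() converts
    # the binary values into printable (mostly base32) strings.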
    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
        u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                   unpacked["needed_shares"],
                                   unpacked["total_shares"], unpacked["size"])
        verify_cap = u.to_string()
        print >>out, "%20s: %s" % ("verify-cap", verify_cap)
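
    # Summarize how the share file's bytes are divided among erasure-coded
    # block data, the validation (hash tree) section, and the UEB itself.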
    sizes = {}
    sizes['data'] = bp._data_size
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])

    if options['offsets']:
        print >>out
        print >>out, " Section Offsets:"
        print >>out, "%20s: %s" % ("share data", f._data_offset)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k, k)
            offset = f._data_offset + offsets[k]
            print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
        print >>out, "%20s: %s" % ("leases", f._lease_offset)

    # display lease information too
    print >>out
    leases = list(f.iter_leases())
    if leases:
        for i, lease in enumerate(leases):
            when = format_expiration_time(lease.expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when)
    else:
        print >>out, " No leases."

    print >>out
    return 0

def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains / (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains / 3600)
    return when
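
# For example, an expiration three days out renders as something like
# "259200s (3 days)", and one 90 minutes out as "5400s (1 hours)".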

def dump_mutable_share(options):
    from allmydata import storage
    from allmydata.util import base32, idlib
    out = options.stdout
    m = storage.MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    if f.read(1) == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, " ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, " expires in %s" % when
            print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)

    return 0
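
# SDMF ("small distributed mutable file") slots begin with a zero version
# byte, which is what dump_mutable_share() checked for above; other mutable
# layouts are reported as share_type "unknown" and left unparsed.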

def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI

    offset = m.DATA_OFFSET

    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError, e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)
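
    # unpack_share() raises NeedMoreDataError when the first 2000-byte read
    # is too short to cover the signature and hash chains; e.needed_bytes
    # tells us exactly how much to read on the second attempt.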

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print >>out, " SDMF contents:"
    print >>out, " seqnum: %d" % seqnum
    print >>out, " root_hash: %s" % base32.b2a(root_hash)
    print >>out, " IV: %s" % base32.b2a(IV)
    print >>out, " required_shares: %d" % k
    print >>out, " total_shares: %d" % N
    print >>out, " segsize: %d" % segsize
    print >>out, " datalen: %d" % datalen
    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
        u = SSKVerifierURI(storage_index, fingerprint)
        verify_cap = u.to_string()
        print >>out, " verify-cap:", verify_cap
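
    # (A verify-cap identifies the file and lets a checker validate its
    # shares without granting any ability to read the plaintext.)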

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"
        def printoffset(name, value, shift=0):
            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data",
                  "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

    print >>out
    return 0

class DumpCapOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-cap [options] FILECAP"

    optParameters = [
        ["nodeid", "n",
         None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None,
         "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None,
         "client's base directory, from which a -c secret will be read"],
        ]

    def parseArgs(self, cap):
        self.cap = cap

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""
        return t

def dump_cap(options):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    out = options.stdout
    cap = options.cap
    nodeid = None
    if options['nodeid']:
        nodeid = b32decode(options['nodeid'].upper())
    secret = None
    if options['client-secret']:
        secret = base32.a2b(options['client-secret'])
    elif options['client-dir']:
        secretfile = os.path.join(options['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "r").read().strip())
        except EnvironmentError:
            pass

    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out)
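
# Lease secrets are derived in a chain: the client's base secret yields
# per-client renewal/cancel secrets, those plus the storage index yield
# per-file secrets, and those plus a server's nodeid yield the per-bucket
# secrets that are actually sent to that server.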

def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)

def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import storage, uri
    from allmydata.util import base32, hashutil

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        _dump_secrets(u.storage_index, secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", u.data

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
        print >>out
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.storage_index, secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.NewDirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyNewDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.NewDirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    else:
        print >>out, "unknown cap type"
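
# Directory caps wrap an underlying mutable filenode cap, so the directory
# branches above recurse into dump_uri_instance() with show_header=False to
# print the inner cap's fields under the directory's own title line.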

class FindSharesOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.."
    def parseArgs(self, storage_index_s, *nodedirs):
        self.si_s = storage_index_s
        self.nodedirs = nodedirs
    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""
        return t

def find_shares(options):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata import storage

    out = options.stdout
    sharedir = storage.storage_index_to_dir(storage.si_a2b(options.si_s))
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in os.listdir(d):
                print >>out, os.path.join(d, shnum)
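
# Shares live at NODEDIR/storage/shares/PREFIX/SI/SHNUM, where PREFIX is a
# short prefix of the base32 storage index (storage_index_to_dir() builds
# the PREFIX/SI part), so each node needs only a single directory check.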

class CatalogSharesOptions(usage.Options):
    def parseArgs(self, *nodedirs):
        self.nodedirs = nodedirs
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe debug catalog-shares NODEDIRS.."

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful for that purpose.
"""
        return t

def call(c, *args, **kwargs):
    # take advantage of the fact that ImmediateReadBucketProxy returns
    # Deferreds that are already fired
    results = []
    failures = []
    d = defer.maybeDeferred(c, *args, **kwargs)
    d.addCallbacks(results.append, failures.append)
    if failures:
        failures[0].raiseException()
    return results[0]
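
# This only works because the Deferreds are already fired: addCallbacks()
# then runs synchronously, so exactly one of the two lists is populated by
# the time call() inspects them.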

def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri, storage
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == storage.MutableShareFile.MAGIC:
        # mutable share
        m = storage.MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        leases = list(m._enumerate_leases(f))
        expiration_time = min( [lease[1].expiration_time
                                for lease in leases] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, abs_sharefile)
        else:
            print >>out, "UNKNOWN mutable %s" % (abs_sharefile,)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable share

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = storage.ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.iter_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   abs_sharefile)

    else:
        print >>out, "UNKNOWN really-unknown %s" % (abs_sharefile,)

    f.close()

def catalog_shares(options):
    out = options.stdout
    err = options.stderr
    now = time.time()
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares")
        try:
            abbrevs = os.listdir(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            continue
        for abbrevdir in abbrevs:
            if abbrevdir == "incoming":
                continue
            abbrevdir = os.path.join(d, abbrevdir)
            # this tool may get run against bad disks, so we can't assume
            # that os.listdir will always succeed. Try to catalog as much
            # as possible.
            try:
                sharedirs = os.listdir(abbrevdir)
                for si_s in sharedirs:
                    si_dir = os.path.join(abbrevdir, si_s)
                    catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err)
            except:
                print >>err, "Error processing %s" % abbrevdir
                failure.Failure().printTraceback(err)

    return 0

def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
    try:
        for shnum_s in os.listdir(si_dir):
            abs_sharefile = os.path.join(si_dir, shnum_s)
            abs_sharefile = os.path.abspath(abs_sharefile)
            assert os.path.isfile(abs_sharefile)
            try:
                describe_share(abs_sharefile, si_s, shnum_s, now,
                               out)
            except:
                print >>err, "Error processing %s" % abs_sharefile
                failure.Failure().printTraceback(err)
    except:
        print >>err, "Error processing %s" % si_dir
        failure.Failure().printTraceback(err)

class CorruptShareOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug corrupt-share SHARE_FILENAME"

    optParameters = [
        ["offset", "o", "block-random", "Which bit to flip."],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Corrupt the given share by flipping a bit. This will cause a
verifying/downloading client to log an integrity-check failure incident, and
downloads will proceed with a different share.

The --offset parameter controls which bit should be flipped. The default is
to flip a single random bit of the block data.

 tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

Obviously, this command should not be used in normal operation.
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename

def corrupt_share(options):
    import random
    from allmydata import storage
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"
    # first, what kind of share is it?

    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        d = chr(ord(d) ^ (0x01 << bit))
        f.seek(offset)
        f.write(d)
        f.close()

    f = open(fn, "rb")
    prefix = f.read(32)
    f.close()
    if prefix == storage.MutableShareFile.MAGIC:
        # mutable share
        m = storage.MutableShareFile(fn)
        f = open(fn, "rb")
        f.seek(m.DATA_OFFSET)
        data = f.read(2000)
        # make sure this slot contains an SDMF share
        assert data[0] == "\x00", "non-SDMF mutable shares not supported"
        f.close()

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
        flip_bit(start, end)
    else:
        # otherwise assume it's immutable
        f = storage.ShareFile(fn)
        bp = ReadBucketProxy(None)
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]
        flip_bit(start, end)
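
# In both branches the flipped bit lands inside the erasure-coded block data
# (between the share-data offset and the start of the next section), so a
# downloading client should detect the damage through the block hash tree.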

class ReplOptions(usage.Options):
    pass

def repl(options):
    import code
    return code.interact()

class DebugCommand(usage.Options):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
        ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
        ["corrupt-share", None, CorruptShareOptions, "Corrupt a share"],
        ["repl", None, ReplOptions, "Open a python interpreter"],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe debug SUBCOMMAND"
    def getUsage(self, width=None):
        #t = usage.Options.getUsage(self, width)
        t = """Subcommands:

    tahoe debug dump-share      Unpack and display the contents of a share
    tahoe debug dump-cap        Unpack a read-cap or write-cap
    tahoe debug find-shares     Locate sharefiles in node directories
    tahoe debug catalog-shares  Describe all shares in node dirs
    tahoe debug corrupt-share   Corrupt a share by flipping a bit.

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t
784 "dump-share": dump_share,
785 "dump-cap": dump_cap,
786 "find-shares": find_shares,
787 "catalog-shares": catalog_shares,
788 "corrupt-share": corrupt_share,
793 def do_debug(options):
794 so = options.subOptions
795 so.stdout = options.stdout
796 so.stderr = options.stderr
797 f = subDispatch[options.subCommand]
802 ["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list"],