# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import struct, time, os
from twisted.python import usage, failure
from twisted.internet import defer

class DumpOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-share SHARE_FILENAME"

    optFlags = [
        ["offsets", None, "Display a table of section offsets"],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename

def dump_share(options):
    from allmydata import storage

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % options['filename']

    f = open(options['filename'], "rb")
    prefix = f.read(32)
    f.close()
    if prefix == storage.MutableShareFile.MAGIC:
        return dump_mutable_share(options)
    # otherwise assume it's immutable
    return dump_immutable_share(options)

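# An immutable share is a ShareFile container: a small header, the block
# data, the hash-tree sections (plaintext/crypttext/block/share hashes), a
# URI extension block, and finally the leases. dump_immutable_share walks
# these sections in that order.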
def dump_immutable_share(options):
    from allmydata import uri, storage
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy

    out = options.stdout
    f = storage.ShareFile(options['filename'])
    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None, '', '')
    offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
    print >>out, "%20s: %d" % ("version", bp._version)
    seek = offsets['uri_extension']
    length = struct.unpack(bp._fieldstruct,
                           f.read_share_data(seek, bp._fieldsize))[0]
    seek += bp._fieldsize
    UEB_data = f.read_share_data(seek, length)
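    # (the URI extension block is stored as a length-prefixed string: a
    # bp._fieldsize-byte big-endian length, then that many bytes of
    # serialized parameters, parsed by unpack_extension_readable() below)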
    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])
    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
        u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                   unpacked["needed_shares"],
                                   unpacked["total_shares"], unpacked["size"])
        verify_cap = u.to_string()
        print >>out, "%20s: %s" % ("verify-cap", verify_cap)
    sizes = {}
    sizes['data'] = (offsets['plaintext_hash_tree'] -
                     offsets['data'])
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])
    if options['offsets']:
        print >>out
        print >>out, " Section Offsets:"
        print >>out, "%20s: %s" % ("share data", f._data_offset)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k, k)
            offset = f._data_offset + offsets[k]
            print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
        print >>out, "%20s: %s" % ("leases", f._lease_offset)
    # display lease information too
    print >>out
    leases = list(f.iter_leases())
    if leases:
        for i, lease in enumerate(leases):
            when = format_expiration_time(lease.expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when)
    else:
        print >>out, " No leases."

    print >>out
    return 0

def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains / (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains / 3600)
    return when
def dump_mutable_share(options):
    from allmydata import storage
    from allmydata.util import base32, idlib
    out = options.stdout
    m = storage.MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))
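    # the mutable-container header just read holds the write-enabler, the
    # nodeid it was written for, the lease slots, and the data length; the
    # share data itself starts at m.DATA_OFFSET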
    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    if f.read(1) == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    f.close()

176 print >>out, "Mutable slot found:"
177 print >>out, " share_type: %s" % share_type
178 print >>out, " write_enabler: %s" % base32.b2a(WE)
179 print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
180 print >>out, " num_extra_leases: %d" % num_extra_leases
181 print >>out, " container_size: %d" % container_size
182 print >>out, " data_length: %d" % data_length
184 for (leasenum, lease) in leases:
186 print >>out, " Lease #%d:" % leasenum
187 print >>out, " ownerid: %d" % lease.owner_num
188 when = format_expiration_time(lease.expiration_time)
189 print >>out, " expires in %s" % when
190 print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
191 print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
192 print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
194 print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)

    return 0

def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI

    offset = m.DATA_OFFSET

    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError, e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)
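    # (the ig_-prefixed header fields are ignored: they duplicate values
    # already obtained from unpack_share(). unpack_header() is consulted
    # only for its 'offsets' table, used by the --offsets display below)
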
234 print >>out, " SDMF contents:"
235 print >>out, " seqnum: %d" % seqnum
236 print >>out, " root_hash: %s" % base32.b2a(root_hash)
237 print >>out, " IV: %s" % base32.b2a(IV)
238 print >>out, " required_shares: %d" % k
239 print >>out, " total_shares: %d" % N
240 print >>out, " segsize: %d" % segsize
241 print >>out, " datalen: %d" % datalen
242 print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
243 print >>out, " pubkey: %d bytes" % len(pubkey)
244 print >>out, " signature: %d bytes" % len(signature)
245 share_hash_ids = ",".join(sorted([str(hid)
246 for hid in share_hash_chain.keys()]))
247 print >>out, " share_hash_chain: %s" % share_hash_ids
248 print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
        u = SSKVerifierURI(storage_index, fingerprint)
        verify_cap = u.to_string()
        print >>out, " verify-cap:", verify_cap
    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"
        def printoffset(name, value, shift=0):
            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data",
                  "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

class DumpCapOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-cap [options] FILECAP"

    optParameters = [
        ["nodeid", "n",
         None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None,
         "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None,
         "client's base directory, from which a -c secret will be read"],
        ]

    def parseArgs(self, cap):
        self.cap = cap

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""
        return t

def dump_cap(options):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    out = options.stdout
    cap = options.cap
    nodeid = None
    if options['nodeid']:
        nodeid = b32decode(options['nodeid'].upper())
    secret = None
    if options['client-secret']:
        secret = base32.a2b(options['client-secret'])
    elif options['client-dir']:
        secretfile = os.path.join(options['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "r").read().strip())
        except EnvironmentError:
            pass

    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out)

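# Lease secrets are derived in a chain: the client's base secret produces a
# client renewal secret; hashing that with the storage index produces the
# per-file renewal secret; hashing that with a server's nodeid produces the
# per-server lease renewal secret (and likewise for the cancel secrets).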
def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)

def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import storage, uri
    from allmydata.util import base32, hashutil

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        _dump_secrets(u.storage_index, secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", u.data

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
        print >>out
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.storage_index, secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.NewDirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyNewDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.NewDirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    else:
        print >>out, "unknown cap type"

class FindSharesOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.."
    def parseArgs(self, storage_index_s, *nodedirs):
        self.si_s = storage_index_s
        self.nodedirs = nodedirs
    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""
        return t

def find_shares(options):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata import storage

    out = options.stdout
    sharedir = storage.storage_index_to_dir(storage.si_a2b(options.si_s))
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in os.listdir(d):
                print >>out, os.path.join(d, shnum)

    return 0

class CatalogSharesOptions(usage.Options):
    def parseArgs(self, *nodedirs):
        self.nodedirs = nodedirs
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe debug catalog-shares NODEDIRS.."

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful for that.
"""
        return t

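# helper: invoke a function that returns a Deferred and synchronously unwrap
# its result. This only works when the Deferred has already fired (as with
# ImmediateReadBucketProxy below); otherwise results[] would still be empty
# and the results[0] access would raise IndexError.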
def call(c, *args, **kwargs):
    # take advantage of the fact that ImmediateReadBucketProxy returns
    # Deferreds that are already fired
    results = []
    failures = []
    d = defer.maybeDeferred(c, *args, **kwargs)
    d.addCallbacks(results.append, failures.append)
    if failures:
        failures[0].raiseException()
    return results[0]

def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri, storage
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == storage.MutableShareFile.MAGIC:
        # mutable share
        m = storage.MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        leases = list(m._enumerate_leases(f))
        expiration_time = min( [lease[1].expiration_time
                                for lease in leases] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"
        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, abs_sharefile)
        else:
            print >>out, "UNKNOWN mutable %s" % (abs_sharefile,)
    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable share

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, "", "", "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(self.sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = storage.ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.iter_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   abs_sharefile)

    else:
        print >>out, "UNKNOWN really-unknown %s" % (abs_sharefile,)

    f.close()

def catalog_shares(options):
    out = options.stdout
    err = options.stderr
    now = time.time()
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares")
        try:
            abbrevs = os.listdir(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            pass
        else:
            for abbrevdir in abbrevs:
                if abbrevdir == "incoming":
                    continue
                abbrevdir = os.path.join(d, abbrevdir)
                # this tool may get run against bad disks, so we can't assume
                # that os.listdir will always succeed. Try to catalog as much
                # as possible.
                try:
                    sharedirs = os.listdir(abbrevdir)
                    for si_s in sharedirs:
                        si_dir = os.path.join(abbrevdir, si_s)
                        catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err)
                except:
                    print >>err, "Error processing %s" % abbrevdir
                    failure.Failure().printTraceback(err)

    return 0

def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
    try:
        for shnum_s in os.listdir(si_dir):
            abs_sharefile = os.path.join(si_dir, shnum_s)
            abs_sharefile = os.path.abspath(abs_sharefile)
            assert os.path.isfile(abs_sharefile)
            try:
                describe_share(abs_sharefile, si_s, shnum_s, now,
                               out)
            except:
                print >>err, "Error processing %s" % abs_sharefile
                failure.Failure().printTraceback(err)
    except:
        print >>err, "Error processing %s" % si_dir
        failure.Failure().printTraceback(err)

class CorruptShareOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug corrupt-share SHARE_FILENAME"

    optParameters = [
        ["offset", "o", "block-random", "Which bit to flip."],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Corrupt the given share by flipping a bit. This will cause a
verifying/downloading client to log an integrity-check failure incident, and
downloads will proceed with a different share.

The --offset parameter controls which bit should be flipped. The default is
to flip a single random bit of the block data.

 tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

Obviously, this command should not be used in normal operation.
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename

def corrupt_share(options):
    import random
    from allmydata import storage
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy

    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"

    # first, what kind of share is it?

    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        # flip the chosen bit of the chosen byte (the message above reports
        # the byte offset and bit number)
        d = chr(ord(d) ^ (0x01 << bit))
        f.seek(offset)
        f.write(d)
        f.close()
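    # decide which kind of container this is by reading the magic at the
    # start of the file: mutable containers begin with
    # MutableShareFile.MAGIC, anything else is treated as immutable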
    f = open(fn, "rb")
    prefix = f.read(32)
    f.close()

    if prefix == storage.MutableShareFile.MAGIC:
        # mutable
        m = storage.MutableShareFile(fn)
        f = open(fn, "rb")
        f.seek(m.DATA_OFFSET)
        data = f.read(2000)
        # make sure this slot contains an SDMF share
        assert data[0] == "\x00", "non-SDMF mutable shares not supported"
        f.close()

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
        flip_bit(start, end)
    else:
        # otherwise assume it's immutable
        f = storage.ShareFile(fn)
        bp = ReadBucketProxy(None, '', '')
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]
        flip_bit(start, end)
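    # both branches flip a bit inside the block-data section only, so the
    # container itself stays parseable and the damage is the kind a
    # downloader detects through its hash-tree checks
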
class ReplOptions(usage.Options):
    pass

def repl(options):
    import code
    return code.interact()

class DebugCommand(usage.Options):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
        ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
        ["corrupt-share", None, CorruptShareOptions, "Corrupt a share"],
        ["repl", None, ReplOptions, "Open a python interpreter"],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe debug SUBCOMMAND"
    def getUsage(self, width=None):
        #t = usage.Options.getUsage(self, width)
        t = """Subcommands:

    tahoe debug dump-share      Unpack and display the contents of a share
    tahoe debug dump-cap        Unpack a read-cap or write-cap
    tahoe debug find-shares     Locate sharefiles in node directories
    tahoe debug catalog-shares  Describe all shares in node dirs
    tahoe debug corrupt-share   Corrupt a share by flipping a bit.

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t

785 "dump-share": dump_share,
786 "dump-cap": dump_cap,
787 "find-shares": find_shares,
788 "catalog-shares": catalog_shares,
789 "corrupt-share": corrupt_share,
def do_debug(options):
    so = options.subOptions
    so.stdout = options.stdout
    so.stderr = options.stderr
    f = subDispatch[options.subCommand]
    return f(so)


subCommands = [
    ["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list"],
    ]

dispatch = {
    "debug": do_debug,
    }