# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import struct, time, os
from twisted.python import usage, failure
from twisted.internet import defer

class DumpOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-share SHARE_FILENAME"

    optFlags = [
        ["offsets", None, "Display a table of section offsets"],
        ["leases-only", None, "Dump leases but not CHK contents"],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

"""
        return t

    def parseArgs(self, filename):
        from allmydata.util.encodingutil import argv_to_abspath
        self['filename'] = argv_to_abspath(filename)

def dump_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util.encodingutil import quote_output

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % quote_output(options['filename'])

    # mutable share containers start with the 32-byte MutableShareFile.MAGIC
    f = open(options['filename'], "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        return dump_mutable_share(options)
    # otherwise assume it's immutable
    return dump_immutable_share(options)
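
# both dump_mutable_share() and dump_immutable_share() return 0, which
# do_debug() (below) passes back as the subcommand's exit status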

def dump_immutable_share(options):
    from allmydata.storage.immutable import ShareFile

    out = options.stdout
    f = ShareFile(options['filename'])
    if not options["leases-only"]:
        dump_immutable_chk_share(f, out, options)
    dump_immutable_lease_info(f, out)
    print >>out
    return 0

def dump_immutable_chk_share(f, out, options):
    from allmydata import uri
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util.encodingutil import quote_output, to_str

    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None, '', '')
    offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
    print >>out, "%20s: %d" % ("version", bp._version)
    seek = offsets['uri_extension']
    length = struct.unpack(bp._fieldstruct,
                           f.read_share_data(seek, bp._fieldsize))[0]
    seek += bp._fieldsize
    UEB_data = f.read_share_data(seek, length)

    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_str(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
            u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                       unpacked["needed_shares"],
                                       unpacked["total_shares"], unpacked["size"])
            verify_cap = u.to_string()
            print >>out, "%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False))

    sizes = {}
    sizes['data'] = (offsets['plaintext_hash_tree'] -
                     offsets['data'])
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])

    if options['offsets']:
        print >>out
        print >>out, " Section Offsets:"
        print >>out, "%20s: %s" % ("share data", f._data_offset)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k, k)
            offset = f._data_offset + offsets[k]
            print >>out, "  %20s: %s (0x%x)" % (name, offset, offset)
        print >>out, "%20s: %s" % ("leases", f._lease_offset)

def dump_immutable_lease_info(f, out):
    # display lease information too
    print >>out
    leases = list(f.get_leases())
    if leases:
        for i, lease in enumerate(leases):
            when = format_expiration_time(lease.expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when)
    else:
        print >>out, " No leases."

def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains / (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains / 3600)
    return when

def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    if f.read(1) == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, " ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, " expires in %s" % when
            print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)

    return 0

def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI
    from allmydata.util.encodingutil import quote_output, to_str

    offset = m.DATA_OFFSET

    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError, e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    # the ig_* values are ignored: we only want the offsets table here
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print >>out, " SDMF contents:"
    print >>out, " seqnum: %d" % seqnum
    print >>out, " root_hash: %s" % base32.b2a(root_hash)
    print >>out, " IV: %s" % base32.b2a(IV)
    print >>out, " required_shares: %d" % k
    print >>out, " total_shares: %d" % N
    print >>out, " segsize: %d" % segsize
    print >>out, " datalen: %d" % datalen
    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_str(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = SSKVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"
        def printoffset(name, value, shift=0):
            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data",
                  "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

class DumpCapOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-cap [options] FILECAP"

    optParameters = [
        ["nodeid", "n",
         None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None,
         "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None,
         "client's base directory, from which a -c secret will be read"],
        ]

    def parseArgs(self, cap):
        self.cap = cap

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""
        return t

def dump_cap(options):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    out = options.stdout
    cap = options.cap
    nodeid = None
    if options['nodeid']:
        nodeid = b32decode(options['nodeid'].upper())
    secret = None
    if options['client-secret']:
        secret = base32.a2b(options['client-secret'])
    elif options['client-dir']:
        secretfile = os.path.join(options['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "r").read().strip())
        except EnvironmentError:
            pass

    # accept a web-API URL and strip it down to the embedded cap
    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out)

def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)

def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import uri
    from allmydata.storage.server import si_b2a
    from allmydata.util import base32, hashutil
    from allmydata.util.encodingutil import quote_output

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", si_b2a(u.get_storage_index())
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", si_b2a(u.get_storage_index())

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", quote_output(u.data)

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", si_b2a(u.get_storage_index())
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
        print >>out
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", si_b2a(u.get_storage_index())
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", si_b2a(u.get_storage_index())
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.DirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.DirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    else:
        print >>out, "unknown cap type"

class FindSharesOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.."

    def parseArgs(self, storage_index_s, *nodedirs):
        from allmydata.util.encodingutil import argv_to_abspath
        self.si_s = storage_index_s
        self.nodedirs = map(argv_to_abspath, nodedirs)

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""
        return t

def find_shares(options):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata.storage.server import si_a2b, storage_index_to_dir
    from allmydata.util.encodingutil import listdir_unicode

    out = options.stdout
    sharedir = storage_index_to_dir(si_a2b(options.si_s))
    for d in options.nodedirs:
        d = os.path.join(d, "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in listdir_unicode(d):
                print >>out, os.path.join(d, shnum)

    return 0

class CatalogSharesOptions(usage.Options):
    def parseArgs(self, *nodedirs):
        from allmydata.util.encodingutil import argv_to_abspath
        self.nodedirs = map(argv_to_abspath, nodedirs)
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe debug catalog-shares NODEDIRS.."

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful for spotting such anomalies.
"""
        return t

def call(c, *args, **kwargs):
    # take advantage of the fact that ImmediateReadBucketProxy returns
    # Deferreds that are already fired
    results = []
    failures = []
    d = defer.maybeDeferred(c, *args, **kwargs)
    d.addCallbacks(results.append, failures.append)
    if failures:
        failures[0].raiseException()
    return results[0]
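
# usage: UEB_data = call(bp.get_uri_extension), as in describe_share()
# below; call() unwraps the already-fired Deferred, or re-raises whatever
# failure it captured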

def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32
    from allmydata.util.encodingutil import quote_output

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        data_length = m._read_data_length(f)
        expiration_time = min([lease.expiration_time
                               for (i, lease) in m._enumerate_leases(f)])
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, quote_output(abs_sharefile))
        else:
            print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable share, container version 1

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, "", "", "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(self.sf.read_share_data(offset, size))
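
        # ShareFile.read_share_data is synchronous, so every Deferred this
        # proxy returns is already fired, which is what makes the call()
        # helper above safe to use here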

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min([lease.expiration_time
                               for lease in sf.get_leases()])
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   quote_output(abs_sharefile))

    else:
        print >>out, "UNKNOWN really-unknown %s" % quote_output(abs_sharefile)

    f.close()

def catalog_shares(options):
    from allmydata.util.encodingutil import listdir_unicode, quote_output

    out = options.stdout
    err = options.stderr
    now = time.time()
    for d in options.nodedirs:
        d = os.path.join(d, "storage/shares")
        try:
            abbrevs = listdir_unicode(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            pass
        else:
            for abbrevdir in abbrevs:
                if abbrevdir == "incoming":
                    continue
                abbrevdir = os.path.join(d, abbrevdir)
                # this tool may get run against bad disks, so we can't assume
                # that listdir_unicode will always succeed. Try to catalog as
                # much as possible.
                try:
                    sharedirs = listdir_unicode(abbrevdir)
                    for si_s in sharedirs:
                        si_dir = os.path.join(abbrevdir, si_s)
                        catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err)
                except:
                    print >>err, "Error processing %s" % quote_output(abbrevdir)
                    failure.Failure().printTraceback(err)

    return 0

def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
    from allmydata.util.encodingutil import listdir_unicode, quote_output

    try:
        for shnum_s in listdir_unicode(si_dir):
            abs_sharefile = os.path.join(si_dir, shnum_s)
            assert os.path.isfile(abs_sharefile)
            try:
                describe_share(abs_sharefile, si_s, shnum_s, now,
                               out)
            except:
                print >>err, "Error processing %s" % quote_output(abs_sharefile)
                failure.Failure().printTraceback(err)
    except:
        print >>err, "Error processing %s" % quote_output(si_dir)
        failure.Failure().printTraceback(err)

class CorruptShareOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug corrupt-share SHARE_FILENAME"

    optParameters = [
        ["offset", "o", "block-random", "Which bit to flip."],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Corrupt the given share by flipping a bit. This will cause a
verifying/downloading client to log an integrity-check failure incident, and
downloads will proceed with a different share.

The --offset parameter controls which bit should be flipped. The default is
to flip a single random bit of the block data.

 tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

Obviously, this command should not be used in normal operation.
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename

def corrupt_share(options):
    import random
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"
    # first, what kind of share is it?

    def flip_bit(start, end):
        # flip one randomly-chosen bit of a randomly-chosen byte in [start, end)
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        # XOR with the chosen bit's mask, so the printed bit is the one flipped
        d = chr(ord(d) ^ (0x01 << bit))
        f.seek(offset)
        f.write(d)
        f.close()

    f = open(fn, "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        # mutable
        m = MutableShareFile(fn)
        f = open(fn, "rb")
        f.seek(m.DATA_OFFSET)
        data = f.read(2000)
        # make sure this slot contains an SDMF share
        assert data[0] == "\x00", "non-SDMF mutable shares not supported"
        f.close()

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
        flip_bit(start, end)
    else:
        # otherwise assume it's immutable
        f = ShareFile(fn)
        bp = ReadBucketProxy(None, '', '')
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]
        flip_bit(start, end)
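
# in both branches, [start, end) brackets the block-data region of the
# share, so the flipped bit is covered by the block hash tree and will be
# detected by a verifier or downloader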

class ReplOptions(usage.Options):
    pass

def repl(options):
    import code
    return code.interact()

class DebugCommand(usage.Options):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
        ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
        ["corrupt-share", None, CorruptShareOptions, "Corrupt a share"],
        ["repl", None, ReplOptions, "Open a python interpreter"],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe debug SUBCOMMAND"
    def getUsage(self, width=None):
        #t = usage.Options.getUsage(self, width)
        t = """
Subcommands:
    tahoe debug dump-share      Unpack and display the contents of a share
    tahoe debug dump-cap        Unpack a read-cap or write-cap
    tahoe debug find-shares     Locate sharefiles in node directories
    tahoe debug catalog-shares  Describe all shares in node dirs
    tahoe debug corrupt-share   Corrupt a share by flipping a bit.

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t

subDispatch = {
811 "dump-share": dump_share,
812 "dump-cap": dump_cap,
813 "find-shares": find_shares,
814 "catalog-shares": catalog_shares,
815 "corrupt-share": corrupt_share,

def do_debug(options):
    so = options.subOptions
    so.stdout = options.stdout
    so.stderr = options.stderr
    f = subDispatch[options.subCommand]
    return f(so)
829 ["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list"],