# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import struct, time, os
from twisted.python import usage, failure
from twisted.internet import defer

class DumpOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-share SHARE_FILENAME"

    optFlags = [
        ["offsets", None, "Display a table of section offsets"],
        ["leases-only", None, "Dump leases but not CHK contents"],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename

def dump_share(options):
    from allmydata.storage.mutable import MutableShareFile

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % options['filename']

    f = open(options['filename'], "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        return dump_mutable_share(options)
    # otherwise assume it's immutable
    return dump_immutable_share(options)
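
# Note: container type is recognized purely by the file's leading bytes:
# mutable containers begin with the fixed MutableShareFile.MAGIC string, and
# anything else is assumed to be an immutable ShareFile. The same 32-byte
# sniff reappears in describe_share() and corrupt_share() below, e.g.:
#
#   f = open(filename, "rb")
#   prefix = f.read(32)
#   f.close()
#   is_mutable = (prefix == MutableShareFile.MAGIC)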

def dump_immutable_share(options):
    from allmydata.storage.immutable import ShareFile

    out = options.stdout
    f = ShareFile(options['filename'])
    if not options["leases-only"]:
        dump_immutable_chk_share(f, out, options)
    dump_immutable_lease_info(f, out)
    print >>out
    return 0

def dump_immutable_chk_share(f, out, options):
    from allmydata import uri
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy
    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None, '', '')
    offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
    print >>out, "%20s: %d" % ("version", bp._version)
    seek = offsets['uri_extension']
    length = struct.unpack(bp._fieldstruct,
                           f.read_share_data(seek, bp._fieldsize))[0]
    seek += bp._fieldsize
    UEB_data = f.read_share_data(seek, length)
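
    # The URI extension block (UEB) is length-prefixed: the offsets table
    # locates the uri_extension section, whose first bp._fieldsize bytes hold
    # a big-endian length, followed by that many bytes of UEB data. Given a
    # buffer `raw` starting at that section (hypothetical name), this is:
    #
    #   (length,) = struct.unpack(bp._fieldstruct, raw[:bp._fieldsize])
    #   UEB_data = raw[bp._fieldsize:bp._fieldsize+length]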

    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    print >>out
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
        u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                   unpacked["needed_shares"],
                                   unpacked["total_shares"], unpacked["size"])
        verify_cap = u.to_string()
        print >>out, "%20s: %s" % ("verify-cap", verify_cap)
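
    # e.g. for a share stored at
    #   .../storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0
    # pieces[-2] is "4vozh77tsrw7mdhnj7qvp5ky74", the base32 storage index,
    # which is everything CHKFileVerifierURI needs beyond the UEB fields.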

    sizes = {}
    sizes['data'] = (offsets['plaintext_hash_tree'] -
                     offsets['data'])
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])

    if options['offsets']:
        print >>out
        print >>out, " Section Offsets:"
        print >>out, "%20s: %s" % ("share data", f._data_offset)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k, k)
            offset = f._data_offset + offsets[k]
            print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
        print >>out, "%20s: %s" % ("leases", f._lease_offset)

def dump_immutable_lease_info(f, out):
    # display lease information too
    print >>out
    leases = list(f.get_leases())
    if leases:
        for i, lease in enumerate(leases):
            when = format_expiration_time(lease.expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when)
    else:
        print >>out, " No leases."

def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains / (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains / 3600)
    return when
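
# Examples (hedged): a lease with 2.5 days remaining formats as
# "216000s (2 days)", one with 90 minutes remaining as "5400s (1 hours)",
# and anything under an hour as just the raw "1234s" form.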

def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    if f.read(1) == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, "  ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, "  expires in %s" % when
            print >>out, "  renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, "  cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, "  secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)

    return 0

def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI

    offset = m.DATA_OFFSET

    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError, e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)
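
    # Hedged reference for what unpack_header() parses: the SDMF prefix is
    # believed to follow struct format ">BQ32s16s BBQQ" (version byte, 8-byte
    # seqnum, 32-byte root hash, 16-byte IV, one-byte k and N, 8-byte segment
    # size and data length), followed by a table of section offsets -- which
    # is why the --offsets branch below computes positions with
    # struct.calcsize(">B") and struct.calcsize(">BQ").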

    print >>out, " SDMF contents:"
    print >>out, " seqnum: %d" % seqnum
    print >>out, " root_hash: %s" % base32.b2a(root_hash)
    print >>out, " IV: %s" % base32.b2a(IV)
    print >>out, " required_shares: %d" % k
    print >>out, " total_shares: %d" % N
    print >>out, " segsize: %d" % segsize
    print >>out, " datalen: %d" % datalen
    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
        u = SSKVerifierURI(storage_index, fingerprint)
        verify_cap = u.to_string()
        print >>out, " verify-cap:", verify_cap

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"
        def printoffset(name, value, shift=0):
            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data", "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

class DumpCapOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-cap [options] FILECAP"

    optParameters = [
        ["nodeid", "n",
         None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None,
         "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None,
         "client's base directory, from which a -c secret will be read"],
        ]
    def parseArgs(self, cap):
        self.cap = cap

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""
        return t

def dump_cap(options):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    out = options.stdout
    cap = options.cap
    nodeid = None
    if options['nodeid']:
        nodeid = b32decode(options['nodeid'].upper())
    secret = None
    if options['client-secret']:
        secret = base32.a2b(options['client-secret'])
    elif options['client-dir']:
        secretfile = os.path.join(options['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "r").read().strip())
        except EnvironmentError:
            pass

    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out)

def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)
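
# The secrets above form a derivation chain: the client's base secret yields
# per-client renewal/cancel secrets, hashing in the storage index yields
# per-file secrets, and hashing in the server's nodeid yields the per-bucket
# lease secrets that are actually sent to each storage server.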

def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import uri
    from allmydata.storage.server import si_b2a
    from allmydata.util import base32, hashutil

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", si_b2a(u.storage_index)
        _dump_secrets(u.storage_index, secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", si_b2a(u.storage_index)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", u.data

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
        print >>out
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.storage_index, secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.NewDirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyNewDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.NewDirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    else:
        print >>out, "unknown cap type"

class FindSharesOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.."
    def parseArgs(self, storage_index_s, *nodedirs):
        self.si_s = storage_index_s
        self.nodedirs = nodedirs
    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""
        return t

def find_shares(options):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata.storage.server import si_a2b, storage_index_to_dir

    out = options.stdout
    sharedir = storage_index_to_dir(si_a2b(options.si_s))
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in os.listdir(d):
                print >>out, os.path.join(d, shnum)

    return 0
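
# Note: storage_index_to_dir() returns a nested path (a short base32 prefix
# directory containing the full storage-index directory, as in the
# "44k/44kai1..." example above), so each node directory costs only one
# os.path.exists() check and one os.listdir().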

class CatalogSharesOptions(usage.Options):
    def parseArgs(self, *nodedirs):
        self.nodedirs = nodedirs
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe debug catalog-shares NODEDIRS.."

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful for that.
"""
        return t

def call(c, *args, **kwargs):
    # take advantage of the fact that ImmediateReadBucketProxy returns
    # Deferreds that are already fired
    results = []
    failures = []
    d = defer.maybeDeferred(c, *args, **kwargs)
    d.addCallbacks(results.append, failures.append)
    if failures:
        failures[0].raiseException()
    return results[0]
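
# call() is safe here only because ImmediateReadBucketProxy (below) returns
# Deferreds that have already fired, so addCallbacks() populates one of the
# two lists synchronously. Typical use:
#
#   UEB_data = call(bp.get_uri_extension)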

def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        expiration_time = min( [lease.expiration_time
                                for (i, lease) in m._enumerate_leases(f)] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, abs_sharefile)
        else:
            print >>out, "UNKNOWN mutable %s" % (abs_sharefile,)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, "", "", "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(self.sf.read_share_data(offset, size))
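
        # _read() is the low-level fetch hook the rest of ReadBucketProxy is
        # built on; overriding it to return an already-fired Deferred wrapping
        # a local file read is what lets call() above unwrap results without
        # a running reactor.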

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.get_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   abs_sharefile)

    else:
        print >>out, "UNKNOWN really-unknown %s" % (abs_sharefile,)

    f.close()

def catalog_shares(options):
    out = options.stdout
    err = options.stderr
    now = time.time()
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares")
        try:
            abbrevs = os.listdir(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            pass
        else:
            for abbrevdir in abbrevs:
                if abbrevdir == "incoming":
                    continue
                abbrevdir = os.path.join(d, abbrevdir)
                # this tool may get run against bad disks, so we can't assume
                # that os.listdir will always succeed. Try to catalog as much
                # as possible.
                try:
                    sharedirs = os.listdir(abbrevdir)
                    for si_s in sharedirs:
                        si_dir = os.path.join(abbrevdir, si_s)
                        catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err)
                except:
                    print >>err, "Error processing %s" % abbrevdir
                    failure.Failure().printTraceback(err)

    return 0

def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
    try:
        for shnum_s in os.listdir(si_dir):
            abs_sharefile = os.path.join(si_dir, shnum_s)
            abs_sharefile = os.path.abspath(abs_sharefile)
            assert os.path.isfile(abs_sharefile)
            try:
                describe_share(abs_sharefile, si_s, shnum_s, now,
                               out)
            except:
                print >>err, "Error processing %s" % abs_sharefile
                failure.Failure().printTraceback(err)
    except:
        print >>err, "Error processing %s" % si_dir
        failure.Failure().printTraceback(err)

class CorruptShareOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug corrupt-share SHARE_FILENAME"

    optParameters = [
        ["offset", "o", "block-random", "Which bit to flip."],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Corrupt the given share by flipping a bit. This will cause a
verifying/downloading client to log an integrity-check failure incident, and
downloads will proceed with a different share.

The --offset parameter controls which bit should be flipped. The default is
to flip a single random bit of the block data.

 tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

Obviously, this command should not be used in normal operation.
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename

def corrupt_share(options):
    import random
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"
    # first, what kind of share is it?

    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        d = chr(ord(d) ^ (0x01 << bit))
        f.seek(offset)
        f.write(d)
        f.close()

    f = open(fn, "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        # mutable
        m = MutableShareFile(fn)
        f = open(fn, "rb")
        f.seek(m.DATA_OFFSET)
        data = f.read(2000)
        # make sure this slot contains an SDMF share
        assert data[0] == "\x00", "non-SDMF mutable shares not supported"
        f.close()

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
        flip_bit(start, end)
    else:
        # otherwise assume it's immutable
        f = ShareFile(fn)
        bp = ReadBucketProxy(None, '', '')
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]
        flip_bit(start, end)
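
# flip_bit reports the corrupted location as "[start..end): offset.bN" before
# rewriting the byte in place, so a test run records exactly which bit of
# which byte was flipped.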

class ReplOptions(usage.Options):
    pass

def repl(options):
    import code
    return code.interact()


class DebugCommand(usage.Options):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
        ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
        ["corrupt-share", None, CorruptShareOptions, "Corrupt a share"],
        ["repl", None, ReplOptions, "Open a python interpreter"],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe debug SUBCOMMAND"
    def getUsage(self, width=None):
        #t = usage.Options.getUsage(self, width)
        t = """Subcommands:
    tahoe debug dump-share      Unpack and display the contents of a share
    tahoe debug dump-cap        Unpack a read-cap or write-cap
    tahoe debug find-shares     Locate sharefiles in node directories
    tahoe debug catalog-shares  Describe all shares in node dirs
    tahoe debug corrupt-share   Corrupt a share by flipping a bit

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t

subDispatch = {
    "dump-share": dump_share,
    "dump-cap": dump_cap,
    "find-shares": find_shares,
    "catalog-shares": catalog_shares,
    "corrupt-share": corrupt_share,
    "repl": repl,
    }

def do_debug(options):
    so = options.subOptions
    so.stdout = options.stdout
    so.stderr = options.stderr
    f = subDispatch[options.subCommand]
    return f(so)

subCommands = [
    ["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list"],
    ]

dispatch = {
    "debug": do_debug,
    }