# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import struct, time, os
from twisted.python import usage, failure
from twisted.internet import defer
from allmydata.scripts.cli import VDriveOptions
class DumpOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-share SHARE_FILENAME"

    optFlags = [
        ["offsets", None, "Display a table of section offsets"],
        ["leases-only", None, "Dump leases but not CHK contents"],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename
def dump_share(options):
    from allmydata.storage.mutable import MutableShareFile

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % options['filename']

    f = open(options['filename'], "rb")
    prefix = f.read(32)
    f.close()
    if prefix == MutableShareFile.MAGIC:
        return dump_mutable_share(options)
    # otherwise assume it's immutable
    return dump_immutable_share(options)
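
# The dispatch above keys off MutableShareFile.MAGIC, a fixed byte string
# written at the start of every v1 mutable container. A minimal sketch of
# the same check, assuming a hypothetical helper name and path (not part
# of this module):
#
#   def share_kind(path):
#       from allmydata.storage.mutable import MutableShareFile
#       prefix = open(path, "rb").read(32)
#       if prefix == MutableShareFile.MAGIC:
#           return "mutable"
#       return "immutable"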
def dump_immutable_share(options):
    from allmydata.storage.immutable import ShareFile

    out = options.stdout
    f = ShareFile(options['filename'])
    if not options["leases-only"]:
        dump_immutable_chk_share(f, out, options)
    dump_immutable_lease_info(f, out)
    print >>out
    return 0
def dump_immutable_chk_share(f, out, options):
    from allmydata import uri
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy

    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None, '', '')
    offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
    print >>out, "%20s: %d" % ("version", bp._version)
    seek = offsets['uri_extension']
    length = struct.unpack(bp._fieldstruct,
                           f.read_share_data(seek, bp._fieldsize))[0]
    seek += bp._fieldsize
    UEB_data = f.read_share_data(seek, length)

    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print >>out, "%20s: %s" % (dk, unpacked[k])

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
        u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                   unpacked["needed_shares"],
                                   unpacked["total_shares"], unpacked["size"])
        verify_cap = u.to_string()
        print >>out, "%20s: %s" % ("verify-cap", verify_cap)

    sizes = {}
    sizes['data'] = (offsets['plaintext_hash_tree'] -
                     offsets['data'])
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])

    if options['offsets']:
        print >>out
        print >>out, " Section Offsets:"
        print >>out, "%20s: %s" % ("share data", f._data_offset)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k, k)
            offset = f._data_offset + offsets[k]
            print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
        print >>out, "%20s: %s" % ("leases", f._lease_offset)
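
# For orientation (a sketch inferred from the code above, not normative):
# an immutable share file is a small container header (f._data_offset
# bytes), then the share data that ReadBucketProxy indexes -- block data,
# the plaintext/crypttext hash trees, block and share hashes, and finally
# the URI extension block (UEB) -- followed by leases starting at
# f._lease_offset. The offsets table parsed above positions each section
# relative to f._data_offset, which is exactly what --offsets prints.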
def dump_immutable_lease_info(f, out):
    # display lease information too
    print >>out
    leases = list(f.get_leases())
    if leases:
        for i, lease in enumerate(leases):
            when = format_expiration_time(lease.expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when)
    else:
        print >>out, " No leases."
def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains / (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains / 3600)
    return when
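
# Worked example: for a lease expiring in exactly three days,
# remains = 3*24*3600 = 259200.0, so the first branch fires and this
# returns "259200s (3 days)"; a 2-hour lease would give "7200s (2 hours)".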
def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib

    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    if f.read(1) == "\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, lease) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, " ownerid: %d" % lease.owner_num
            when = format_expiration_time(lease.expiration_time)
            print >>out, " expires in %s" % when
            print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
            print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
            print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)

    return 0
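
# The one-byte probe above reads the share-format version byte at the
# start of the slot's data area: 0x00 marks an SDMF (Small Distributed
# Mutable File) share. Any other value is reported as "unknown" rather
# than guessed at.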
def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError, e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print >>out, " SDMF contents:"
    print >>out, " seqnum: %d" % seqnum
    print >>out, " root_hash: %s" % base32.b2a(root_hash)
    print >>out, " IV: %s" % base32.b2a(IV)
    print >>out, " required_shares: %d" % k
    print >>out, " total_shares: %d" % N
    print >>out, " segsize: %d" % segsize
    print >>out, " datalen: %d" % datalen
    print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
    print >>out, " pubkey: %d bytes" % len(pubkey)
    print >>out, " signature: %d bytes" % len(signature)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print >>out, " share_hash_chain: %s" % share_hash_ids
    print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2 and base32.could_be_base32_encoded(pieces[-2]):
        storage_index = base32.a2b(pieces[-2])
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
        u = SSKVerifierURI(storage_index, fingerprint)
        verify_cap = u.to_string()
        print >>out, " verify-cap:", verify_cap

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print >>out
        print >>out, " Section Offsets:"
        def printoffset(name, value, shift=0):
            print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data",
                  "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k, k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

    print >>out
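
# The o_seqnum/o_root_hash arithmetic above implies the SDMF header
# layout: a one-byte version, then an 8-byte big-endian seqnum (hence
# root_hash at struct.calcsize(">BQ") == 9), then the root hash and IV,
# followed by the offsets table that unpack_header() returns. The "+ 4"
# on the extra-lease offset skips the 4-byte lease count stored at that
# position (an inference from MutableShareFile's accessors, not verified
# against every container version).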
class DumpCapOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug dump-cap [options] FILECAP"

    optParameters = [
        ["nodeid", "n",
         None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None,
         "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None,
         "client's base directory, from which a -c secret will be read"],
        ]

    def parseArgs(self, cap):
        self.cap = cap

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""
        return t
def dump_cap(options):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    out = options.stdout
    cap = options.cap
    nodeid = None
    if options['nodeid']:
        nodeid = b32decode(options['nodeid'].upper())
    secret = None
    if options['client-secret']:
        secret = base32.a2b(options['client-secret'])
    elif options['client-dir']:
        secretfile = os.path.join(options['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "r").read().strip())
        except EnvironmentError:
            pass

    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out)
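
# Hypothetical session (values illustrative, not real caps): feeding an
# SSK verify-cap like the one in getUsage() above prints the cap type
# ("SSK Verifier URI:"), its storage index, and its fingerprint. Adding
# --nodeid and --client-secret additionally derives the write-enabler
# and lease secrets via _dump_secrets() below.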
def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)
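
# The derivation chain exercised above: the client's base secret is hashed
# into a per-client renewal (or cancel) secret, narrowed by storage index
# into a per-file secret, then narrowed by server nodeid into the
# per-lease secret actually sent to that storage server. Each step is a
# one-way hash, so a server that learns its lease secret cannot recover
# the file-wide or client-wide secrets.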
def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import uri
    from allmydata.storage.server import si_b2a
    from allmydata.util import base32, hashutil

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", si_b2a(u.storage_index)
        _dump_secrets(u.storage_index, secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", si_b2a(u.storage_index)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", u.data

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
        print >>out
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.storage_index, secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.DirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.DirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    else:
        print >>out, "unknown cap type"
class FindSharesOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug find-shares STORAGE_INDEX NODEDIRS.."
    def parseArgs(self, storage_index_s, *nodedirs):
        self.si_s = storage_index_s
        self.nodedirs = nodedirs
    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""
        return t
def find_shares(options):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata.storage.server import si_a2b, storage_index_to_dir
    out = options.stdout

    sharedir = storage_index_to_dir(si_a2b(options.si_s))
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in os.listdir(d):
                print >>out, os.path.join(d, shnum)

    return 0
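
# Directory layout assumed above: each node keeps shares under
# NODEDIR/storage/shares/<abbrev>/<storage-index>/<shnum>, and
# storage_index_to_dir() produces the <abbrev>/<storage-index> part, so
# this loop only has to join it onto each node directory and list the
# share numbers found there.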
class CatalogSharesOptions(usage.Options):

    def parseArgs(self, *nodedirs):
        self.nodedirs = nodedirs
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe debug catalog-shares NODEDIRS.."

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful for that.
"""
        return t
def call(c, *args, **kwargs):
    # take advantage of the fact that ImmediateReadBucketProxy returns
    # Deferreds that are already fired
    results = []
    failures = []
    d = defer.maybeDeferred(c, *args, **kwargs)
    d.addCallbacks(results.append, failures.append)
    if failures:
        failures[0].raiseException()
    return results[0]
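
# Usage sketch: `UEB_data = call(bp.get_uri_extension)` (as done in
# describe_share below) synchronously unwraps a Deferred that has already
# fired. If the Deferred were still pending, neither list would be
# populated and `results[0]` would raise IndexError -- this helper is
# only safe for immediate Deferreds like defer.succeed().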
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    from allmydata import uri
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util import base32

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == MutableShareFile.MAGIC:
        # mutable share
        m = MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        expiration_time = min( [lease.expiration_time
                                for (i,lease) in m._enumerate_leases(f)] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SDMF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, abs_sharefile)
        else:
            print >>out, "UNKNOWN mutable %s" % (abs_sharefile,)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable share

        class ImmediateReadBucketProxy(ReadBucketProxy):
            def __init__(self, sf):
                self.sf = sf
                ReadBucketProxy.__init__(self, "", "", "")
            def __repr__(self):
                return "<ImmediateReadBucketProxy>"
            def _read(self, offset, size):
                return defer.succeed(sf.read_share_data(offset, size))

        # use a ReadBucketProxy to parse the bucket and find the uri extension
        sf = ShareFile(abs_sharefile)
        bp = ImmediateReadBucketProxy(sf)

        expiration_time = min( [lease.expiration_time
                                for lease in sf.get_leases()] )
        expiration = max(0, expiration_time - now)

        UEB_data = call(bp.get_uri_extension)
        unpacked = uri.unpack_extension_readable(UEB_data)

        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   abs_sharefile)

    else:
        print >>out, "UNKNOWN really-unknown %s" % (abs_sharefile,)

    f.close()
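
# Sample catalog lines this emits (values hypothetical, matching the
# format strings above):
#
#   CHK 44kai1tui348689nrw8fjegc8c 3/10 8123 xd4v... 2581200 /path/to/share/0
#   SDMF 5bjsgmi4... 3/10 2045 #3:mph7... 2581177 /path/to/share/2
#
# i.e. share type, storage index, k/N, data length, integrity root (UEB
# hash, or seqnum:roothash for SDMF), seconds until the earliest lease
# expires, and the absolute share path.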
def catalog_shares(options):
    out = options.stdout
    err = options.stderr
    now = time.time()
    for d in options.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares")
        try:
            abbrevs = os.listdir(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            pass
        else:
            for abbrevdir in abbrevs:
                if abbrevdir == "incoming":
                    continue
                abbrevdir = os.path.join(d, abbrevdir)
                # this tool may get run against bad disks, so we can't assume
                # that os.listdir will always succeed. Try to catalog as much
                # as possible.
                try:
                    sharedirs = os.listdir(abbrevdir)
                    for si_s in sharedirs:
                        si_dir = os.path.join(abbrevdir, si_s)
                        catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err)
                except:
                    print >>err, "Error processing %s" % abbrevdir
                    failure.Failure().printTraceback(err)

    return 0
def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
    try:
        for shnum_s in os.listdir(si_dir):
            abs_sharefile = os.path.join(si_dir, shnum_s)
            abs_sharefile = os.path.abspath(abs_sharefile)
            assert os.path.isfile(abs_sharefile)
            try:
                describe_share(abs_sharefile, si_s, shnum_s, now,
                               out)
            except:
                print >>err, "Error processing %s" % abs_sharefile
                failure.Failure().printTraceback(err)
    except:
        print >>err, "Error processing %s" % si_dir
        failure.Failure().printTraceback(err)
class CorruptShareOptions(usage.Options):
    def getSynopsis(self):
        return "Usage: tahoe debug corrupt-share SHARE_FILENAME"

    optParameters = [
        ["offset", "o", "block-random", "Which bit to flip."],
        ]

    def getUsage(self, width=None):
        t = usage.Options.getUsage(self, width)
        t += """
Corrupt the given share by flipping a bit. This will cause a
verifying/downloading client to log an integrity-check failure incident, and
downloads will proceed with a different share.

The --offset parameter controls which bit should be flipped. The default is
to flip a single random bit of the block data.

 tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

Obviously, this command should not be used in normal operation.
"""
        return t

    def parseArgs(self, filename):
        self['filename'] = filename
def corrupt_share(options):
    import random
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy

    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"
    # first, what kind of share is it?

    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        d = chr(ord(d) ^ (0x01 << bit)) # flip the chosen bit, not always bit 0
        f.seek(offset)
        f.write(d)
        f.close()
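
    # Worked example of flip_bit (hypothetical values): if the byte at
    # `offset` is 0x41 ('A') and bit == 1, then 0x41 ^ (0x01 << 1) == 0x43
    # ('C'). XOR is its own inverse, so flipping the same offset/bit again
    # would restore the original byte.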
    f = open(fn, "rb")
    prefix = f.read(32)
    f.close()

    if prefix == MutableShareFile.MAGIC:
        # mutable
        m = MutableShareFile(fn)
        f = open(fn, "rb")
        f.seek(m.DATA_OFFSET)
        data = f.read(2000)
        f.close()
        # make sure this slot contains an SDMF share
        assert data[0] == "\x00", "non-SDMF mutable shares not supported"

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
    else:
        # otherwise assume it's immutable
        f = ShareFile(fn)
        bp = ReadBucketProxy(None, '', '')
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]

    flip_bit(start, end)
class ReplOptions(usage.Options):
    pass

def repl(options):
    import code
    return code.interact()
class ConsolidateOptions(VDriveOptions):
    optParameters = [
        ("dbfile", None, None, "persistent file for reusable dirhashes"),
        ("backupfile", "b", None, "file to store backup of Archives/ contents"),
        ]
    optFlags = [
        ("really", None, "Really remove old snapshot directories"),
        ("verbose", "v", "Emit a line for every directory examined"),
        ]
    def parseArgs(self, where):
        self.where = where

def consolidate(options):
    from consolidate import main
    return main(options)
class DebugCommand(usage.Options):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
        ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
        ["corrupt-share", None, CorruptShareOptions, "Corrupt a share"],
        ["repl", None, ReplOptions, "Open a python interpreter"],
        ["consolidate", None, ConsolidateOptions, "Consolidate non-shared backups"],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    def getSynopsis(self):
        return "Usage: tahoe debug SUBCOMMAND"
    def getUsage(self, width=None):
        #t = usage.Options.getUsage(self, width)
        t = """
Subcommands:
    tahoe debug dump-share      Unpack and display the contents of a share
    tahoe debug dump-cap        Unpack a read-cap or write-cap
    tahoe debug find-shares     Locate sharefiles in node directories
    tahoe debug catalog-shares  Describe all shares in node dirs
    tahoe debug corrupt-share   Corrupt a share by flipping a bit.
    tahoe debug consolidate     Consolidate old non-shared backups into shared ones.

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t

subDispatch = {
    "dump-share": dump_share,
    "dump-cap": dump_cap,
    "find-shares": find_shares,
    "catalog-shares": catalog_shares,
    "corrupt-share": corrupt_share,
    "repl": repl,
    "consolidate": consolidate,
    }

def do_debug(options):
    so = options.subOptions
    so.stdout = options.stdout
    so.stderr = options.stderr
    f = subDispatch[options.subCommand]
    return f(so)

subCommands = [
    ["debug", None, DebugCommand,
     "debug subcommands: use 'tahoe debug' for a list"],
    ]

dispatch = {
    "debug": do_debug,
    }