2 # do not import any allmydata modules at this level. Do that from inside
3 # individual functions instead.
4 import sys, struct, time, os
5 from twisted.python import usage
class DumpOptions(usage.Options):
    """tahoe dump-share SHARE_FILENAME"""
    def parseArgs(self, filename):
        # single positional argument: path of the share file to dump
        self['filename'] = filename
def dump_share(config, out=sys.stdout, err=sys.stderr):
    """Unpack and display the contents of the share file named by
    config['filename'], writing a human-readable report to 'out'.

    Handles both mutable slots (dispatched to dump_mutable_share) and
    immutable shares. Returns an exit code (0 for success).
    """
    from allmydata import uri, storage

    # check the version, to see if we have a mutable or immutable share
    print >>out, "share filename: %s" % config['filename']

    # a mutable slot starts with a distinctive magic string; read just
    # enough to tell, and close the handle before dispatching
    f = open(config['filename'], "rb")
    prefix = f.read(32)
    f.close()
    if prefix == storage.MutableShareFile.MAGIC:
        return dump_mutable_share(config, out, err)
    # otherwise assume it's immutable
    f = storage.ShareFile(config['filename'])
    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = storage.ReadBucketProxy(None)
    offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
    seek = offsets['uri_extension']
    length = struct.unpack(">L", f.read_share_data(seek, 4))[0]
    seek += 4
    UEB_data = f.read_share_data(seek, length)

    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
    # display each group of known keys with one shared loop (this was
    # previously three copy-pasted loops), blank line between groups
    for keygroup in (keys1, keys2, keys3):
        if keygroup is not keys1:
            print >>out
        for k in keygroup:
            if k in unpacked:
                dk = display_keys.get(k, k)
                print >>out, "%20s: %s" % (dk, unpacked[k])

    # anything in the UEB we didn't anticipate gets shown too
    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print >>out
        print >>out, "LEFTOVER:"
        for k in sorted(leftover):
            print >>out, "%20s: %s" % (k, unpacked[k])

    sizes = {}
    sizes['data'] = bp._data_size
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print >>out
    print >>out, " Size of data within the share:"
    for k in sorted(sizes):
        print >>out, "%20s: %s" % (k, sizes[k])

    # display lease information too
    leases = list(f.iter_leases())
    if leases:
        for i,lease in enumerate(leases):
            (owner_num, renew_secret, cancel_secret, expiration_time) = lease
            when = format_expiration_time(expiration_time)
            print >>out, " Lease #%d: owner=%d, expire in %s" % (i, owner_num,
                                                                 when)
    else:
        print >>out, " No leases."

    print >>out
    return 0
def format_expiration_time(expiration_time):
    """Render how far in the future 'expiration_time' (a unix timestamp,
    seconds since epoch) lies, as a string like '86400s (1 days)'."""
    remaining = expiration_time - time.time()
    text = "%ds" % remaining
    day = 24 * 3600
    # append a coarser unit when the raw second count is hard to read
    if remaining > day:
        text += " (%d days)" % (remaining / day)
    elif remaining > 3600:
        text += " (%d hours)" % (remaining / 3600)
    return text
def dump_mutable_share(config, out, err):
    """Display the contents of the mutable slot named by
    config['filename']: container header, leases, and (for SDMF slots)
    the unpacked share fields. Returns 0.
    """
    from allmydata import storage
    from allmydata.util import base32, idlib
    m = storage.MutableShareFile(config['filename'])
    f = open(config['filename'], "rb")
    try:
        # read the container metadata through the MutableShareFile helpers
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        leases = list(m._enumerate_leases(f))

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SMDF share
            share_type = "SDMF"
    finally:
        # always release the handle, even if the container is corrupt
        f.close()

    print >>out
    print >>out, "Mutable slot found:"
    print >>out, " share_type: %s" % share_type
    print >>out, " write_enabler: %s" % base32.b2a(WE)
    print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
    print >>out, " num_extra_leases: %d" % num_extra_leases
    print >>out, " container_size: %d" % container_size
    print >>out, " data_length: %d" % data_length
    if leases:
        for (leasenum, (oid,et,rs,cs,anid)) in leases:
            print >>out
            print >>out, " Lease #%d:" % leasenum
            print >>out, " ownerid: %d" % oid
            when = format_expiration_time(et)
            print >>out, " expires in %s" % when
            print >>out, " renew_secret: %s" % base32.b2a(rs)
            print >>out, " cancel_secret: %s" % base32.b2a(cs)
            print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(anid)
    else:
        print >>out, "No leases."
    print >>out

    if share_type == "SDMF":
        dump_SDMF_share(m.DATA_OFFSET, data_length, config, out, err)

    return 0
145 def dump_SDMF_share(offset, length, config, out, err):
146 from allmydata.mutable.layout import unpack_share
147 from allmydata.mutable.common import NeedMoreDataError
148 from allmydata.util import base32
150 f = open(config['filename'], "rb")
152 data = f.read(min(length, 2000))
156 pieces = unpack_share(data)
157 except NeedMoreDataError, e:
158 # retry once with the larger size
159 size = e.needed_bytes
160 f = open(config['filename'], "rb")
162 data = f.read(min(length, size))
164 pieces = unpack_share(data)
166 (seqnum, root_hash, IV, k, N, segsize, datalen,
167 pubkey, signature, share_hash_chain, block_hash_tree,
168 share_data, enc_privkey) = pieces
170 print >>out, " SDMF contents:"
171 print >>out, " seqnum: %d" % seqnum
172 print >>out, " root_hash: %s" % base32.b2a(root_hash)
173 print >>out, " IV: %s" % base32.b2a(IV)
174 print >>out, " required_shares: %d" % k
175 print >>out, " total_shares: %d" % N
176 print >>out, " segsize: %d" % segsize
177 print >>out, " datalen: %d" % datalen
178 share_hash_ids = ",".join(sorted([str(hid)
179 for hid in share_hash_chain.keys()]))
180 print >>out, " share_hash_chain: %s" % share_hash_ids
181 print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
class DumpCapOptions(usage.Options):
    # each entry is [long-name, short-name, default, description]
    optParameters = [
        ["nodeid", "n", None, "storage server nodeid (ascii), to construct WE and secrets."],
        ["client-secret", "c", None, "client's base secret (ascii), to construct secrets"],
        ["client-dir", "d", None, "client's base directory, from which a -c secret will be read"],
        ]
    def parseArgs(self, cap):
        # the read-cap or write-cap URI (or webapi URL) to unpack
        self.cap = cap
def dump_cap(config, out=sys.stdout, err=sys.stderr):
    """Unpack a read-cap or write-cap (optionally wrapped in a
    'http://.../uri/CAP' webapi URL) and print its components.

    If --nodeid and/or a client secret were supplied, the derived
    write-enabler and lease secrets are printed as well.
    """
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    import urlparse, urllib

    cap = config.cap
    nodeid = None
    if config['nodeid']:
        # the server's nodeid is needed to derive per-server secrets
        nodeid = b32decode(config['nodeid'].upper())
    secret = None
    if config['client-secret']:
        secret = base32.a2b(config['client-secret'])
    elif config['client-dir']:
        secretfile = os.path.join(config['client-dir'], "private", "secret")
        try:
            sf = open(secretfile, "r")
            try:
                secret = base32.a2b(sf.read().strip())
            finally:
                sf.close()
        except EnvironmentError:
            # best-effort: a missing/unreadable secret file just means we
            # cannot display the derived secrets
            pass

    if cap.startswith("http"):
        # pull the cap string out of a webapi-style URL
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(cap)
        assert path.startswith("/uri/")
        cap = urllib.unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print >>out
    dump_uri_instance(u, nodeid, secret, out, err)
def _dump_secrets(storage_index, secret, nodeid, out):
    """Derive and print the lease secrets for one file.

    The calls below form a derivation chain: client base 'secret' ->
    client-wide (my_*) secret -> per-file secret (keyed by
    storage_index) -> per-server bucket secret (keyed by 'nodeid').
    Each stage is printed only when enough inputs are available.
    """
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print >>out, " client renewal secret:", base32.b2a(crs)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print >>out, " file renewal secret:", base32.b2a(frs)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print >>out, " lease renewal secret:", base32.b2a(renew)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print >>out, " client cancel secret:", base32.b2a(ccs)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print >>out, " file cancel secret:", base32.b2a(fcs)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print >>out, " lease cancel secret:", base32.b2a(cancel)
def dump_uri_instance(u, nodeid, secret, out, err, show_header=True):
    """Print the components of a single parsed URI instance 'u'.

    Dispatches on the URI class. Directory URIs recurse into the wrapped
    filenode URI with show_header=False, so only the directory-specific
    header line is printed for them.
    """
    from allmydata import storage, uri
    from allmydata.util import base32, hashutil

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print >>out, "CHK File:"
        print >>out, " key:", base32.b2a(u.key)
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        _dump_secrets(u.storage_index, secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print >>out, "CHK Verifier URI:"
        print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
        print >>out, " size:", u.size
        print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print >>out, "Literal File URI:"
        print >>out, " data:", u.data

    elif isinstance(u, uri.WriteableSSKFileURI):
        if show_header:
            print >>out, "SSK Writeable URI:"
        print >>out, " writekey:", base32.b2a(u.writekey)
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

        if nodeid:
            # the write-enabler is specific to one storage server
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print >>out, " write_enabler:", base32.b2a(we)
            print >>out
        _dump_secrets(u.storage_index, secret, nodeid, out)

    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print >>out, "SSK Read-only URI:"
        print >>out, " readkey:", base32.b2a(u.readkey)
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print >>out, "SSK Verifier URI:"
        print >>out, " storage index:", storage.si_b2a(u.storage_index)
        print >>out, " fingerprint:", base32.b2a(u.fingerprint)

    elif isinstance(u, uri.NewDirectoryURI):
        if show_header:
            print >>out, "Directory Writeable URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, err, False)
    elif isinstance(u, uri.ReadonlyNewDirectoryURI):
        if show_header:
            print >>out, "Directory Read-only URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, err, False)
    elif isinstance(u, uri.NewDirectoryURIVerifier):
        if show_header:
            print >>out, "Directory Verifier URI:"
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, err, False)
    else:
        print >>out, "unknown cap type"
class FindSharesOptions(usage.Options):
    def parseArgs(self, storage_index_s, *nodedirs):
        # storage_index_s: ascii (base32) storage index to search for
        self.si_s = storage_index_s
        # nodedirs: one or more node base directories to search under
        self.nodedirs = nodedirs
def find_shares(config, out=sys.stdout, err=sys.stderr):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata import storage

    # shares live under BASEDIR/storage/shares/PREFIX/SINDEX/SHNUM;
    # storage_index_to_dir gives the PREFIX/SINDEX part
    sharedir = storage.storage_index_to_dir(storage.si_a2b(config.si_s))
    for d in config.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares", sharedir)
        if os.path.exists(d):
            for shnum in os.listdir(d):
                print >>out, os.path.join(d, shnum)
class CatalogSharesOptions(usage.Options):
    """

    Run this as 'catalog-shares NODEDIRS..', and it will emit a line to stdout
    for each share it finds:

     CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
     SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
     UNKNOWN $abspath_sharefile

    It may be useful to build up a catalog of shares from many storage servers
    and then sort the results. If you see shares with the same SI but different
    parameters/filesize/UEB_hash, then something is wrong.

    """
    def parseArgs(self, *nodedirs):
        # nodedirs: node base directories whose shares should be catalogued
        self.nodedirs = nodedirs
def describe_share(abs_sharefile, si_s, shnum_s, now, out, err):
    """Print a one-line summary of a single share file.

    Emits 'SDMF ...' for mutable SDMF slots, 'CHK ...' for immutable
    shares, and an 'UNKNOWN ...' line otherwise. 'now' is the current
    time (seconds since epoch), used to compute remaining lease time.
    """
    from allmydata import uri, storage
    from allmydata.mutable.layout import unpack_share
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32

    f = open(abs_sharefile, "rb")
    prefix = f.read(32)

    if prefix == storage.MutableShareFile.MAGIC:
        # mutable share
        m = storage.MutableShareFile(abs_sharefile)
        WE, nodeid = m._read_write_enabler_and_nodeid(f)
        num_extra_leases = m._read_num_extra_leases(f)
        data_length = m._read_data_length(f)
        extra_lease_offset = m._read_extra_lease_offset(f)
        container_size = extra_lease_offset - m.DATA_OFFSET
        leases = list(m._enumerate_leases(f))
        # the soonest-expiring lease bounds the share's remaining lifetime
        expiration_time = min( [expiration_time
                                for (leasenum,
                                     (ownerid, expiration_time, rs, cs, nodeid))
                                in leases] )
        expiration = max(0, expiration_time - now)

        share_type = "unknown"
        f.seek(m.DATA_OFFSET)
        if f.read(1) == "\x00":
            # this slot contains an SMDF share
            share_type = "SDMF"

        if share_type == "SDMF":
            f.seek(m.DATA_OFFSET)
            # 2000 bytes usually covers the SDMF header and hash chains
            data = f.read(min(data_length, 2000))

            try:
                pieces = unpack_share(data)
            except NeedMoreDataError, e:
                # retry once with the larger size
                size = e.needed_bytes
                f.seek(m.DATA_OFFSET)
                data = f.read(min(data_length, size))
                pieces = unpack_share(data)
            (seqnum, root_hash, IV, k, N, segsize, datalen,
             pubkey, signature, share_hash_chain, block_hash_tree,
             share_data, enc_privkey) = pieces

            print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
                  (si_s, k, N, datalen,
                   seqnum, base32.b2a(root_hash),
                   expiration, abs_sharefile)
        else:
            print >>out, "UNKNOWN mutable %s" % (abs_sharefile,)

    elif struct.unpack(">L", prefix[:4]) == (1,):
        # immutable share: the container starts with a version-1 header
        sf = storage.ShareFile(abs_sharefile)
        # use a ReadBucketProxy to parse the bucket and find the uri extension
        bp = storage.ReadBucketProxy(None)
        offsets = bp._parse_offsets(sf.read_share_data(0, 0x24))
        seek = offsets['uri_extension']
        length = struct.unpack(">L", sf.read_share_data(seek, 4))[0]
        seek += 4
        UEB_data = sf.read_share_data(seek, length)
        expiration_time = min( [expiration_time
                                for (ownerid, rs, cs, expiration_time)
                                in sf.iter_leases()] )
        expiration = max(0, expiration_time - now)

        unpacked = uri.unpack_extension_readable(UEB_data)
        k = unpacked["needed_shares"]
        N = unpacked["total_shares"]
        filesize = unpacked["size"]
        ueb_hash = unpacked["UEB_hash"]

        print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                                   ueb_hash, expiration,
                                                   abs_sharefile)

    else:
        print >>out, "UNKNOWN really-unknown %s" % (abs_sharefile,)

    f.close()
def catalog_shares(config, out=sys.stdout, err=sys.stderr):
    """Walk every node directory in config.nodedirs and emit one
    descriptive line (via describe_share) per share file found."""
    now = time.time()
    for d in config.nodedirs:
        d = os.path.join(os.path.expanduser(d), "storage/shares")
        if os.path.exists(d):
            # layout: storage/shares/PREFIX/SINDEX/SHNUM
            for abbrevdir in os.listdir(d):
                abbrevdir = os.path.join(d, abbrevdir)
                for si_s in os.listdir(abbrevdir):
                    si_dir = os.path.join(abbrevdir, si_s)
                    for shnum_s in os.listdir(si_dir):
                        abs_sharefile = os.path.join(si_dir, shnum_s)
                        abs_sharefile = os.path.abspath(abs_sharefile)
                        assert os.path.isfile(abs_sharefile)
                        describe_share(abs_sharefile, si_s, shnum_s, now,
                                       out, err)
# subcommand table consumed by the CLI front-end: each entry is
# [command-name, short-name, Options class, description]
subCommands = [
    ["dump-share", None, DumpOptions,
     "Unpack and display the contents of a share (uri_extension and leases)."],
    ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap"],
    ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs"],
    ["catalog-shares", None, CatalogSharesOptions, "Describe shares in node dirs"],
    ]

# maps command name to the function that implements it
dispatch = {
    "dump-share": dump_share,
    "dump-cap": dump_cap,
    "find-shares": find_shares,
    "catalog-shares": catalog_shares,
    }