import time

from twisted.internet import defer
from nevow import rend, tags as T

from allmydata.util import base32, idlib
from allmydata.web.common import IClient, getxmlfile, abbreviate_time, \
     abbreviate_rate
from allmydata.interfaces import IUploadStatus, IDownloadStatus, \
     IPublishStatus, IRetrieveStatus
def plural(sequence_or_length):
    """Return the plural suffix ("s" or "") for a count or sequence.

    Accepts either an int (treated as a count) or any sized sequence
    (its len() is used). Returns "" when the count is exactly 1, else "s".
    """
    if isinstance(sequence_or_length, int):
        length = sequence_or_length
    else:
        length = len(sequence_or_length)
    if length == 1:
        return ""
    return "s"
class RateAndTimeMixin:
    # Shared Nevow renderers for elapsed-time and transfer-rate values.
    # Both delegate to the abbreviation helpers from allmydata.web.common.

    def render_time(self, ctx, data):
        # data: a duration in seconds (or None); returns an abbreviated string
        return abbreviate_time(data)

    def render_rate(self, ctx, data):
        # data: a rate in bytes/second (or None); returns an abbreviated string
        return abbreviate_rate(data)
class UploadResultsRendererMixin(RateAndTimeMixin):
    # this requires a method named 'upload_results', which must return a
    # Deferred that fires with the upload's results object

    def render_pushed_shares(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.pushed_shares)
        return d

    def render_preexisting_shares(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.preexisting_shares)
        return d

    def render_sharemap(self, ctx, data):
        # list each share number and where it ended up
        d = self.upload_results()
        d.addCallback(lambda res: res.sharemap)
        def _render(sharemap):
            if sharemap is None:
                return "None"
            l = T.ul()
            for shnum in sorted(sharemap.keys()):
                l[T.li["%d -> %s" % (shnum, sharemap[shnum])]]
            return l
        d.addCallback(_render)
        return d

    def render_servermap(self, ctx, data):
        # list, per peer, which shares that peer got
        d = self.upload_results()
        d.addCallback(lambda res: res.servermap)
        def _render(servermap):
            if servermap is None:
                return "None"
            l = T.ul()
            for peerid in sorted(servermap.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                shares_s = ",".join(["#%d" % shnum
                                     for shnum in servermap[peerid]])
                l[T.li["[%s] got share%s: %s" % (peerid_s,
                                                 plural(servermap[peerid]),
                                                 shares_s)]]
            return l
        d.addCallback(_render)
        return d

    def data_file_size(self, ctx, data):
        d = self.upload_results()
        d.addCallback(lambda res: res.file_size)
        return d

    def _get_time(self, name):
        # Deferred firing with the named timing (seconds), or None if absent
        d = self.upload_results()
        d.addCallback(lambda res: res.timings.get(name))
        return d

    def data_time_total(self, ctx, data):
        return self._get_time("total")

    def data_time_storage_index(self, ctx, data):
        return self._get_time("storage_index")

    def data_time_contacting_helper(self, ctx, data):
        return self._get_time("contacting_helper")

    def data_time_existence_check(self, ctx, data):
        return self._get_time("existence_check")

    def data_time_cumulative_fetch(self, ctx, data):
        return self._get_time("cumulative_fetch")

    def data_time_helper_total(self, ctx, data):
        return self._get_time("helper_total")

    def data_time_peer_selection(self, ctx, data):
        return self._get_time("peer_selection")

    def data_time_total_encode_and_push(self, ctx, data):
        return self._get_time("total_encode_and_push")

    def data_time_cumulative_encoding(self, ctx, data):
        return self._get_time("cumulative_encoding")

    def data_time_cumulative_sending(self, ctx, data):
        return self._get_time("cumulative_sending")

    def data_time_hashes_and_close(self, ctx, data):
        return self._get_time("hashes_and_close")

    def _get_rate(self, name):
        # Deferred firing with file_size / elapsed for the named timing,
        # or None when the timing is missing or zero
        d = self.upload_results()
        def _convert(r):
            file_size = r.file_size
            time = r.timings.get(name)
            if time is None:
                return None
            try:
                return 1.0 * file_size / time
            except ZeroDivisionError:
                return None
        d.addCallback(_convert)
        return d

    def data_rate_total(self, ctx, data):
        return self._get_rate("total")

    def data_rate_storage_index(self, ctx, data):
        return self._get_rate("storage_index")

    def data_rate_encode(self, ctx, data):
        return self._get_rate("cumulative_encoding")

    def data_rate_push(self, ctx, data):
        return self._get_rate("cumulative_sending")

    def data_rate_encode_and_push(self, ctx, data):
        # combined rate over encoding plus sending time
        d = self.upload_results()
        def _convert(r):
            file_size = r.file_size
            if file_size is None:
                return None
            time1 = r.timings.get("cumulative_encoding")
            if time1 is None:
                return None
            time2 = r.timings.get("cumulative_sending")
            if time2 is None:
                return None
            try:
                return 1.0 * file_size / (time1+time2)
            except ZeroDivisionError:
                return None
        d.addCallback(_convert)
        return d

    def data_rate_ciphertext_fetch(self, ctx, data):
        # rate at which the helper fetched ciphertext from us
        d = self.upload_results()
        def _convert(r):
            fetch_size = r.ciphertext_fetched
            if fetch_size is None:
                return None
            time = r.timings.get("cumulative_fetch")
            if time is None:
                return None
            try:
                return 1.0 * fetch_size / time
            except ZeroDivisionError:
                return None
        d.addCallback(_convert)
        return d
class UploadStatusPage(UploadResultsRendererMixin, rend.Page):
    """Detail page for a single (possibly still-running) upload."""
    docFactory = getxmlfile("upload-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.upload_status = data

    def upload_results(self):
        # get_results may return a value or a Deferred; normalize to Deferred
        return defer.maybeDeferred(self.upload_status.get_results)

    def render_results(self, ctx, data):
        # only show the results section once results are available
        d = self.upload_results()
        def _got_results(results):
            if results:
                return ctx.tag
            return ""
        d.addCallback(_got_results)
        return d

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_total_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress_hash(self, ctx, data):
        progress = data.get_progress()[0]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_progress_ciphertext(self, ctx, data):
        progress = data.get_progress()[1]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_progress_encode_push(self, ctx, data):
        progress = data.get_progress()[2]
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()
class DownloadResultsRendererMixin(RateAndTimeMixin):
    # this requires a method named 'download_results', which must return a
    # Deferred that fires with the download's results object

    def render_servermap(self, ctx, data):
        # list, per peer, which shares that peer holds
        d = self.download_results()
        d.addCallback(lambda res: res.servermap)
        def _render(servermap):
            if servermap is None:
                return "None"
            l = T.ul()
            for peerid in sorted(servermap.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                shares_s = ",".join(["#%d" % shnum
                                     for shnum in servermap[peerid]])
                l[T.li["[%s] has share%s: %s" % (peerid_s,
                                                 plural(servermap[peerid]),
                                                 shares_s)]]
            return l
        d.addCallback(_render)
        return d

    def render_servers_used(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.servers_used)
        def _got(servers_used):
            if not servers_used:
                return ""
            peerids_s = ", ".join(["[%s]" % idlib.shortnodeid_b2a(peerid)
                                   for peerid in servers_used])
            return T.li["Servers Used: ", peerids_s]
        d.addCallback(_got)
        return d

    def render_problems(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.server_problems)
        def _got(server_problems):
            if not server_problems:
                return ""
            l = T.ul()
            for peerid in sorted(server_problems.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                l[T.li["[%s]: %s" % (peerid_s, server_problems[peerid])]]
            return T.li["Server Problems:", l]
        d.addCallback(_got)
        return d

    def data_file_size(self, ctx, data):
        d = self.download_results()
        d.addCallback(lambda res: res.file_size)
        return d

    def _get_time(self, name):
        # Deferred firing with the named timing (seconds), or None if absent
        d = self.download_results()
        d.addCallback(lambda res: res.timings.get(name))
        return d

    def data_time_total(self, ctx, data):
        return self._get_time("total")

    def data_time_peer_selection(self, ctx, data):
        return self._get_time("peer_selection")

    def data_time_uri_extension(self, ctx, data):
        return self._get_time("uri_extension")

    def data_time_hashtrees(self, ctx, data):
        return self._get_time("hashtrees")

    def data_time_segments(self, ctx, data):
        return self._get_time("segments")

    def data_time_cumulative_fetch(self, ctx, data):
        return self._get_time("cumulative_fetch")

    def data_time_cumulative_decode(self, ctx, data):
        return self._get_time("cumulative_decode")

    def data_time_cumulative_decrypt(self, ctx, data):
        return self._get_time("cumulative_decrypt")

    def _get_rate(self, name):
        # Deferred firing with file_size / elapsed for the named timing,
        # or None when the timing is missing or zero
        d = self.download_results()
        def _convert(r):
            file_size = r.file_size
            time = r.timings.get(name)
            if time is None:
                return None
            try:
                return 1.0 * file_size / time
            except ZeroDivisionError:
                return None
        d.addCallback(_convert)
        return d

    def data_rate_total(self, ctx, data):
        return self._get_rate("total")

    def data_rate_segments(self, ctx, data):
        return self._get_rate("segments")

    def data_rate_fetch(self, ctx, data):
        return self._get_rate("cumulative_fetch")

    def data_rate_decode(self, ctx, data):
        return self._get_rate("cumulative_decode")

    def data_rate_decrypt(self, ctx, data):
        return self._get_rate("cumulative_decrypt")

    def render_server_timings(self, ctx, data):
        # per-peer list of segment-fetch response times
        d = self.download_results()
        d.addCallback(lambda res: res.timings.get("fetch_per_server"))
        def _render(per_server):
            if per_server is None:
                return ""
            l = T.ul()
            for peerid in sorted(per_server.keys()):
                peerid_s = idlib.shortnodeid_b2a(peerid)
                times_s = ", ".join([self.render_time(None, t)
                                     for t in per_server[peerid]])
                l[T.li["[%s]: %s" % (peerid_s, times_s)]]
            return T.li["Per-Server Segment Fetch Response Times: ", l]
        d.addCallback(_render)
        return d
class DownloadStatusPage(DownloadResultsRendererMixin, rend.Page):
    """Detail page for a single (possibly still-running) download."""
    docFactory = getxmlfile("download-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.download_status = data

    def download_results(self):
        # get_results may return a value or a Deferred; normalize to Deferred
        return defer.maybeDeferred(self.download_status.get_results)

    def render_results(self, ctx, data):
        # only show the results section once results are available
        d = self.download_results()
        def _got_results(results):
            if results:
                return ctx.tag
            return ""
        d.addCallback(_got_results)
        return d

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_total_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()
class RetrieveStatusPage(rend.Page, RateAndTimeMixin):
    """Detail page for a single mutable-file retrieve operation."""
    docFactory = getxmlfile("retrieve-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.retrieve_status = data

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_current_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_encoding(self, ctx, data):
        k, n = data.get_encoding()
        return ctx.tag["Encoding: %s of %s" % (k, n)]

    def render_search_distance(self, ctx, data):
        d = data.get_search_distance()
        return ctx.tag["Search Distance: %s peer%s" % (d, plural(d))]

    def render_problems(self, ctx, data):
        problems = data.problems
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def _get_rate(self, data, name):
        # size / elapsed for the named timing, or None when missing or zero
        file_size = self.retrieve_status.get_size()
        time = self.retrieve_status.timings.get(name)
        if time is None:
            return None
        try:
            return 1.0 * file_size / time
        except ZeroDivisionError:
            return None

    def data_time_total(self, ctx, data):
        return self.retrieve_status.timings.get("total")
    def data_rate_total(self, ctx, data):
        return self._get_rate(data, "total")

    def data_time_peer_selection(self, ctx, data):
        return self.retrieve_status.timings.get("peer_selection")

    def data_time_fetch(self, ctx, data):
        return self.retrieve_status.timings.get("fetch")
    def data_rate_fetch(self, ctx, data):
        return self._get_rate(data, "fetch")

    def data_time_cumulative_verify(self, ctx, data):
        return self.retrieve_status.timings.get("cumulative_verify")

    def data_time_decode(self, ctx, data):
        return self.retrieve_status.timings.get("decode")
    def data_rate_decode(self, ctx, data):
        return self._get_rate(data, "decode")

    def data_time_decrypt(self, ctx, data):
        return self.retrieve_status.timings.get("decrypt")
    def data_rate_decrypt(self, ctx, data):
        return self._get_rate(data, "decrypt")

    def render_server_timings(self, ctx, data):
        # per-peer list of fetch response times
        per_server = self.retrieve_status.timings.get("fetch_per_server")
        if not per_server:
            return ""
        l = T.ul()
        for peerid in sorted(per_server.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            times_s = ", ".join([self.render_time(None, t)
                                 for t in per_server[peerid]])
            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
        return T.li["Per-Server Fetch Response Times: ", l]
class PublishStatusPage(rend.Page, RateAndTimeMixin):
    """Detail page for a single mutable-file publish operation."""
    docFactory = getxmlfile("publish-status.xhtml")

    def __init__(self, data):
        rend.Page.__init__(self, data)
        self.publish_status = data

    def render_started(self, ctx, data):
        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(data.get_started()))
        return started_s

    def render_si(self, ctx, data):
        si_s = base32.b2a_or_none(data.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        return si_s

    def render_helper(self, ctx, data):
        return {True: "Yes",
                False: "No"}[data.using_helper()]

    def render_current_size(self, ctx, data):
        size = data.get_size()
        if size is None:
            size = "(unknown)"
        return size

    def render_progress(self, ctx, data):
        progress = data.get_progress()
        # TODO: make an ascii-art bar
        return "%.1f%%" % (100.0 * progress)

    def render_status(self, ctx, data):
        return data.get_status()

    def render_encoding(self, ctx, data):
        k, n = data.get_encoding()
        return ctx.tag["Encoding: %s of %s" % (k, n)]

    def render_peers_queried(self, ctx, data):
        return ctx.tag["Peers Queried: ", data.peers_queried]

    def render_sharemap(self, ctx, data):
        # list each share number and the peers it was placed on
        sharemap = data.sharemap
        if sharemap is None:
            return ctx.tag["None"]
        l = T.ul()
        for shnum in sorted(sharemap.keys()):
            l[T.li["%d -> Placed on " % shnum,
                   ", ".join(["[%s]" % idlib.shortnodeid_b2a(peerid)
                              for (peerid,seqnum,root_hash)
                              in sharemap[shnum]])]]
        return ctx.tag["Sharemap:", l]

    def render_problems(self, ctx, data):
        problems = data.problems
        if not problems:
            return ""
        l = T.ul()
        for peerid in sorted(problems.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
        return ctx.tag["Server Problems:", l]

    def _get_rate(self, data, name):
        # size / elapsed for the named timing, or None when missing or zero
        file_size = self.publish_status.get_size()
        time = self.publish_status.timings.get(name)
        if time is None:
            return None
        try:
            return 1.0 * file_size / time
        except ZeroDivisionError:
            return None

    def data_time_total(self, ctx, data):
        return self.publish_status.timings.get("total")
    def data_rate_total(self, ctx, data):
        return self._get_rate(data, "total")

    def data_time_setup(self, ctx, data):
        return self.publish_status.timings.get("setup")

    def data_time_query(self, ctx, data):
        return self.publish_status.timings.get("query")

    def data_time_privkey(self, ctx, data):
        return self.publish_status.timings.get("privkey")

    def data_time_privkey_fetch(self, ctx, data):
        return self.publish_status.timings.get("privkey_fetch")
    def render_privkey_from(self, ctx, data):
        peerid = data.privkey_from
        if peerid:
            return " (got from [%s])" % idlib.shortnodeid_b2a(peerid)
        return ""

    def data_time_encrypt(self, ctx, data):
        return self.publish_status.timings.get("encrypt")
    def data_rate_encrypt(self, ctx, data):
        return self._get_rate(data, "encrypt")

    def data_time_encode(self, ctx, data):
        return self.publish_status.timings.get("encode")
    def data_rate_encode(self, ctx, data):
        return self._get_rate(data, "encode")

    def data_time_pack(self, ctx, data):
        return self.publish_status.timings.get("pack")
    def data_rate_pack(self, ctx, data):
        return self._get_rate(data, "pack")
    def data_time_sign(self, ctx, data):
        return self.publish_status.timings.get("sign")

    def data_time_push(self, ctx, data):
        return self.publish_status.timings.get("push")
    def data_rate_push(self, ctx, data):
        return self._get_rate(data, "push")

    def data_initial_read_size(self, ctx, data):
        return self.publish_status.initial_read_size

    def render_server_timings(self, ctx, data):
        # per-peer (op, elapsed) response times; one op class is shown
        # parenthesized to distinguish it from the others
        per_server = self.publish_status.timings.get("per_server")
        if not per_server:
            return ""
        l = T.ul()
        for peerid in sorted(per_server.keys()):
            peerid_s = idlib.shortnodeid_b2a(peerid)
            times = []
            for op,t in per_server[peerid]:
                # NOTE(review): the op name tested here was lost in
                # truncation -- "queried" is the assumed read-side op;
                # confirm against the status object's per_server entries
                if op == "queried":
                    times.append( "(" + self.render_time(None, t) + ")" )
                else:
                    times.append( self.render_time(None, t) )
            times_s = ", ".join(times)
            l[T.li["[%s]: %s" % (peerid_s, times_s)]]
        return T.li["Per-Server Response Times: ", l]
class Status(rend.Page):
    """Top-level status page: tables of active and recent operations."""
    docFactory = getxmlfile("status.xhtml")

    def data_active_operations(self, ctx, data):
        active = (IClient(ctx).list_active_uploads() +
                  IClient(ctx).list_active_downloads() +
                  IClient(ctx).list_active_publish() +
                  IClient(ctx).list_active_retrieve())
        return active

    def data_recent_operations(self, ctx, data):
        # finished operations only, newest first
        recent = [o for o in (IClient(ctx).list_recent_uploads() +
                              IClient(ctx).list_recent_downloads() +
                              IClient(ctx).list_recent_publish() +
                              IClient(ctx).list_recent_retrieve())
                  if not o.get_active()]
        recent.sort(lambda a,b: cmp(a.get_started(), b.get_started()))
        recent.reverse()
        return recent

    def render_row(self, ctx, data):
        s = data

        TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
        started_s = time.strftime(TIME_FORMAT,
                                  time.localtime(s.get_started()))
        ctx.fillSlots("started", started_s)

        si_s = base32.b2a_or_none(s.get_storage_index())
        if si_s is None:
            si_s = "(None)"
        ctx.fillSlots("si", si_s)
        ctx.fillSlots("helper", {True: "Yes",
                                 False: "No"}[s.using_helper()])

        size = s.get_size()
        if size is None:
            size = "(unknown)"
        ctx.fillSlots("total_size", size)

        # each operation type reports progress differently: uploads give a
        # (hash, ciphertext, encode+push) triple, the others a single fraction
        progress = data.get_progress()
        if IUploadStatus.providedBy(data):
            link = "up-%d" % data.get_counter()
            ctx.fillSlots("type", "upload")
            # TODO: make an ascii-art bar
            (chk, ciphertext, encandpush) = progress
            progress_s = ("hash: %.1f%%, ciphertext: %.1f%%, encode: %.1f%%" %
                          ( (100.0 * chk),
                            (100.0 * ciphertext),
                            (100.0 * encandpush) ))
            ctx.fillSlots("progress", progress_s)
        elif IDownloadStatus.providedBy(data):
            link = "down-%d" % data.get_counter()
            ctx.fillSlots("type", "download")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        elif IPublishStatus.providedBy(data):
            link = "publish-%d" % data.get_counter()
            ctx.fillSlots("type", "publish")
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        else:
            assert IRetrieveStatus.providedBy(data)
            ctx.fillSlots("type", "retrieve")
            link = "retrieve-%d" % data.get_counter()
            ctx.fillSlots("progress", "%.1f%%" % (100.0 * progress))
        ctx.fillSlots("status", T.a(href=link)[s.get_status()])
        return ctx.tag

    def childFactory(self, ctx, name):
        # name looks like "up-17"/"down-3"/"publish-5"/"retrieve-9": find the
        # matching status object (recent first, then all) and wrap it in the
        # appropriate detail page
        client = IClient(ctx)
        stype,count_s = name.split("-")
        count = int(count_s)
        if stype == "up":
            for s in client.list_recent_uploads():
                if s.get_counter() == count:
                    return UploadStatusPage(s)
            for s in client.list_all_uploads():
                if s.get_counter() == count:
                    return UploadStatusPage(s)
        if stype == "down":
            for s in client.list_recent_downloads():
                if s.get_counter() == count:
                    return DownloadStatusPage(s)
            for s in client.list_all_downloads():
                if s.get_counter() == count:
                    return DownloadStatusPage(s)
        if stype == "publish":
            for s in client.list_recent_publish():
                if s.get_counter() == count:
                    return PublishStatusPage(s)
            for s in client.list_all_publish():
                if s.get_counter() == count:
                    return PublishStatusPage(s)
        if stype == "retrieve":
            for s in client.list_recent_retrieve():
                if s.get_counter() == count:
                    return RetrieveStatusPage(s)
            for s in client.list_all_retrieve():
                if s.get_counter() == count:
                    return RetrieveStatusPage(s)