-import time, pprint, itertools
+import pprint, itertools
import simplejson
from twisted.internet import defer
from nevow import rend, inevow, tags as T
from allmydata.util import base32, idlib
from allmydata.web.common import getxmlfile, get_arg, \
- abbreviate_time, abbreviate_rate, abbreviate_size, plural, compute_rate
+ abbreviate_time, abbreviate_rate, abbreviate_size, plural, compute_rate, render_time
from allmydata.interfaces import IUploadStatus, IDownloadStatus, \
IPublishStatus, IRetrieveStatus, IServermapUpdaterStatus
def render_pushed_shares(self, ctx, data):
d = self.upload_results()
- d.addCallback(lambda res: res.pushed_shares)
+ d.addCallback(lambda res: res.get_pushed_shares())
return d
def render_preexisting_shares(self, ctx, data):
d = self.upload_results()
- d.addCallback(lambda res: res.preexisting_shares)
+ d.addCallback(lambda res: res.get_preexisting_shares())
return d
def render_sharemap(self, ctx, data):
d = self.upload_results()
- d.addCallback(lambda res: res.sharemap)
+ d.addCallback(lambda res: res.get_sharemap())
def _render(sharemap):
if sharemap is None:
return "None"
l = T.ul()
- for shnum, peerids in sorted(sharemap.items()):
- peerids = ', '.join([idlib.shortnodeid_b2a(i) for i in peerids])
- l[T.li["%d -> placed on [%s]" % (shnum, peerids)]]
+ for shnum, servers in sorted(sharemap.items()):
+ server_names = ', '.join([s.get_name() for s in servers])
+ l[T.li["%d -> placed on [%s]" % (shnum, server_names)]]
return l
d.addCallback(_render)
return d
def render_servermap(self, ctx, data):
d = self.upload_results()
- d.addCallback(lambda res: res.servermap)
+ d.addCallback(lambda res: res.get_servermap())
def _render(servermap):
if servermap is None:
return "None"
l = T.ul()
- for peerid in sorted(servermap.keys()):
- peerid_s = idlib.shortnodeid_b2a(peerid)
- shares_s = ",".join(["#%d" % shnum
- for shnum in servermap[peerid]])
- l[T.li["[%s] got share%s: %s" % (peerid_s,
- plural(servermap[peerid]),
- shares_s)]]
+ for server, shnums in sorted(servermap.items()):
+ shares_s = ",".join(["#%d" % shnum for shnum in shnums])
+ l[T.li["[%s] got share%s: %s" % (server.get_name(),
+ plural(shnums), shares_s)]]
return l
d.addCallback(_render)
return d
def data_file_size(self, ctx, data):
d = self.upload_results()
- d.addCallback(lambda res: res.file_size)
+ d.addCallback(lambda res: res.get_file_size())
return d
def _get_time(self, name):
d = self.upload_results()
- d.addCallback(lambda res: res.timings.get(name))
+ d.addCallback(lambda res: res.get_timings().get(name))
return d
def data_time_total(self, ctx, data):
def data_time_contacting_helper(self, ctx, data):
return self._get_time("contacting_helper")
- def data_time_existence_check(self, ctx, data):
- return self._get_time("existence_check")
-
def data_time_cumulative_fetch(self, ctx, data):
return self._get_time("cumulative_fetch")
def _get_rate(self, name):
d = self.upload_results()
def _convert(r):
- file_size = r.file_size
- time = r.timings.get(name)
- return compute_rate(file_size, time)
+ file_size = r.get_file_size()
+ duration = r.get_timings().get(name)
+ return compute_rate(file_size, duration)
d.addCallback(_convert)
return d
def data_rate_encode_and_push(self, ctx, data):
d = self.upload_results()
def _convert(r):
- file_size = r.file_size
- time1 = r.timings.get("cumulative_encoding")
- time2 = r.timings.get("cumulative_sending")
+ file_size = r.get_file_size()
+ time1 = r.get_timings().get("cumulative_encoding")
+ time2 = r.get_timings().get("cumulative_sending")
if (time1 is None or time2 is None):
return None
else:
def data_rate_ciphertext_fetch(self, ctx, data):
d = self.upload_results()
def _convert(r):
- fetch_size = r.ciphertext_fetched
- time = r.timings.get("cumulative_fetch")
- return compute_rate(fetch_size, time)
+ fetch_size = r.get_ciphertext_fetched()
+ duration = r.get_timings().get("cumulative_fetch")
+ return compute_rate(fetch_size, duration)
d.addCallback(_convert)
return d
return d
def render_started(self, ctx, data):
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(data.get_started()))
+ started_s = render_time(data.get_started())
return started_s
def render_si(self, ctx, data):
d = self.download_results()
def _convert(r):
file_size = r.file_size
- time = r.timings.get(name)
- return compute_rate(file_size, time)
+ duration = r.timings.get(name)
+ return compute_rate(file_size, duration)
d.addCallback(_convert)
return d
def _find_overlap(self, events, start_key, end_key):
# given a list of event dicts, return a new list in which each event
- # has an extra "row" key (an int, starting at 0). This is a hint to
- # our JS frontend about how to overlap the parts of the graph it is
- # drawing.
+ # has an extra "row" key (an int, starting at 0), and if appropriate
+ # a "serverid" key (ascii-encoded server id), replacing the "server"
+ # key. This is a hint to our JS frontend about how to overlap the
+ # parts of the graph it is drawing.
- # we must always make a copy, since we're going to be adding "row"
- # keys and don't want to change the original objects. If we're
+ # we must always make a copy, since we're going to be adding keys
+ # and don't want to change the original objects. If we're
# stringifying serverids, we'll also be changing the serverid keys.
new_events = []
rows = []
for ev in events:
ev = ev.copy()
- if "serverid" in ev:
- ev["serverid"] = base32.b2a(ev["serverid"])
+            if "server" in ev:
+ ev["serverid"] = ev["server"].get_longname()
+ del ev["server"]
# find an empty slot in the rows
free_slot = None
for row,finished in enumerate(rows):
for ev in events:
# DownloadStatus promises to give us events in temporal order
ev = ev.copy()
- ev["serverid"] = base32.b2a(ev["server"].get_serverid())
+ ev["serverid"] = ev["server"].get_longname()
+ del ev["server"]
if ev["serverid"] not in serverid_to_group:
groupnum = len(serverid_to_group)
serverid_to_group[ev["serverid"]] = groupnum
rows[free_slot] = ev["finish_time"]
ev["row"] = (groupnum, free_slot)
new_events.append(ev)
+ del groupnum
# maybe also return serverid_to_group, groupnum_to_rows, and some
# indication of the highest finish_time
#
data = { } # this will be returned to the GET
ds = self.download_status
+ data["misc"] = self._find_overlap(ds.misc_events,
+ "start_time", "finish_time")
data["read"] = self._find_overlap(ds.read_events,
"start_time", "finish_time")
data["segment"] = self._find_overlap(ds.segment_events,
"start_time", "finish_time")
+ # TODO: overlap on DYHB isn't very useful, and usually gets in the
+ # way. So don't do it.
data["dyhb"] = self._find_overlap(ds.dyhb_requests,
"start_time", "finish_time")
data["block"],data["block_rownums"] = self._find_overlap_requests(ds.block_requests)
- servernums = {}
- serverid_strings = {}
- for d_ev in data["dyhb"]:
- if d_ev["serverid"] not in servernums:
- servernum = len(servernums)
- servernums[d_ev["serverid"]] = servernum
- #title= "%s: %s" % ( ",".join([str(shnum) for shnum in shnums]))
- serverid_strings[servernum] = d_ev["serverid"][:4]
- data["server_info"] = dict([(serverid, {"num": servernums[serverid],
- "color": self.color(base32.a2b(serverid)),
- "short": serverid_strings[servernums[serverid]],
- })
- for serverid in servernums.keys()])
- data["num_serverids"] = len(serverid_strings)
+ server_info = {} # maps longname to {num,color,short}
+ server_shortnames = {} # maps servernum to shortname
+ for d_ev in ds.dyhb_requests:
+ s = d_ev["server"]
+ longname = s.get_longname()
+ if longname not in server_info:
+ num = len(server_info)
+ server_info[longname] = {"num": num,
+ "color": self.color(s),
+ "short": s.get_name() }
+ server_shortnames[str(num)] = s.get_name()
+
+ data["server_info"] = server_info
+ data["num_serverids"] = len(server_info)
# we'd prefer the keys of serverids[] to be ints, but this is JSON,
# so they get converted to strings. Stupid javascript.
- data["serverids"] = serverid_strings
+ data["serverids"] = server_shortnames
data["bounds"] = {"min": ds.first_timestamp, "max": ds.last_timestamp}
return simplejson.dumps(data, indent=1) + "\n"
t[T.tr[T.th["serverid"], T.th["sent"], T.th["received"],
T.th["shnums"], T.th["RTT"]]]
for d_ev in self.download_status.dyhb_requests:
- serverid = d_ev["serverid"]
+ server = d_ev["server"]
sent = d_ev["start_time"]
shnums = d_ev["response_shnums"]
received = d_ev["finish_time"]
- serverid_s = idlib.shortnodeid_b2a(serverid)
rtt = None
if received is not None:
rtt = received - sent
if not shnums:
shnums = ["-"]
- t[T.tr(style="background: %s" % self.color(serverid))[
- [T.td[serverid_s], T.td[srt(sent)], T.td[srt(received)],
+ t[T.tr(style="background: %s" % self.color(server))[
+ [T.td[server.get_name()], T.td[srt(sent)], T.td[srt(received)],
T.td[",".join([str(shnum) for shnum in shnums])],
T.td[self.render_time(None, rtt)],
]]]
rtt = None
if r_ev["finish_time"] is not None:
rtt = r_ev["finish_time"] - r_ev["start_time"]
- color = self.color(server.get_serverid())
+ color = self.color(server)
t[T.tr(style="background: %s" % color)[
T.td[server.get_name()], T.td[r_ev["shnum"]],
T.td["[%d:+%d]" % (r_ev["start"], r_ev["length"])],
return l
- def color(self, peerid):
+ def color(self, server):
+ peerid = server.get_serverid() # binary
def m(c):
return min(ord(c) / 2 + 0x80, 0xff)
return "#%02x%02x%02x" % (m(peerid[0]), m(peerid[1]), m(peerid[2]))
return d
def render_started(self, ctx, data):
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(data.get_started()))
+ started_s = render_time(data.get_started())
return started_s + " (%s)" % data.get_started()
def render_si(self, ctx, data):
docFactory = getxmlfile("download-status-timeline.xhtml")
def render_started(self, ctx, data):
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(data.get_started()))
+ started_s = render_time(data.get_started())
return started_s + " (%s)" % data.get_started()
def render_si(self, ctx, data):
self.retrieve_status = data
def render_started(self, ctx, data):
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(data.get_started()))
+ started_s = render_time(data.get_started())
return started_s
def render_si(self, ctx, data):
return ctx.tag["Encoding: %s of %s" % (k, n)]
def render_problems(self, ctx, data):
- problems = data.problems
+ problems = data.get_problems()
if not problems:
return ""
l = T.ul()
def _get_rate(self, data, name):
file_size = self.retrieve_status.get_size()
- time = self.retrieve_status.timings.get(name)
- return compute_rate(file_size, time)
+ duration = self.retrieve_status.timings.get(name)
+ return compute_rate(file_size, duration)
def data_time_total(self, ctx, data):
return self.retrieve_status.timings.get("total")
if not per_server:
return ""
l = T.ul()
- for peerid in sorted(per_server.keys()):
- peerid_s = idlib.shortnodeid_b2a(peerid)
+ for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
times_s = ", ".join([self.render_time(None, t)
- for t in per_server[peerid]])
- l[T.li["[%s]: %s" % (peerid_s, times_s)]]
+ for t in per_server[server]])
+ l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
return T.li["Per-Server Fetch Response Times: ", l]
self.publish_status = data
def render_started(self, ctx, data):
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(data.get_started()))
+ started_s = render_time(data.get_started())
return started_s
def render_si(self, ctx, data):
sharemap = servermap.make_sharemap()
for shnum in sorted(sharemap.keys()):
l[T.li["%d -> Placed on " % shnum,
- ", ".join(["[%s]" % idlib.shortnodeid_b2a(peerid)
- for peerid in sharemap[shnum]])]]
+ ", ".join(["[%s]" % server.get_name()
+ for server in sharemap[shnum]])]]
return ctx.tag["Sharemap:", l]
def render_problems(self, ctx, data):
- problems = data.problems
+ problems = data.get_problems()
if not problems:
return ""
l = T.ul()
+ # XXX: is this exercised? I don't think PublishStatus.problems is
+ # ever populated
for peerid in sorted(problems.keys()):
peerid_s = idlib.shortnodeid_b2a(peerid)
l[T.li["[%s]: %s" % (peerid_s, problems[peerid])]]
def _get_rate(self, data, name):
file_size = self.publish_status.get_size()
- time = self.publish_status.timings.get(name)
- return compute_rate(file_size, time)
+ duration = self.publish_status.timings.get(name)
+ return compute_rate(file_size, duration)
def data_time_total(self, ctx, data):
return self.publish_status.timings.get("total")
if not per_server:
return ""
l = T.ul()
- for peerid in sorted(per_server.keys()):
- peerid_s = idlib.shortnodeid_b2a(peerid)
+ for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
times_s = ", ".join([self.render_time(None, t)
- for t in per_server[peerid]])
- l[T.li["[%s]: %s" % (peerid_s, times_s)]]
+ for t in per_server[server]])
+ l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
return T.li["Per-Server Response Times: ", l]
class MapupdateStatusPage(rend.Page, RateAndTimeMixin):
self.update_status = data
def render_started(self, ctx, data):
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(data.get_started()))
+ started_s = render_time(data.get_started())
return started_s
def render_finished(self, ctx, data):
when = data.get_finished()
if not when:
return "not yet"
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(data.get_finished()))
+ started_s = render_time(data.get_finished())
return started_s
def render_si(self, ctx, data):
return ctx.tag["Server Problems:", l]
def render_privkey_from(self, ctx, data):
- peerid = data.get_privkey_from()
- if peerid:
- return ctx.tag["Got privkey from: [%s]"
- % idlib.shortnodeid_b2a(peerid)]
+ server = data.get_privkey_from()
+ if server:
+ return ctx.tag["Got privkey from: [%s]" % server.get_name()]
else:
return ""
if not per_server:
return ""
l = T.ul()
- for peerid in sorted(per_server.keys()):
- peerid_s = idlib.shortnodeid_b2a(peerid)
+ for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
times = []
- for op,started,t in per_server[peerid]:
+ for op,started,t in per_server[server]:
#times.append("%s/%.4fs/%s/%s" % (op,
# started,
# self.render_time(None, started - self.update_status.get_started()),
else:
times.append( "privkey(" + self.render_time(None, t) + ")" )
times_s = ", ".join(times)
- l[T.li["[%s]: %s" % (peerid_s, times_s)]]
+ l[T.li["[%s]: %s" % (server.get_name(), times_s)]]
return T.li["Per-Server Response Times: ", l]
def render_timing_chart(self, ctx, data):
started = self.update_status.get_started()
total = self.update_status.timings.get("total")
per_server = self.update_status.timings.get("per_server")
+ # We'd like to use an https: URL here, but the site has a domain/cert mismatch.
base = "http://chart.apis.google.com/chart?"
pieces = ["cht=bhs"]
pieces.append("chco=ffffff,4d89f9,c6d9fd") # colors
nb_nodes = 0
graph_botom_margin= 21
graph_top_margin = 5
- peerids_s = []
+ server_names = []
top_abs = started
# we sort the queries by the time at which we sent the first request
- sorttable = [ (times[0][1], peerid)
- for peerid, times in per_server.items() ]
+ sorttable = [ (times[0][1], server)
+ for server, times in per_server.items() ]
sorttable.sort()
- peerids = [t[1] for t in sorttable]
+ servers = [t[1] for t in sorttable]
- for peerid in peerids:
+ for server in servers:
nb_nodes += 1
- times = per_server[peerid]
- peerid_s = idlib.shortnodeid_b2a(peerid)
- peerids_s.append(peerid_s)
+ times = per_server[server]
+ name = server.get_name()
+ server_names.append(name)
# for servermap updates, there are either one or two queries per
# peer. The second (if present) is to get the privkey.
op,q_started,q_elapsed = times[0]
pieces.append(chds)
pieces.append("chxt=x,y")
pieces.append("chxr=0,0.0,%0.3f" % top_rel)
- pieces.append("chxl=1:|" + "|".join(reversed(peerids_s)))
+ pieces.append("chxl=1:|" + "|".join(reversed(server_names)))
# use up to 10 grid lines, at decimal multiples.
# mathutil.next_power_of_k doesn't handle numbers smaller than one,
# unfortunately.
def render_row(self, ctx, data):
s = data
- TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
- started_s = time.strftime(TIME_FORMAT,
- time.localtime(s.get_started()))
+ started_s = render_time(s.get_started())
ctx.fillSlots("started", started_s)
si_s = base32.b2a_or_none(s.get_storage_index())
return "%s files / %s bytes (%s)" % (files, bytes,
abbreviate_size(bytes))
+ def render_drop_monitored(self, ctx, data):
+ dirs = data["counters"].get("drop_upload.dirs_monitored", 0)
+ return "%s directories" % (dirs,)
+
+ def render_drop_uploads(self, ctx, data):
+ # TODO: bytes uploaded
+ files = data["counters"].get("drop_upload.files_uploaded", 0)
+ return "%s files" % (files,)
+
+ def render_drop_queued(self, ctx, data):
+ files = data["counters"].get("drop_upload.files_queued", 0)
+ return "%s files" % (files,)
+
+ def render_drop_failed(self, ctx, data):
+ files = data["counters"].get("drop_upload.files_failed", 0)
+ return "%s files" % (files,)
+
def render_raw(self, ctx, data):
raw = pprint.pformat(data)
return ctx.tag[raw]