*~
*.DS_Store
.*.kate-swp
+*.py.bak
/build/
/support/
#!/bin/false # You must specify a python interpreter.
-import sys; assert sys.version_info < (3,), ur"Tahoe-LAFS does not run under Python 3. Please use a version of Python between 2.6 and 2.7.x inclusive."
-import os, subprocess
+import os, subprocess, sys
where = os.path.realpath(sys.argv[0])
base = os.path.dirname(os.path.dirname(where))
def mangle(s):
return str(re.sub(u'[^\\x20-\\x7F]', lambda m: u'\x7F%x;' % (ord(m.group(0)),), s))
- argv = [mangle(argv_unicode[i]) for i in xrange(0, argc.value)]
+ argv = [mangle(argv_unicode[i]) for i in range(0, argc.value)]
# Take only the suffix with the same number of arguments as sys.argv.
# This accounts for anything that can cause initial arguments to be stripped,
def _subst(a):
if a == '@tahoe': return script
return a
- command = prefix + [runner] + map(_subst, args[1:])
+ command = prefix + [runner] + [_subst(arg) for arg in args[1:]]
else:
runner = script
command = prefix + [script] + args
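# Under Python 3 map() returns a lazy iterator, so it can no longer be
# concatenated directly onto a list; the comprehension used above keeps the old
# eager list-building behaviour on both versions. A standalone sketch with
# illustrative demo_* names (not part of this script):
demo_prefix = ["/usr/bin/env"]
demo_args = [" -a ", " -b "]
demo_command = demo_prefix + [a.strip() for a in demo_args]  # was: demo_prefix + map(str.strip, demo_args)
assert demo_command == ["/usr/bin/env", "-a", "-b"]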
#!/bin/false # invoke this with a specific python
+from __future__ import print_function
import sys, shutil, os.path
from subprocess import Popen, PIPE
def get_output(*cmd, **kwargs):
tolerate_stderr = kwargs.get("tolerate_stderr", False)
- print " " + " ".join(cmd)
+ print(" " + " ".join(cmd))
p = Popen(cmd, stdout=PIPE)
(out,err) = p.communicate()
rc = p.returncode
if rc != 0:
- print >>sys.stderr, err
+ print(err, file=sys.stderr)
        raise SubprocessError("command %s exited with rc=%s" % (cmd, rc))
if err and not tolerate_stderr:
- print >>sys.stderr, "stderr:", err
+ print("stderr:", err, file=sys.stderr)
raise SubprocessError("command emitted unexpected stderr")
- print " =>", out,
+ print(" =>", out, end=' ')
return out
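# The changes in this patch follow the standard print-function idiom: with the
# "from __future__ import print_function" added above, the old trailing-comma
# and ">>file" print-statement forms become keyword arguments. Two illustrative
# lines (not taken from the original script; sys is already imported here):
print("no trailing newline,", end=' ')      # was: print "no trailing newline,",
print("sent to stderr", file=sys.stderr)    # was: print >>sys.stderr, "sent to stderr"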
def run(*cmd, **kwargs):
- print " " + " ".join(cmd)
+ print(" " + " ".join(cmd))
# if "stdin" in kwargs:
# stdin = kwargs.pop("stdin")
# p = Popen(cmd, stdin=PIPE, **kwargs)
shutil.copyfile(fn, os.path.join(DEBDIR, n))
if n == "rules":
- os.chmod(os.path.join(DEBDIR, n), 0755) # +x
+ os.chmod(os.path.join(DEBDIR, n), 0o755) # +x
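# 0o755 is the spelling of the old 0755 octal literal accepted by both
# Python 2.6+ and Python 3; the value is unchanged. A quick standalone check
# (illustrative only):
import stat
assert 0o755 == 493 == (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)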
# We put "local package" on the first line of the changelog entry to suppress
# the lintian NMU warnings (since debchange's new entry's "author" will
# This helper script is used with the 'test-desert-island' Makefile target.
+from __future__ import print_function
import sys
good = True
build_out = sys.argv[1]
mode = sys.argv[2]
-print
+print()
for line in open(build_out, "r"):
if mode == "no-downloads":
# if it has all the packages that it needs locally, but we
# currently don't enforce that stronger requirement.
if line.startswith("Downloading http:"):
- print line,
+ print(line, end=' ')
good = False
if good:
if mode == "no-downloads":
- print "Good: build did not try to download any files"
+ print("Good: build did not try to download any files")
sys.exit(0)
else:
if mode == "no-downloads":
- print "Failed: build tried to download files"
+ print("Failed: build tried to download files")
sys.exit(1)
# This script generates a table of dependencies in HTML format on stdout.
# It expects to be run in the tahoe-lafs-dep-eggs directory.
+from __future__ import print_function
import re, os, sys
import pkg_resources
greybgstyle = '; background-color: #E0E0E0'
nobgstyle = ''
-print '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">'
-print '<html>'
-print '<head>'
-print ' <meta http-equiv="Content-Type" content="text/html;charset=us-ascii">'
-print ' <title>Software packages that Tahoe-LAFS depends on</title>'
-print '</head>'
-print '<body>'
-print '<h2>What is this?</h2>'
-print '<p>See <a href="https://tahoe-lafs.org/trac/tahoe-lafs/browser/docs/quickstart.rst">quickstart.rst</a>, <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Installation">wiki:Installation</a>, and <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CompileError">wiki:CompileError</a>.'
-print '<h2>Software packages that Tahoe-LAFS depends on</h2>'
-print
+print('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">')
+print('<html>')
+print('<head>')
+print(' <meta http-equiv="Content-Type" content="text/html;charset=us-ascii">')
+print(' <title>Software packages that Tahoe-LAFS depends on</title>')
+print('</head>')
+print('<body>')
+print('<h2>What is this?</h2>')
+print('<p>See <a href="https://tahoe-lafs.org/trac/tahoe-lafs/browser/docs/quickstart.rst">quickstart.rst</a>, <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Installation">wiki:Installation</a>, and <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CompileError">wiki:CompileError</a>.')
+print('<h2>Software packages that Tahoe-LAFS depends on</h2>')
+print()
for pyver in reversed(sorted(python_versions)):
greybackground = False
if pyver:
- print '<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,)
- print '<table border="1">'
- print ' <tr>'
- print ' <th style="background-color: #FFFFD0" width="%d%%"> Platform </th>' % (width,)
+ print('<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,))
+ print('<table border="1">')
+ print(' <tr>')
+ print(' <th style="background-color: #FFFFD0" width="%d%%"> Platform </th>' % (width,))
for pkg in sorted(platform_dependent_pkgs):
- print ' <th style="background-color:#FFE8FF;" width="%d%%"> %s </th>' % (width, pkg)
- print ' </tr>'
+ print(' <th style="background-color:#FFE8FF;" width="%d%%"> %s </th>' % (width, pkg))
+ print(' </tr>')
first = True
for platform in sorted(matrix[pyver]):
style1 += bgstyle
style2 = first and 'border-top: 2px solid #000000' or ''
style2 += bgstyle
- print ' <tr>'
- print ' <td style="%s"> %s </td>' % (style1, platform,)
+ print(' <tr>')
+ print(' <td style="%s"> %s </td>' % (style1, platform,))
for pkg in sorted(platform_dependent_pkgs):
files = [n for (p, n) in row_files if pkg == p]
bestfile = files and max([(pkg_resources.parse_version(x), x) for x in files])[1] or None
if pkg == 'pywin32' and not platform.startswith('windows'):
- print ' <td style="border: 0; text-align: center; %s"> n/a </td>' % (style2,)
+ print(' <td style="border: 0; text-align: center; %s"> n/a </td>' % (style2,))
else:
- print ' <td style="%s"> %s</td>' % (style2,
- bestfile and '<a href="%s">%s</a>' % (bestfile, bestfile) or '')
- print ' </tr>'
+ print(' <td style="%s"> %s</td>' % (style2,
+ bestfile and '<a href="%s">%s</a>' % (bestfile, bestfile) or ''))
+ print(' </tr>')
first = False
- print '</table>'
- print
+ print('</table>')
+ print()
-print '<p>Packages that are platform-independent or source-only:</p>'
-print '<table border="1">'
-print ' <tr>'
-print ' <th style="background-color:#FFFFD0;"> Package </th>'
-print ' <th style="background-color:#FFE8FF;"> All Python versions </th>'
-print ' </tr>'
+print('<p>Packages that are platform-independent or source-only:</p>')
+print('<table border="1">')
+print(' <tr>')
+print(' <th style="background-color:#FFFFD0;"> Package </th>')
+print(' <th style="background-color:#FFE8FF;"> All Python versions </th>')
+print(' </tr>')
style1 = 'border-top: 2px solid #000000; background-color:#FFFFF0;'
style2 = 'border-top: 2px solid #000000;'
m = matrix['']['']
for pkg in sorted(platform_independent_pkgs):
- print ' <tr>'
- print ' <th style="%s"> %s </th>' % (style1, pkg)
+ print(' <tr>')
+ print(' <th style="%s"> %s </th>' % (style1, pkg))
files = [n for (p, n) in m if pkg == p]
- print ' <td style="%s"> %s</td>' % (style2, '<br> '.join(['<a href="%s">%s</a>' % (f, f) for f in files]))
- print ' </tr>'
+ print(' <td style="%s"> %s</td>' % (style2, '<br> '.join(['<a href="%s">%s</a>' % (f, f) for f in files])))
+ print(' </tr>')
-print '</table>'
+print('</table>')
# The document does validate, but not when it is included at the bottom of a directory listing.
#print '<hr>'
#print '<a href="http://validator.w3.org/check?uri=referer" target="_blank"><img border="0" src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01 Transitional" height="31" width="88"></a>'
-print '</body></html>'
+print('</body></html>')
"""
+from __future__ import print_function
import os.path, re
def get_version():
if __name__ == '__main__':
verstr = get_version()
- print verstr
+ print(verstr)
#!/usr/bin/env python
+from __future__ import print_function
import sys
-print "python%d.%d" % (sys.version_info[:2])
+print("python%d.%d" % (sys.version_info[:2]))
python misc/build_helpers/run-with-pythonpath.py python foo.py
"""
+from __future__ import print_function
import os, sys
# figure out where support/lib/pythonX.X/site-packages is
if cmd and cmd[0] not in "/~.":
cmds = which(cmd)
if not cmds:
- print >>sys.stderr, "'%s' not found on PATH" % (cmd,)
+ print("'%s' not found on PATH" % (cmd,), file=sys.stderr)
sys.exit(-1)
cmd = cmds[0]
#!/usr/bin/env python
+from __future__ import print_function
import os, sys, re, glob
APPNAME='allmydata-tahoe'
adglobals = {}
-execfile(os.path.join('..', 'src', 'allmydata', '_auto_deps.py'), adglobals)
+exec(compile(open(os.path.join('..', 'src', 'allmydata', '_auto_deps.py')).read(), os.path.join('..', 'src', 'allmydata', '_auto_deps.py'), 'exec'), adglobals)
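# execfile() is gone in Python 3; the compile(open(...).read(), ...) call above
# is the portable replacement. The same pattern as a small reusable helper
# (a sketch with illustrative names, not a function this patch defines):
def _exec_file(path, namespace):
    # compiling with the real filename keeps tracebacks pointing at the source
    with open(path) as f:
        source = f.read()
    exec(compile(source, path, 'exec'), namespace)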
install_requires = adglobals['install_requires']
test_requires = adglobals.get('test_requires', ['mock'])
__requires__ = [APPNAME + '==' + version] + install_requires + test_requires
-print "Requirements: %r" % (__requires__,)
+print("Requirements: %r" % (__requires__,))
eggz = glob.glob(os.path.join('..', 'setuptools-*.egg'))
if len(eggz) > 0:
egg = os.path.realpath(eggz[0])
- print "Inserting egg on sys.path: %r" % (egg,)
+ print("Inserting egg on sys.path: %r" % (egg,))
sys.path.insert(0, egg)
import pkg_resources
pkg_resources # hush pyflakes
modulename = None
-for i in xrange(1, len(sys.argv)):
+for i in range(1, len(sys.argv)):
if not sys.argv[i].startswith('-'):
modulename = sys.argv[i]
break
if not same:
try:
same = os.path.samefile(root_from_cwd, rootdir)
- except AttributeError, e:
+ except AttributeError as e:
e # hush pyflakes
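# "except E as e" is the exception-binding form accepted by both Python 2.6+
# and Python 3 (the old "except E, e" spelling is a syntax error on 3.x).
# A standalone sketch:
try:
    int("not a number")
except ValueError as err:
    assert "not a number" in str(err)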
if not same:
#! /usr/bin/env python
+from __future__ import print_function
import locale, os, platform, subprocess, sys, traceback
added_zetuptoolz_egg = False
eggz = glob.glob('setuptools-*.egg')
if len(eggz) > 0:
egg = os.path.realpath(eggz[0])
- print >>sys.stderr, "Inserting egg on sys.path: %r" % (egg,)
+ print("Inserting egg on sys.path: %r" % (egg,), file=sys.stderr)
added_zetuptoolz_egg = True
sys.path.insert(0, egg)
try:
import platform
out = platform.platform()
- print "platform:", foldlines(out)
- print "machine: ", platform.machine()
+ print("platform:", foldlines(out))
+ print("machine: ", platform.machine())
if hasattr(platform, 'linux_distribution'):
- print "linux_distribution:", repr(platform.linux_distribution())
+ print("linux_distribution:", repr(platform.linux_distribution()))
except EnvironmentError:
sys.stderr.write("\nGot exception using 'platform'. Exception follows\n")
traceback.print_exc(file=sys.stderr)
pass
def print_python_ver():
- print "python:", foldlines(sys.version)
- print 'maxunicode: ' + str(sys.maxunicode)
+ print("python:", foldlines(sys.version))
+ print('maxunicode: ' + str(sys.maxunicode))
def print_python_encoding_settings():
- print 'filesystem.encoding: ' + str(sys.getfilesystemencoding())
- print 'locale.getpreferredencoding: ' + str(locale.getpreferredencoding())
+ print('filesystem.encoding: ' + str(sys.getfilesystemencoding()))
+ print('locale.getpreferredencoding: ' + str(locale.getpreferredencoding()))
try:
- print 'locale.defaultlocale: ' + str(locale.getdefaultlocale())
- except ValueError, e:
- print 'got exception from locale.getdefaultlocale(): ', e
- print 'locale.locale: ' + str(locale.getlocale())
+ print('locale.defaultlocale: ' + str(locale.getdefaultlocale()))
+ except ValueError as e:
+ print('got exception from locale.getdefaultlocale(): ', e)
+ print('locale.locale: ' + str(locale.getlocale()))
def print_stdout(cmdlist, label=None, numlines=None):
try:
label = cmdlist[0]
res = subprocess.Popen(cmdlist, stdin=open(os.devnull),
stdout=subprocess.PIPE).communicate()[0]
- print label + ': ' + foldlines(res, numlines)
- except EnvironmentError, e:
+ print(label + ': ' + foldlines(res, numlines))
+ except EnvironmentError as e:
if isinstance(e, OSError) and e.errno == 2:
- print label + ': no such file or directory'
+ print(label + ': no such file or directory')
return
sys.stderr.write("\nGot exception invoking '%s'. Exception follows.\n" % (cmdlist[0],))
traceback.print_exc(file=sys.stderr)
def print_as_ver():
if os.path.exists('a.out'):
- print "WARNING: a file named a.out exists, and getting the version of the 'as' assembler writes to that filename, so I'm not attempting to get the version of 'as'."
+ print("WARNING: a file named a.out exists, and getting the version of the 'as' assembler writes to that filename, so I'm not attempting to get the version of 'as'.")
return
try:
res = subprocess.Popen(['as', '-version'], stdin=open(os.devnull),
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
- print 'as: ' + foldlines(res[0]+' '+res[1])
+ print('as: ' + foldlines(res[0]+' '+res[1]))
if os.path.exists('a.out'):
os.remove('a.out')
except EnvironmentError:
def print_setuptools_ver():
if added_zetuptoolz_egg:
# it would be misleading to report the bundled version of zetuptoolz as the installed version
- print "setuptools: using bundled egg"
+ print("setuptools: using bundled egg")
return
try:
import pkg_resources
out = str(pkg_resources.require("setuptools"))
- print "setuptools:", foldlines(out)
+ print("setuptools:", foldlines(out))
except (ImportError, EnvironmentError):
sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of setuptools. Exception follows\n")
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
pass
except pkg_resources.DistributionNotFound:
- print 'setuptools: DistributionNotFound'
+ print('setuptools: DistributionNotFound')
pass
def print_py_pkg_ver(pkgname, modulename=None):
if modulename is None:
modulename = pkgname
- print
+ print()
try:
import pkg_resources
out = str(pkg_resources.require(pkgname))
- print pkgname + ': ' + foldlines(out)
+ print(pkgname + ': ' + foldlines(out))
except (ImportError, EnvironmentError):
sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of %s. Exception follows.\n" % (pkgname,))
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
pass
except pkg_resources.DistributionNotFound:
- print pkgname + ': DistributionNotFound'
+ print(pkgname + ': DistributionNotFound')
pass
try:
__import__(modulename)
pass
else:
modobj = sys.modules.get(modulename)
- print pkgname + ' module: ' + str(modobj)
+ print(pkgname + ' module: ' + str(modobj))
try:
- print pkgname + ' __version__: ' + str(modobj.__version__)
+ print(pkgname + ' __version__: ' + str(modobj.__version__))
except AttributeError:
pass
print_platform()
-print
+print()
print_python_ver()
-print
+print()
print_stdout(['locale'])
print_python_encoding_settings()
-print
+print()
print_stdout(['buildbot', '--version'])
print_stdout(['buildslave', '--version'])
if 'windows' in platform.system().lower():
#!/usr/bin/env python
+from __future__ import print_function
from allmydata import __version__ as v
import sys
}
for line in input.readlines():
- print line % vern,
+ print(line % vern, end=' ')
#!/usr/bin/env python
+from __future__ import print_function
import sys
from subprocess import Popen, PIPE
cmd = ["darcs", "whatsnew", "-l"]
p = Popen(cmd, stdout=PIPE)
output = p.communicate()[0]
-print output
+print(output)
if output == "No changes!\n":
sys.exit(0)
sys.exit(1)
#!/usr/bin/env python
+from __future__ import print_function
import sys
from subprocess import Popen, PIPE
cmd = ["git", "status", "--porcelain"]
p = Popen(cmd, stdout=PIPE)
output = p.communicate()[0]
-print output
+print(output)
if output == "":
sys.exit(0)
sys.exit(1)
#
# bin/tahoe @misc/coding_tools/check-interfaces.py
+from __future__ import print_function
import os, sys, re, platform
import zope.interface as zi
for interface in interfaces:
try:
verifyClass(interface, cls)
- except Exception, e:
- print >>_err, ("%s.%s does not correctly implement %s.%s:\n%s"
- % (cls.__module__, cls.__name__,
- interface.__module__, interface.__name__, e))
+ except Exception as e:
+ print("%s.%s does not correctly implement %s.%s:\n%s"
+ % (cls.__module__, cls.__name__,
+ interface.__module__, interface.__name__, e), file=_err)
else:
_other_modules_with_violations.add(cls.__module__)
return cls
if len(sys.argv) >= 2:
if sys.argv[1] == '--help' or len(sys.argv) > 2:
- print >>_err, "Usage: check-miscaptures.py [SOURCEDIR]"
+ print("Usage: check-miscaptures.py [SOURCEDIR]", file=_err)
return
srcdir = sys.argv[1]
else:
for fn in filenames:
(basename, ext) = os.path.splitext(fn)
if ext in ('.pyc', '.pyo') and not os.path.exists(os.path.join(dirpath, basename+'.py')):
- print >>_err, ("Warning: no .py source file for %r.\n"
- % (os.path.join(dirpath, fn),))
+ print(("Warning: no .py source file for %r.\n"
+ % (os.path.join(dirpath, fn),)), file=_err)
if ext == '.py' and not excluded_file_basenames.match(basename):
relpath = os.path.join(dirpath[len(srcdir)+1:], basename)
module = relpath.replace(os.sep, '/').replace('/', '.')
try:
__import__(module)
- except ImportError, e:
+ except ImportError as e:
if not is_windows and (' _win' in str(e) or 'win32' in str(e)):
- print >>_err, ("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
- % (module, str(e)))
+ print(("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
+ % (module, str(e))), file=_err)
else:
import traceback
traceback.print_exc(file=_err)
- print >>_err
+ print(file=_err)
others = list(_other_modules_with_violations)
others.sort()
- print >>_err, "There were also interface violations in:\n", ", ".join(others), "\n"
+ print("There were also interface violations in:\n", ", ".join(others), "\n", file=_err)
# Forked from
# should never get here, since classes should not provide functions
meth = fromFunction(attr, iface, name=name)
elif (isinstance(attr, MethodTypes)
- and type(attr.im_func) is FunctionType):
+ and type(attr.__func__) is FunctionType):
meth = fromMethod(attr, iface, name)
else:
if not callable(attr):
#! /usr/bin/python
+from __future__ import print_function
import os, sys, compiler
from compiler.ast import Node, For, While, ListComp, AssName, Name, Lambda, Function
def check_thing(parser, thing):
try:
ast = parser(thing)
- except SyntaxError, e:
+ except SyntaxError as e:
return e
else:
results = []
def report(out, path, results):
for r in results:
- print >>out, path + (":%r %s captures %r assigned at line %d" % r)
+ print(path + (":%r %s captures %r assigned at line %d" % r), file=out)
def check(sources, out):
class Counts:
def _process(path):
results = check_file(path)
if isinstance(results, SyntaxError):
- print >>out, path + (" NOT ANALYSED due to syntax error: %s" % results)
+ print(path + (" NOT ANALYSED due to syntax error: %s" % results), file=out)
counts.error_files += 1
else:
report(out, path, results)
counts.suspect_files += 1
for source in sources:
- print >>out, "Checking %s..." % (source,)
+ print("Checking %s..." % (source,), file=out)
if os.path.isfile(source):
_process(source)
else:
if ext == '.py':
_process(os.path.join(dirpath, fn))
- print >>out, ("%d suspiciously captured variables in %d out of %d file(s)."
- % (counts.n, counts.suspect_files, counts.processed_files))
+ print("%d suspiciously captured variables in %d out of %d file(s)."
+ % (counts.n, counts.suspect_files, counts.processed_files), file=out)
if counts.error_files > 0:
- print >>out, ("%d file(s) not processed due to syntax errors."
- % (counts.error_files,))
+ print("%d file(s) not processed due to syntax errors."
+ % (counts.error_files,), file=out)
return counts.n
# ./rumid.py foo.py
+from __future__ import print_function
import sys, re, os
ok = True
umid = mo.group(1)
if umid in umids:
oldfn, oldlineno = umids[umid]
- print "%s:%d: duplicate umid '%s'" % (fn, lineno, umid)
- print "%s:%d: first used here" % (oldfn, oldlineno)
+ print("%s:%d: duplicate umid '%s'" % (fn, lineno, umid))
+ print("%s:%d: first used here" % (oldfn, oldlineno))
ok = False
umids[umid] = (fn,lineno)
if ok:
- print "all umids are unique"
+ print("all umids are unique")
else:
- print "some umids were duplicates"
+ print("some umids were duplicates")
sys.exit(1)
#!/usr/bin/env python
+from __future__ import print_function
import os, sys
from twisted.python import usage
line = line[:-1]
if line.rstrip() != line:
# the %s:%d:%d: lets emacs' compile-mode jump to those locations
- print "%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1)
+ print("%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1))
found[0] = True
f.close()
setup.py run_with_pythonpath -p -c 'misc/make-canary-files.py ARGS..'
"""
+from __future__ import print_function
import os, sha
from twisted.python import usage
nodes[nodeid] = nickname
if opts["k"] != 3 or opts["N"] != 10:
- print "note: using non-default k/N requires patching the Tahoe code"
- print "src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS"
+ print("note: using non-default k/N requires patching the Tahoe code")
+ print("src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS")
convergence_file = os.path.expanduser(opts["convergence"])
convergence_s = open(convergence_file, "rb").read().strip()
while True:
attempts += 1
suffix = base32.b2a(os.urandom(10))
- if verbose: print " trying", suffix,
+ if verbose: print(" trying", suffix, end=' ')
data = prefix + suffix + "\n"
assert len(data) > 55 # no LIT files
# now, what storage index will this get?
eu = upload.EncryptAnUploadable(u)
d = eu.get_storage_index() # this happens to run synchronously
def _got_si(si, data=data):
- if verbose: print "SI", base32.b2a(si),
+ if verbose: print("SI", base32.b2a(si), end=' ')
peerlist = get_permuted_peers(si)
if peerlist[0] == target:
# great!
- if verbose: print " yay!"
+ if verbose: print(" yay!")
fn = base32.b2a(target)
if nodes[target]:
nickname = nodes[target].replace("/", "_")
open(fn, "w").write(data)
return True
# nope, must try again
- if verbose: print " boo"
+ if verbose: print(" boo")
return False
d.addCallback(_got_si)
# get sneaky and look inside the Deferred for the synchronous result
attempts = []
for target in nodes:
target_s = base32.b2a(target)
- print "working on", target_s
+ print("working on", target_s)
attempts.append(find_share_for_target(target))
-print "done"
-print "%d attempts total, avg %d per target, max %d" % \
- (sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts))
+print("done")
+print("%d attempts total, avg %d per target, max %d" % \
+ (sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts)))
#!/usr/bin/env python
+from __future__ import print_function
from foolscap import Tub, eventual
from twisted.internet import reactor
import sys
import pprint
def oops(f):
- print "ERROR"
- print f
+ print("ERROR")
+ print(f)
def fetch(furl):
t = Tub()
# -*- python -*-
+from __future__ import print_function
from twisted.internet import reactor
import sys
tub.connectTo(furl, self.connected)
def connected(self, rref):
- print "subscribing"
+ print("subscribing")
d = rref.callRemote("get_averages")
d.addCallback(self.remote_averages)
d.addErrback(log.err)
row.append(self._average_N(pid, avg))
current.append(tuple(row))
self.current = current
- print current
+ print(current)
for ob in self.observers:
eventual.eventually(self.notify, ob)
# feed this the results of 'tahoe catalog-shares' for all servers
+from __future__ import print_function
import sys
chk_encodings = {}
sdmf_multiple_versions.sort()
if chk_multiple_encodings:
- print
- print "CHK multiple encodings:"
+ print()
+ print("CHK multiple encodings:")
for (si,lines) in chk_multiple_encodings:
- print " " + si
+ print(" " + si)
for line in sorted(lines):
- print " " + line
+ print(" " + line)
if sdmf_multiple_encodings:
- print
- print "SDMF multiple encodings:"
+ print()
+ print("SDMF multiple encodings:")
for (si,lines) in sdmf_multiple_encodings:
- print " " + si
+ print(" " + si)
for line in sorted(lines):
- print " " + line
+ print(" " + line)
if sdmf_multiple_versions:
- print
- print "SDMF multiple versions:"
+ print()
+ print("SDMF multiple versions:")
for (si,lines) in sdmf_multiple_versions:
- print " " + si
+ print(" " + si)
for line in sorted(lines):
- print " " + line
+ print(" " + line)
#! /usr/bin/env python
+from __future__ import print_function
from foolscap import Tub
from foolscap.eventual import eventually
import sys
d = t.getReference(sys.argv[1])
d.addCallback(lambda rref: rref.callRemote("get_memory_usage"))
def _got(res):
- print res
+ print(res)
reactor.stop()
d.addCallback(_got)
factorial(n) with n<0 is -factorial(abs(n))
"""
result = 1
- for i in xrange(1, abs(n)+1):
+ for i in range(1, abs(n)+1):
result *= i
assert n >= 0
return result
# calculate n!/k! as one product, avoiding factors that
# just get canceled
P = k+1
- for i in xrange(k+2, n+1):
+ for i in range(k+2, n+1):
P *= i
# if you are paranoid:
# C, rem = divmod(P, factorial(n-k))
python bench_spans.py run-112-above28-flog-dump-sh8-on-nsziz.txt
"""
+from __future__ import print_function
from pyutil import benchutil
from allmydata.util.spans import DataSpans
elif INIT_S in inline:
pass
else:
- print "Warning, didn't recognize this line: %r" % (inline,)
+ print("Warning, didn't recognize this line: %r" % (inline,))
count += 1
inline = self.inf.readline()
# print self.stats
benchutil.print_bench_footer(UNITS_PER_SECOND=1000000)
-print "(microseconds)"
+print("(microseconds)")
for N in [600, 6000, 60000]:
b = B(open(sys.argv[1], 'rU'))
- print "%7d" % N,
+ print("%7d" % N, end=' ')
benchutil.rep_bench(b.run, N, b.init, UNITS_PER_SECOND=1000000)
"""
+from __future__ import print_function
import sys, os.path
#URI:7jzbza6iwdsk5xbxsvdgjaugyrhetw64zpflp4gihmyh5krjblra====:a5qdejwbimu5b2wfke7xwexxlq======:gzeub5v42rjbgd7ccawnahu2evqd42lpdpzd447c6zkmdvjkpowq====:25:100:219889
for mode in MODES:
total[mode] += slotsize(mode, len(files), len(dirs)) + stringsize
- print "%d directories" % num_dirs
- print "%d files" % num_files
+ print("%d directories" % num_dirs)
+ print("%d files" % num_files)
for mode in sorted(total.keys()):
- print "%s: %d bytes" % (mode, total[mode])
+ print("%s: %d bytes" % (mode, total[mode]))
if __name__ == '__main__':
Mcycles_per_block = cycles_per_byte * L_block / (8 * 1000000.0)
+from __future__ import print_function
from math import floor, ceil, log, log1p, pow, e
from sys import stderr
from gc import collect
# Winternitz with B < 4 is never optimal. For example, going from B=4 to B=2 halves the
# chain depth, but that is cancelled out by doubling (roughly) the number of digits.
-range_B = xrange(4, 33)
+range_B = range(4, 33)
M = pow(2, lg_M)
T_min = ceil_div(lg_M - lg_K1, lg_K)
last_q = None
- for T in xrange(T_min, T_min+21):
+ for T in range(T_min, T_min+21):
# lg(total number of leaf private keys)
lg_S = lg_K1 + lg_K*T
lg_N = lg_S + lg_K2
# We approximate lg(M-x) as lg(M)
lg_px_step = lg_M + lg_p - lg_1_p
- for x in xrange(1, j):
+ for x in range(1, j):
lg_px[x] = lg_px[x-1] - lg(x) + lg_px_step
q = None
# Find the minimum acceptable value of q.
- for q_cand in xrange(1, q_max+1):
+ for q_cand in range(1, q_max+1):
lg_q = lg(q_cand)
- lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in xrange(1, j)]
+ lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in range(1, j)]
if max(lg_pforge) < -L_hash + lg(j) and lg_px[j-1] + 1.0 < -L_hash:
- #print "K = %d, K1 = %d, K2 = %d, L_hash = %d, lg_K2 = %.3f, q = %d, lg_pforge_1 = %.3f, lg_pforge_2 = %.3f, lg_pforge_3 = %.3f" \
- # % (K, K1, K2, L_hash, lg_K2, q, lg_pforge_1, lg_pforge_2, lg_pforge_3)
+ #print("K = %d, K1 = %d, K2 = %d, L_hash = %d, lg_K2 = %.3f, q = %d, lg_pforge_1 = %.3f, lg_pforge_2 = %.3f, lg_pforge_3 = %.3f"
+ # % (K, K1, K2, L_hash, lg_K2, q, lg_pforge_1, lg_pforge_2, lg_pforge_3))
q = q_cand
break
def search():
for L_hash in range_L_hash:
- print >>stderr, "collecting... \r",
+ print("collecting... \r", end=' ', file=stderr)
collect()
- print >>stderr, "precomputing... \r",
+ print("precomputing... \r", end=' ', file=stderr)
"""
# d/dq (lg(q+1) + L_hash/q) = 1/(ln(2)*(q+1)) - L_hash/q^2
K_max = 50
c2 = compressions(2*L_hash + L_label)
c3 = compressions(3*L_hash + L_label)
- for dau in xrange(0, 10):
+ for dau in range(0, 10):
a = pow(2, dau)
- for tri in xrange(0, ceil_log(30-dau, 3)):
+ for tri in range(0, ceil_log(30-dau, 3)):
x = int(a*pow(3, tri))
h = dau + 2*tri
c_x = int(sum_powers(2, dau)*c2 + a*sum_powers(3, tri)*c3)
- for y in xrange(1, x+1):
+ for y in range(1, x+1):
if tri > 0:
# If the bottom level has arity 3, then for every 2 nodes by which the tree is
# imperfect, we can save c3 compressions by pruning 3 leaves back to their parent.
trees[y] = (h, c_y, (dau, tri))
#for x in xrange(1, K_max+1):
- # print x, trees[x]
+ # print("%r: %r" % (x, trees[x]))
candidates = []
progress = 0
fuzz = 0
complete = (K_max-1)*(2200-200)/100
- for K in xrange(2, K_max+1):
- for K2 in xrange(200, 2200, 100):
- for K1 in xrange(max(2, K-fuzz), min(K_max, K+fuzz)+1):
+ for K in range(2, K_max+1):
+ for K2 in range(200, 2200, 100):
+ for K1 in range(max(2, K-fuzz), min(K_max, K+fuzz)+1):
candidates += calculate(K, K1, K2, q_max, L_hash, trees)
progress += 1
- print >>stderr, "searching: %3d %% \r" % (100.0 * progress / complete,),
+ print("searching: %3d %% \r" % (100.0 * progress / complete,), end=' ', file=stderr)
- print >>stderr, "filtering... \r",
+ print("filtering... \r", end=' ', file=stderr)
step = 2.0
bins = {}
limit = floor_div(limit_cost, step)
- for bin in xrange(0, limit+2):
+ for bin in range(0, limit+2):
bins[bin] = []
for c in candidates:
# For each in a range of signing times, find the best candidate.
best = []
- for bin in xrange(0, limit):
+ for bin in range(0, limit):
candidates = bins[bin] + bins[bin+1] + bins[bin+2]
if len(candidates) > 0:
best += [min(candidates, key=lambda c: c['sig_bytes'])]
"%(c_ver)7d +/-%(c_ver_pm)5d (%(Mcycles_ver)5.2f +/-%(Mcycles_ver_pm)5.2f) "
) % candidate
- print >>stderr, " \r",
+ print(" \r", end=' ', file=stderr)
if len(best) > 0:
- print " B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )"
- print "---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------"
+ print(" B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )")
+ print("---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------")
best.sort(key=lambda c: (c['sig_bytes'], c['cost']))
last_sign = None
last_ver = None
for c in best:
if last_sign is None or c['c_sign'] < last_sign or c['c_ver'] < last_ver:
- print format_candidate(c)
+ print(format_candidate(c))
last_sign = c['c_sign']
last_ver = c['c_ver']
- print
+ print()
else:
- print "No candidates found for L_hash = %d or higher." % (L_hash)
+ print("No candidates found for L_hash = %d or higher." % (L_hash))
return
del bins
del best
-print "Maximum signature size: %d bytes" % (limit_bytes,)
-print "Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost)
-print "Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \
- % (L_block, L_pad, L_label, cycles_per_byte)
-print "PRF output size: %d bits" % (L_prf,)
-print "Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,)
+print("Maximum signature size: %d bytes" % (limit_bytes,))
+print("Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost))
+print("Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \
+ % (L_block, L_pad, L_label, cycles_per_byte))
+print("PRF output size: %d bits" % (L_prf,))
+print("Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,))
search()
# used to discuss ticket #302: "stop permuting peerlist?"
+from __future__ import print_function
# import time
import math
from hashlib import md5 # sha1, sha256
sizes = [make_up_a_file_size(str(i)) for i in range(10000)]
avg_filesize = sum(sizes)/len(sizes)
-print "average file size:", abbreviate_space(avg_filesize)
+print("average file size:", abbreviate_space(avg_filesize))
SERVER_CAPACITY = 10**12
prev_s = self.servers[(i-1)%len(self.servers)]
diff = "%032x" % (int(s.nodeid,16) - int(prev_s.nodeid,16))
s.prev_diff = diff
- print s, s.prev_diff
+ print(s, s.prev_diff)
- print "sorted by delta"
+ print("sorted by delta")
for s in sorted(self.servers, key=lambda s:s.prev_diff):
- print s, s.prev_diff
+ print(s, s.prev_diff)
def servers_for_si(self, si):
if self.permute:
return "".join(bits)
def dump_usage(self, numfiles, avg_space_per_file):
- print "uploaded", numfiles
+ print("uploaded", numfiles)
# avg_space_per_file measures expected grid-wide ciphertext per file
used = list(reversed(sorted([s.used for s in self.servers])))
# used is actual per-server ciphertext
std_deviation = math.sqrt(variance)
sd_of_total = std_deviation / avg_usage_per_file
- print "min/max/(exp) usage-pf-ps %s/%s/(%s):" % (
+ print("min/max/(exp) usage-pf-ps %s/%s/(%s):" % (
abbreviate_space(usedpf[-1]),
abbreviate_space(usedpf[0]),
- abbreviate_space(avg_usage_per_file) ),
- print "spread-pf: %s (%.2f%%)" % (
- abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file),
+ abbreviate_space(avg_usage_per_file) ), end=' ')
+ print("spread-pf: %s (%.2f%%)" % (
+ abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file), end=' ')
#print "average_usage:", abbreviate_space(average_usagepf)
- print "stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
- 100.0*sd_of_total)
+ print("stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
+ 100.0*sd_of_total))
if self.SHOW_MINMAX:
s2 = sorted(self.servers, key=lambda s: s.used)
- print "least:", s2[0].nodeid
- print "most:", s2[-1].nodeid
+ print("least:", s2[0].nodeid)
+ print("most:", s2[-1].nodeid)
class Options(usage.Options):
server_was_full = True
remaining_servers.discard(s)
if not remaining_servers:
- print "-- GRID IS FULL"
+ print("-- GRID IS FULL")
ring.dump_usage(filenum, avg_space_per_file)
return filenum
index += 1
if server_was_full and all_servers_have_room:
all_servers_have_room = False
- print "-- FIRST SERVER FULL"
+ print("-- FIRST SERVER FULL")
ring.dump_usage(filenum, avg_space_per_file)
if file_was_wrapped and no_files_have_wrapped:
no_files_have_wrapped = False
- print "-- FIRST FILE WRAPPED"
+ print("-- FIRST FILE WRAPPED")
ring.dump_usage(filenum, avg_space_per_file)
total_capacity = opts["servers"]*SERVER_CAPACITY
avg_space_per_file = avg_filesize * opts["N"] / opts["k"]
avg_files = total_capacity / avg_space_per_file
- print "expected number of uploads:", avg_files
+ print("expected number of uploads:", avg_files)
if opts["permute"]:
- print " PERMUTED"
+ print(" PERMUTED")
else:
- print " LINEAR"
+ print(" LINEAR")
seed = opts["seed"]
ring = Ring(opts["servers"], seed, opts["permute"])
# WARNING. There is a bug in this script so that it does not simulate the actual Tahoe Two server selection algorithm that it was intended to simulate. See http://allmydata.org/trac/tahoe-lafs/ticket/302 (stop permuting peerlist, use SI as offset into ring instead?)
+from __future__ import print_function
import random
SERVER_CAPACITY = 10**12
filledat = []
for test in range(iters):
(servers, doubled_up_shares) = go(permutedpeerlist)
- print "doubled_up_shares: ", doubled_up_shares
+ print("doubled_up_shares: ", doubled_up_shares)
for server in servers:
fidx = server.full_at_tick
filledat.extend([0]*(fidx-len(filledat)+1))
if arg.startswith("--iters="):
iters = int(arg[8:])
if "--permute" in sys.argv:
- print "doing permuted peerlist, iterations: %d" % iters
+ print("doing permuted peerlist, iterations: %d" % iters)
test(True, iters)
else:
- print "doing simple ring, iterations: %d" % iters
+ print("doing simple ring, iterations: %d" % iters)
test(False, iters)
#! /usr/bin/env python
+from __future__ import print_function
import sha as shamodule
import os, random
size = random.randrange(1000)
n = random.choice(self.all_nodes)
if self.verbose:
- print "add_file(size=%d, from node %s)" % (size, n)
+ print("add_file(size=%d, from node %s)" % (size, n))
fileid = randomid()
able = n.publish_file(fileid, size)
if able:
if n.delete_file():
self.deleted_files += 1
return
- print "no files to delete"
+ print("no files to delete")
def _add_event(self, etype):
rate = getattr(self, "RATE_" + etype)
# self.print_stats(current_time, etype)
def print_stats_header(self):
- print "time: added failed lost avg_tried"
+ print("time: added failed lost avg_tried")
def print_stats(self, time, etype):
if not self.published_files:
avg_tried = "NONE"
else:
avg_tried = sum(self.published_files) / len(self.published_files)
- print time, etype, self.added_data, self.failed_files, self.lost_data_bytes, avg_tried, len(self.introducer.living_files), self.introducer.utilization
+ print(time, etype, self.added_data, self.failed_files, self.lost_data_bytes, avg_tried, len(self.introducer.living_files), self.introducer.utilization)
global s
s = None
# s.print_stats_header()
for i in range(1000):
s.do_event()
- print "%d files added, %d files deleted" % (s.added_files, s.deleted_files)
+ print("%d files added, %d files deleted" % (s.added_files, s.deleted_files))
return s
if __name__ == '__main__':
#! /usr/bin/env python
+from __future__ import print_function
import random, math, re
from twisted.python import usage
"share_storage_overhead", "share_transmission_overhead",
"storage_overhead", "storage_overhead_percentage",
"bytes_until_some_data"):
- print k, getattr(self, k)
+ print(k, getattr(self, k))
def fmt(num, trim=False):
if num < KiB:
mode = opts["mode"]
arity = opts["arity"]
# 0123456789012345678901234567890123456789012345678901234567890123456
- print "mode=%s" % mode, " arity=%d" % arity
- print " storage storage"
- print "Size sharesize overhead overhead k d alacrity"
- print " (bytes) (%)"
- print "------- ------- -------- -------- ---- -- --------"
+ print("mode=%s" % mode, " arity=%d" % arity)
+ print(" storage storage")
+ print("Size sharesize overhead overhead k d alacrity")
+ print(" (bytes) (%)")
+ print("------- ------- -------- -------- ---- -- --------")
#sizes = [2 ** i for i in range(7, 41)]
radix = math.sqrt(10); expstep = 2
radix = 2; expstep = 2
out += " %4d" % int(s.block_arity)
out += " %2d" % int(s.block_tree_depth)
out += " %8s" % fmt(s.bytes_until_some_data)
- print out
+ print(out)
def graph():
#!/usr/bin/env python
+from __future__ import print_function
import sys, math
from allmydata import uri, storage
from allmydata.immutable import upload
def main():
filesize = int(sys.argv[1])
urisize, sharesize, sharespace = calc(filesize)
- print "urisize:", urisize
- print "sharesize: %10d" % sharesize
- print "sharespace: %10d" % sharespace
- print "desired expansion: %1.1f" % (1.0 * 10 / 3)
- print "effective expansion: %1.1f" % (1.0 * sharespace / filesize)
+ print("urisize:", urisize)
+ print("sharesize: %10d" % sharesize)
+ print("sharespace: %10d" % sharespace)
+ print("desired expansion: %1.1f" % (1.0 * 10 / 3))
+ print("effective expansion: %1.1f" % (1.0 * sharespace / filesize))
def chart():
filesize = 2
while filesize < 2**20:
urisize, sharesize, sharespace = calc(int(filesize))
expansion = 1.0 * sharespace / int(filesize)
- print "%d,%d,%d,%1.2f" % (int(filesize), urisize, sharespace, expansion)
+ print("%d,%d,%d,%1.2f" % (int(filesize), urisize, sharespace, expansion))
filesize = filesize * 2**0.5
if __name__ == '__main__':
#! /usr/bin/env python
# -*- coding: utf-8 -*-
-import sys; assert sys.version_info < (3,), ur"Tahoe-LAFS does not run under Python 3. Please use a version of Python between 2.6 and 2.7.x inclusive."
# Tahoe-LAFS -- secure, distributed storage grid
#
#
# See the docs/about.rst file for licensing information.
-import glob, os, stat, subprocess, re
+import glob, os, stat, subprocess, re, sys
##### sys.path management
# the _auto_deps.install_requires list, which is used in the call to setup()
# below.
adglobals = {}
-execfile('src/allmydata/_auto_deps.py', adglobals)
+exec(compile(open('src/allmydata/_auto_deps.py').read(), 'src/allmydata/_auto_deps.py', 'exec'), adglobals)
install_requires = adglobals['install_requires']
if len(sys.argv) > 1 and sys.argv[1] == '--fakedependency':
def normalized_version(verstr, what=None):
try:
return verlib.NormalizedVersion(verlib.suggest_normalized_version(verstr))
- except (StandardError, verlib.IrrationalVersionError):
+ except (Exception, verlib.IrrationalVersionError):
cls, value, trace = sys.exc_info()
- raise PackagingError, ("could not parse %s due to %s: %s"
- % (what or repr(verstr), cls.__name__, value)), trace
+ msg = "could not parse %s due to %s: %s" % (what or repr(verstr), cls.__name__, value)
+ if sys.version_info[0] >= 3:
+ raise PackagingError(msg).with_traceback(trace)
+ else:
+ exec("raise c, v, t", {"c": PackagingError, "v": msg, "t": trace})
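# The two-branch re-raise above preserves the original traceback on both major
# versions. Since this patch adds six to install_requires, the same effect
# could be had with a single call (a sketch of the alternative, not what the
# patch itself does):
#
#     import six
#     six.reraise(PackagingError, PackagingError(msg), trace)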
def get_package_versions_and_locations():
import warnings
- from _auto_deps import package_imports, global_deprecation_messages, deprecation_messages, \
+ from ._auto_deps import package_imports, global_deprecation_messages, deprecation_messages, \
user_warning_messages, runtime_warning_messages, warning_imports
def package_dir(srcfile):
"""This function returns a list of errors due to any failed cross-checks."""
import pkg_resources
- from _auto_deps import install_requires
+ from ._auto_deps import install_requires
pkg_resources_vers_and_locs = dict([(p.project_name.lower(), (str(p.version), p.location))
for p in pkg_resources.require(install_requires)])
try:
pr_normver = normalized_version(pr_ver)
- except Exception, e:
+ except Exception as e:
errors.append("Warning: version number %r found for dependency %r by pkg_resources could not be parsed. "
"The version found by import was %r from %r. "
"pkg_resources thought it should be found at %r. "
else:
try:
imp_normver = normalized_version(imp_ver)
- except Exception, e:
+ except Exception as e:
errors.append("Warning: version number %r found for dependency %r (imported from %r) could not be parsed. "
"pkg_resources thought it should be version %r at %r. "
"The exception was %s: %s"
% (name, pr_ver, str(pr_normver), pr_loc, imp_ver, str(imp_normver), imp_loc))
imported_packages = set([p.lower() for (p, _) in imported_vers_and_locs_list])
- for pr_name, (pr_ver, pr_loc) in pkg_resources_vers_and_locs.iteritems():
+ for pr_name, (pr_ver, pr_loc) in pkg_resources_vers_and_locs.items():
if pr_name not in imported_packages and pr_name not in ignorable:
errors.append("Warning: dependency %r (version %r) found by pkg_resources not found by import."
% (pr_name, pr_ver))
# (On Python 3, we'll have failed long before this point.)
if sys.version_info < (2, 6):
try:
- version_string = ".".join(map(str, sys.version_info))
+ version_string = ".".join([str(v) for v in sys.version_info])
except Exception:
version_string = repr(sys.version_info)
errors.append("Tahoe-LAFS currently requires Python v2.6 or greater (but less than v3), not %s"
for requirement in install_requires:
try:
check_requirement(requirement, vers_and_locs)
- except (ImportError, PackagingError), e:
+ except (ImportError, PackagingError) as e:
errors.append("%s: %s" % (e.__class__.__name__, e))
if errors:
# pycryptopp-0.6.0 includes ed25519
"pycryptopp >= 0.6.0",
+ "six >= 1.3.0",
+
# Will be needed to test web apps, but not yet. See #1001.
#"windmill >= 1.3",
]
('pycrypto', 'Crypto'),
('pyasn1', 'pyasn1'),
('mock', 'mock'),
+ ('six', 'six'),
]
def require_more():
si = base32.a2b(si_s) # must be valid base32
self.entries[si] = reason
self.last_mtime = current_mtime
- except Exception, e:
+ except Exception as e:
twisted_log.err(e, "unparseable blacklist file")
raise
keysize = keysize or self.default_keysize
if self._remote:
d = self._remote.callRemote('get_rsa_key_pair', keysize)
- def make_key_objs((verifying_key, signing_key)):
+        def make_key_objs(key_strings):
+            (verifying_key, signing_key) = key_strings
v = rsa.create_verifying_key_from_string(verifying_key)
s = rsa.create_signing_key_from_string(signing_key)
return v, s
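# Tuple parameters in function signatures ("def f((a, b)):") were removed by
# PEP 3113, so callbacks throughout this patch now take a single argument and
# unpack it in the body. The general shape, as a standalone sketch with
# illustrative names:
def _got_pair(pair):
    (first, second) = pair   # explicit unpacking replaces the old tuple signature
    return first, second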
s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
s.setServiceParent(self)
s.startService()
- except Exception, e:
+ except Exception as e:
self.log("couldn't start drop-uploader: %r", args=(e,))
def _check_hotline(self, hotline_file):
precondition(desired_share_ids is None or len(desired_share_ids) <= self.max_shares, desired_share_ids, self.max_shares)
if desired_share_ids is None:
- desired_share_ids = range(self.max_shares)
+ desired_share_ids = list(range(self.max_shares))
for inshare in inshares:
assert len(inshare) == self.share_size, (len(inshare), self.share_size, self.data_size, self.required_shares)
from allmydata.immutable import upload
from allmydata.mutable.publish import MutableData
from twisted.python import log
+import six
def get_memory_usage():
# this is obviously linux-specific
d.addCallback(self._do_one_ping, everyone_left, results)
def _average(res):
averaged = {}
- for server_name,times in results.iteritems():
+ for server_name,times in six.iteritems(results):
averaged[server_name] = sum(times) / len(times)
return averaged
d.addCallback(_average)
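# six.iteritems(d) calls d.iteritems() on Python 2 and d.items() on Python 3,
# so loops like the one above keep their lazy-iteration behaviour on Python 2
# without breaking under Python 3. A standalone sketch with illustrative data:
import six
demo_times = {"server-a": [1.0, 3.0], "server-b": [2.0, 2.0]}
demo_averaged = dict((name, sum(ts) / len(ts)) for name, ts in six.iteritems(demo_times))
assert demo_averaged == {"server-a": 2.0, "server-b": 2.0}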
from allmydata.uri import LiteralFileURI, from_string, wrap_dirnode_cap
from pycryptopp.cipher.aes import AES
from allmydata.util.dictutil import AuxValueDict
+import six
def update_metadata(metadata, new_metadata, now):
def modify(self, old_contents, servermap, first_time):
children = self.node._unpack_contents(old_contents)
now = time.time()
- for (namex, (child, new_metadata)) in self.entries.iteritems():
+ for (namex, (child, new_metadata)) in six.iteritems(self.entries):
name = normalize(namex)
precondition(IFilesystemNode.providedBy(child), child)
def pack_children(childrenx, writekey, deep_immutable=False):
# initial_children must have metadata (i.e. {} instead of None)
children = {}
- for (namex, (node, metadata)) in childrenx.iteritems():
+ for (namex, (node, metadata)) in six.iteritems(childrenx):
precondition(isinstance(metadata, dict),
"directory creation requires metadata to be a dict, not None", metadata)
children[normalize(namex)] = (node, metadata)
log.msg(format="mutable cap for child %(name)s unpacked from an immutable directory",
name=quote_output(name, encoding='utf-8'),
facility="tahoe.webish", level=log.UNUSUAL)
- except CapConstraintError, e:
+ except CapConstraintError as e:
log.msg(format="unmet constraint on cap for child %(name)s unpacked from a directory:\n"
"%(message)s", message=e.args[0], name=quote_output(name, encoding='utf-8'),
facility="tahoe.webish", level=log.UNUSUAL)
exists a child of the given name, False if not."""
name = normalize(namex)
d = self._read()
- d.addCallback(lambda children: children.has_key(name))
+ d.addCallback(lambda children: name in children)
return d
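# dict.has_key() does not exist in Python 3; the "in" operator is the portable
# membership test and works on both versions. Standalone sketch:
demo_children = {u"file.txt": None}
assert u"file.txt" in demo_children and u"missing" not in demo_children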
def _get(self, children, name):
path-name elements.
"""
d = self.get_child_and_metadata_at_path(pathx)
- d.addCallback(lambda (node, metadata): node)
+ d.addCallback(lambda node_metadata: node_metadata[0])
return d
def get_child_and_metadata_at_path(self, pathx):
# this takes URIs
a = Adder(self, overwrite=overwrite,
create_readonly_node=self._create_readonly_node)
- for (namex, e) in entries.iteritems():
+ for (namex, e) in six.iteritems(entries):
assert isinstance(namex, unicode), namex
if len(e) == 2:
writecap, readcap = e
return defer.succeed("redundant rename/relink")
d = self.get_child_and_metadata(current_child_name)
- def _got_child( (child, metadata) ):
+        def _got_child(child_and_metadata):
+            (child, metadata) = child_and_metadata
return new_parent.set_node(new_child_name, child, metadata,
overwrite=overwrite)
d.addCallback(_got_child)
# in the nodecache) seem to consume about 2000 bytes.
dirkids = []
filekids = []
- for name, (child, metadata) in sorted(children.iteritems()):
+ for name, (child, metadata) in sorted(six.iteritems(children)):
childpath = path + [name]
if isinstance(child, UnknownNode):
walker.add_node(child, childpath)
from twisted.internet import defer
from twisted.cred import error, checkers, credentials
from allmydata.util import base32
+import six
class NeedRootcapLookupScheme(Exception):
"""Accountname+Password-based access schemes require some kind of
"email": username,
"passwd": password,
}
- for name, value in fields.iteritems():
+ for name, value in six.iteritems(fields):
form.append('Content-Disposition: form-data; name="%s"' % name)
form.append('')
assert isinstance(value, str)
NoSuchChildError
from allmydata.immutable.upload import FileHandle
from allmydata.util.fileutil import EncryptedTemporaryFile
+import six
class ReadFile:
implements(ftp.IReadFile)
def makeDirectory(self, path):
d = self._get_root(path)
- d.addCallback(lambda (root,path):
- self._get_or_create_directories(root, path))
+ d.addCallback(lambda root_path:
+ self._get_or_create_directories(root_path[0], root_path[1]))
return d
def _get_or_create_directories(self, node, path):
raise NoParentError
childname = path[-1]
d = self._get_root(path)
- def _got_root((root, path)):
+        def _got_root(root_and_path):
+            (root, path) = root_and_path
if not path:
raise NoParentError
return root.get_child_at_path(path[:-1])
f.trap(NoParentError)
raise ftp.PermissionDeniedError("cannot delete root directory")
d.addErrback(_convert_error)
- def _got_parent( (parent, childname) ):
+        def _got_parent(parent_and_childname):
+            (parent, childname) = parent_and_childname
d = parent.get(childname)
def _got_child(child):
if must_be_directory and not IDirectoryNode.providedBy(child):
def rename(self, fromPath, toPath):
# the target directory must already exist
d = self._get_parent(fromPath)
- def _got_from_parent( (fromparent, childname) ):
+        def _got_from_parent(fromparent_and_childname):
+            (fromparent, childname) = fromparent_and_childname
d = self._get_parent(toPath)
- d.addCallback(lambda (toparent, tochildname):
+ d.addCallback(lambda toparent_tochildname:
fromparent.move_child_to(childname,
- toparent, tochildname,
+ toparent_tochildname[0], toparent_tochildname[1],
overwrite=False))
return d
d.addCallback(_got_from_parent)
def _get_node_and_metadata_for_path(self, path):
d = self._get_root(path)
- def _got_root((root,path)):
+        def _got_root(root_and_path):
+            (root, path) = root_and_path
if path:
return root.get_child_and_metadata_at_path(path)
else:
d.addCallback(_got_root)
return d
- def _populate_row(self, keys, (childnode, metadata)):
+    def _populate_row(self, keys, childnode_and_metadata):
+        (childnode, metadata) = childnode_and_metadata
values = []
isdir = bool(IDirectoryNode.providedBy(childnode))
for key in keys:
elif key == "directory":
value = isdir
elif key == "permissions":
- value = 0600
+ value = 0o600
elif key == "hardlinks":
value = 1
elif key == "modified":
def stat(self, path, keys=()):
# for files only, I think
d = self._get_node_and_metadata_for_path(path)
- def _render((node,metadata)):
+        def _render(node_and_metadata):
+            (node, metadata) = node_and_metadata
assert not IDirectoryNode.providedBy(node)
return self._populate_row(keys, (node,metadata))
d.addCallback(_render)
# the interface claims that path is a list of unicodes, but in
# practice it is not
d = self._get_node_and_metadata_for_path(path)
- def _list((node, metadata)):
+        def _list(node_and_metadata):
+            (node, metadata) = node_and_metadata
if IDirectoryNode.providedBy(node):
return node.list()
return { path[-1]: (node, metadata) } # need last-edge metadata
d.addCallback(_list)
def _render(children):
results = []
- for (name, childnode) in children.iteritems():
+ for (name, childnode) in six.iteritems(children):
# the interface claims that the result should have a unicode
# object as the name, but it fails unless you give it a
# bytestring
def openForReading(self, path):
d = self._get_node_and_metadata_for_path(path)
- d.addCallback(lambda (node,metadata): ReadFile(node))
+ d.addCallback(lambda node_metadata: ReadFile(node_metadata[0]))
d.addErrback(self._convert_error)
return d
raise ftp.PermissionDeniedError("cannot STOR to root directory")
childname = path[-1]
d = self._get_root(path)
- def _got_root((root, path)):
+        def _got_root(root_and_path):
+            (root, path) = root_and_path
if not path:
raise ftp.PermissionDeniedError("cannot STOR to root directory")
return root.get_child_at_path(path[:-1])
+from __future__ import print_function
import heapq, traceback, array, stat, struct
from types import NoneType
from allmydata.immutable.upload import FileHandle
from allmydata.dirnode import update_metadata
from allmydata.util.fileutil import EncryptedTemporaryFile
+import six
noisy = True
use_foolscap_logging = True
(logmsg, logerr, PrefixingLogMixin) = (_msg, _err, _PrefixingLogMixin)
else: # pragma: no cover
def logmsg(s, level=None):
- print s
+ print(s)
def logerr(s, level=None):
- print s
+ print(s)
class PrefixingLogMixin:
def __init__(self, facility=None, prefix=''):
self.prefix = prefix
def log(self, s, level=None):
- print "%r %s" % (self.prefix, s)
+ print("%r %s" % (self.prefix, s))
def eventually_callback(d):
"""SFTP times are unsigned 32-bit integers representing UTC seconds
(ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
A Tahoe time is the corresponding float."""
- return long(t) & 0xFFFFFFFFL
+ return long(t) & 0xFFFFFFFF
def _convert_error(res, request):
if childnode and childnode.is_unknown():
perms = 0
elif childnode and IDirectoryNode.providedBy(childnode):
- perms = S_IFDIR | 0777
+ perms = S_IFDIR | 0o777
else:
# For files, omit the size if we don't immediately know it.
if childnode and size is None:
if size is not None:
_assert(isinstance(size, (int, long)) and not isinstance(size, bool), size=size)
attrs['size'] = size
- perms = S_IFREG | 0666
+ perms = S_IFREG | 0o666
if metadata:
if metadata.get('no-write', False):
- perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits
+ perms &= S_IFDIR | S_IFREG | 0o555 # clear 'w' bits
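# A standalone illustration of the masking above: clearing the write bits from
# a regular-file mode while keeping the file-type and read/execute bits
# (the values here are illustrative):
from stat import S_IFDIR, S_IFREG
demo_perms = S_IFREG | 0o666
demo_perms &= S_IFDIR | S_IFREG | 0o555   # clear 'w' bits
assert demo_perms == (S_IFREG | 0o444)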
# See webapi.txt for what these times mean.
# We would prefer to omit atime, but SFTP version 3 can only
self.is_closed = True
try:
self.f.close()
- except Exception, e:
+ except Exception as e:
self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)
self.download_done("closed")
return self.done_status
def logout(self):
self.log(".logout()", level=OPERATIONAL)
- for files in self._heisenfiles.itervalues():
+ for files in six.itervalues(self._heisenfiles):
for f in files:
f.abandon()
d = delay or defer.succeed(None)
d.addCallback(lambda ign: self._get_root(path))
- def _got_root( (root, path) ):
+        def _got_root(root_and_path):
+            (root, path) = root_and_path
if root.is_unknown():
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open an unknown cap (or child of an unknown object). "
if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))
- def _got_child( (filenode, current_metadata) ):
+        def _got_child(filenode_and_metadata):
+            (filenode, current_metadata) = filenode_and_metadata
if noisy: self.log("_got_child( (%r, %r) )" % (filenode, current_metadata), level=NOISY)
metadata = update_metadata(current_metadata, desired_metadata, time())
# the target directory must already exist
d = deferredutil.gatherResults([self._get_parent_or_node(from_path),
self._get_parent_or_node(to_path)])
- def _got( (from_pair, to_pair) ):
+        def _got(from_and_to_pairs):
+            (from_pair, to_pair) = from_and_to_pairs
if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r, overwrite=%r)" %
(from_pair, to_pair, from_pathstring, to_pathstring, overwrite), level=NOISY)
(from_parent, from_childname) = from_pair
return defer.execute(_denied)
d = self._get_root(path)
- d.addCallback(lambda (root, path):
- self._get_or_create_directories(root, path, metadata))
+ d.addCallback(lambda root_path:
+ self._get_or_create_directories(root_path[0], root_path[1], metadata))
d.addBoth(_convert_error, request)
return d
def _remove_object(self, path, must_be_directory=False, must_be_file=False):
userpath = self._path_to_utf8(path)
d = self._get_parent_or_node(path)
- def _got_parent( (parent, childname) ):
+        def _got_parent(parent_and_childname):
+            (parent, childname) = parent_and_childname
if childname is None:
raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
path = self._path_from_string(pathstring)
d = self._get_parent_or_node(path)
- def _got_parent_or_node( (parent_or_node, childname) ):
+        def _got_parent_or_node(parent_or_node_and_childname):
+            (parent_or_node, childname) = parent_or_node_and_childname
if noisy: self.log("_got_parent_or_node( (%r, %r) ) in openDirectory(%r)" %
(parent_or_node, childname, pathstring), level=NOISY)
if childname is None:
def _render(children):
parent_readonly = dirnode.is_readonly()
results = []
- for filename, (child, metadata) in children.iteritems():
+ for filename, (child, metadata) in six.iteritems(children):
# The file size may be cached or absent.
metadata['no-write'] = _no_write(parent_readonly, child, metadata)
attrs = _populate_attrs(child, metadata)
path = self._path_from_string(pathstring)
userpath = self._path_to_utf8(path)
d = self._get_parent_or_node(path)
- def _got_parent_or_node( (parent_or_node, childname) ):
+ def _got_parent_or_node(parent_or_node_and_childname):
+ (parent_or_node, childname) = parent_or_node_and_childname
if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
# Some clients will incorrectly try to get the attributes
else:
parent = parent_or_node
d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname]))
- def _got( (child, metadata) ):
+ def _got(child_and_metadata):
+ (child, metadata) = child_and_metadata
if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY)
_assert(IDirectoryNode.providedBy(parent), parent=parent)
metadata['no-write'] = _no_write(parent.is_readonly(), child, metadata)
path = self._path_from_string(pathstring)
userpath = self._path_to_utf8(path)
d = self._get_parent_or_node(path)
- def _got_parent_or_node( (parent_or_node, childname) ):
+ def _got_parent_or_node(parent_or_node_and_childname):
+ (parent_or_node, childname) = parent_or_node_and_childname
if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
direntry = _direntry_for(parent_or_node, childname)
def _get_parent_or_node(self, path):
# return Deferred (parent, childname) or (node, None)
d = self._get_root(path)
- def _got_root( (root, remaining_path) ):
+ def _got_root(root_and_remaining_path):
+ (root, remaining_path) = root_and_remaining_path
if not remaining_path:
return (root, None)
else:
# -*- test-case-name: allmydata.test.test_hashtree -*-
from allmydata.util import mathutil # from the pyutil library
+import six
"""
Read and write chunks from files.
while len(rows[-1]) != 1:
last = rows[-1]
rows += [[pair_hash(last[2*i], last[2*i+1])
- for i in xrange(len(last)//2)]]
+ for i in range(len(last)//2)]]
# Flatten the list of rows into a single list.
rows.reverse()
self[:] = sum(rows, [])
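# A small runnable sketch of the construction above, with pair_hash stood
# in by string concatenation purely for illustration:
def demo_pair_hash(a, b):
    return "(" + a + b + ")"
rows = [["L0", "L1", "L2", "L3"]]
while len(rows[-1]) != 1:
    last = rows[-1]
    rows += [[demo_pair_hash(last[2*i], last[2*i+1]) for i in range(len(last)//2)]]
rows.reverse()
flat = sum(rows, [])
# flat[0] is the root and flat[3:] are the leaves: node i's children sit at
# indices 2*i + 1 and 2*i + 2, the usual complete-binary-tree array layout.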
rows = [L]
while len(rows[-1]) != 1:
last = rows[-1]
- rows += [[None for i in xrange(len(last)//2)]]
+ rows += [[None for i in range(len(last)//2)]]
# Flatten the list of rows into a single list.
rows.reverse()
self[:] = sum(rows, [])
for h in leaves.values():
assert isinstance(h, str)
new_hashes = hashes.copy()
- for leafnum,leafhash in leaves.iteritems():
+ for leafnum,leafhash in six.iteritems(leaves):
hashnum = self.first_leaf_num + leafnum
if hashnum in new_hashes:
if new_hashes[hashnum] != leafhash:
# first we provisionally add all hashes to the tree, comparing
# any duplicates
- for i,h in new_hashes.iteritems():
+ for i,h in six.iteritems(new_hashes):
if self[i]:
if self[i] != h:
raise BadHashError("new hash %s does not match "
self[i] = h
remove_upon_failure.add(i)
- for level in reversed(range(len(hashes_to_check))):
+ for level in reversed(list(range(len(hashes_to_check)))):
this_level = hashes_to_check[level]
while this_level:
i = this_level.pop()
# Next: things that are optional and not redundant: crypttext_hash
- if d.has_key('crypttext_hash'):
+ if 'crypttext_hash' in d:
self.crypttext_hash = d['crypttext_hash']
if len(self.crypttext_hash) != CRYPTO_VAL_SIZE:
raise BadURIExtension('crypttext_hash is required to be hashutil.CRYPTO_VAL_SIZE bytes, not %s bytes' % (len(self.crypttext_hash),))
# Next: things that are optional, redundant, and required to be
# consistent: codec_name, codec_params, tail_codec_params,
# num_segments, size, needed_shares, total_shares
- if d.has_key('codec_name'):
+ if 'codec_name' in d:
if d['codec_name'] != "crs":
raise UnsupportedErasureCodec(d['codec_name'])
- if d.has_key('codec_params'):
+ if 'codec_params' in d:
ucpss, ucpns, ucpts = codec.parse_params(d['codec_params'])
if ucpss != self.segment_size:
raise BadURIExtension("inconsistent erasure code params: "
"self._verifycap.total_shares: %s" %
(ucpts, self._verifycap.total_shares))
- if d.has_key('tail_codec_params'):
+ if 'tail_codec_params' in d:
utcpss, utcpns, utcpts = codec.parse_params(d['tail_codec_params'])
if utcpss != self.tail_segment_size:
raise BadURIExtension("inconsistent erasure code params: utcpss: %s != "
"self._verifycap.total_shares: %s" % (utcpts,
self._verifycap.total_shares))
- if d.has_key('num_segments'):
+ if 'num_segments' in d:
if d['num_segments'] != self.num_segments:
raise BadURIExtension("inconsistent num_segments: size: %s, "
"segment_size: %s, computed_num_segments: %s, "
self.segment_size,
self.num_segments, d['num_segments']))
- if d.has_key('size'):
+ if 'size' in d:
if d['size'] != self._verifycap.size:
raise BadURIExtension("inconsistent size: URI size: %s, UEB size: %s" %
(self._verifycap.size, d['size']))
- if d.has_key('needed_shares'):
+ if 'needed_shares' in d:
if d['needed_shares'] != self._verifycap.needed_shares:
raise BadURIExtension("inconsistent needed shares: URI needed shares: %s, UEB "
"needed shares: %s" % (self._verifycap.total_shares,
d['needed_shares']))
- if d.has_key('total_shares'):
+ if 'total_shares' in d:
if d['total_shares'] != self._verifycap.total_shares:
raise BadURIExtension("inconsistent total shares: URI total shares: %s, UEB "
"total shares: %s" % (self._verifycap.total_shares,
sharehashes = dict(sh)
try:
self.share_hash_tree.set_hashes(sharehashes)
- except IndexError, le:
+ except IndexError as le:
raise BadOrMissingHash(le)
- except (hashtree.BadHashError, hashtree.NotEnoughHashesError), le:
+ except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le:
raise BadOrMissingHash(le)
d.addCallback(_got_share_hashes)
return d
try:
self.block_hash_tree.set_hashes(bh)
- except IndexError, le:
+ except IndexError as le:
raise BadOrMissingHash(le)
- except (hashtree.BadHashError, hashtree.NotEnoughHashesError), le:
+ except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le:
raise BadOrMissingHash(le)
d.addCallback(_got_block_hashes)
return d
ct_hashes = dict(enumerate(hashes))
try:
crypttext_hash_tree.set_hashes(ct_hashes)
- except IndexError, le:
+ except IndexError as le:
raise BadOrMissingHash(le)
- except (hashtree.BadHashError, hashtree.NotEnoughHashesError), le:
+ except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le:
raise BadOrMissingHash(le)
d.addCallback(_got_crypttext_hashes)
return d
sharehashes, blockhashes, blockdata = results
try:
sharehashes = dict(sharehashes)
- except ValueError, le:
+ except ValueError as le:
le.args = tuple(le.args + (sharehashes,))
raise
blockhashes = dict(enumerate(blockhashes))
# match the root node of self.share_hash_tree.
try:
self.share_hash_tree.set_hashes(sharehashes)
- except IndexError, le:
+ except IndexError as le:
# Weird -- sharehashes contained index numbers outside of
# the range that fit into this hash tree.
raise BadOrMissingHash(le)
# (self.sharenum, blocknum, len(blockdata),
# blockdata[:50], blockdata[-50:], base32.b2a(blockhash)))
- except (hashtree.BadHashError, hashtree.NotEnoughHashesError), le:
+ except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le:
# log.WEIRD: indicates undetected disk/network error, or more
# likely a programming error
self.log("hash failure in block=%d, shnum=%d on %s" %
from allmydata.interfaces import NotEnoughSharesError, NoSharesError
from allmydata.util import log
from allmydata.util.dictutil import DictOfSets
-from common import OVERDUE, COMPLETE, CORRUPT, DEAD, BADSEGNUM, \
+from .common import OVERDUE, COMPLETE, CORRUPT, DEAD, BADSEGNUM, \
BadSegmentNumberError
class SegmentFetcher:
import time
+import six
now = time.time
from foolscap.api import eventually
from allmydata.util import base32, log
from twisted.internet import reactor
-from share import Share, CommonShare
+from .share import Share, CommonShare
def incidentally(res, f, *args, **kwargs):
"""Add me to a Deferred chain like this:
server = None
try:
if self._servers:
- server = self._servers.next()
+ server = six.advance_iterator(self._servers)
except StopIteration:
self._servers = None
shnums=shnums_s, name=server.get_name(),
level=log.NOISY, parent=lp, umid="0fcEZw")
shares = []
- for shnum, bucket in buckets.iteritems():
+ for shnum, bucket in six.iteritems(buckets):
s = self._create_share(shnum, bucket, server, dyhb_rtt)
shares.append(s)
self._deliver_shares(shares)
import time
+import six
now = time.time
from zope.interface import Interface
from twisted.python.failure import Failure
NotEnoughHashesError
# local imports
-from finder import ShareFinder
-from fetcher import SegmentFetcher
-from segmentation import Segmentation
-from common import BadCiphertextHashError
+from .finder import ShareFinder
+from .fetcher import SegmentFetcher
+from .segmentation import Segmentation
+from .common import BadCiphertextHashError
class IDownloadStatusHandlingConsumer(Interface):
def set_download_status_read_event(read_ev):
# each segment is turned into N blocks. All but the last are of size
# block_size, and the last is of size tail_block_size
- block_size = segment_size / k
- tail_block_size = tail_segment_padded / k
+ block_size = segment_size // k
+ tail_block_size = tail_segment_padded // k
return { "tail_segment_size": tail_segment_size,
"tail_segment_padded": tail_segment_padded,
shares = []
shareids = []
- for (shareid, share) in blocks.iteritems():
+ for (shareid, share) in six.iteritems(blocks):
assert len(share) == block_size
shareids.append(shareid)
shares.append(share)
d.addCallback(_process)
return d
- def _check_ciphertext_hash(self, (segment, decodetime), segnum):
+ def _check_ciphertext_hash(self, segment_and_decodetime, segnum):
+ (segment, decodetime) = segment_and_decodetime
start = now()
assert self._active_segment.segnum == segnum
assert self.segment_size is not None
from allmydata.util.spans import overlap
from allmydata.interfaces import DownloadStopped
-from common import BadSegmentNumberError, WrongSegmentError
+from .common import BadSegmentNumberError, WrongSegmentError
class Segmentation:
"""I am responsible for a single offset+size read of the file. I handle
self._cancel_segment_request = None
return res
- def _got_segment(self, (segment_start,segment,decodetime), wanted_segnum):
+ def _got_segment(self, segment_info, wanted_segnum):
+ (segment_start, segment, decodetime) = segment_info
self._cancel_segment_request = None
# we got file[segment_start:segment_start+len(segment)]
# we want file[self._offset:self._offset+self._size]
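# Illustrative sketch (hypothetical values) of the trimming described by
# the two comments above: slice the fetched segment down to the byte
# range the caller actually asked for.
segment_start, segment = 131072, b"x" * 131072  # we got file[131072:262144]
offset, size = 140000, 5000                     # we want file[140000:145000]
start = offset - segment_start                  # 8928
wanted = segment[start:start + size]            # the 5000 requested bytes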
from allmydata.immutable.layout import make_write_bucket_proxy
from allmydata.util.observer import EventStreamObserver
-from common import COMPLETE, CORRUPT, DEAD, BADSEGNUM
+from .common import COMPLETE, CORRUPT, DEAD, BADSEGNUM
class LayoutInvalid(Exception):
level=log.NOISY, parent=self._lp, umid="BaL1zw")
self._do_loop()
# all exception cases call self._fail(), which clears self._alive
- except (BadHashError, NotEnoughHashesError, LayoutInvalid), e:
+ except (BadHashError, NotEnoughHashesError, LayoutInvalid) as e:
# Abandon this share. We do this if we see corruption in the
# offset table, the UEB, or a hash tree. We don't abandon the
# whole share if we see corruption in a data block (we abandon
share=repr(self),
level=log.UNUSUAL, parent=self._lp, umid="gWspVw")
self._fail(Failure(e), log.UNUSUAL)
- except DataUnavailable, e:
+ except DataUnavailable as e:
# Abandon this share.
log.msg(format="need data that will never be available"
" from %s: pending=%s, received=%s, unavailable=%s" %
try:
self._node.validate_and_store_UEB(UEB_s)
return True
- except (LayoutInvalid, BadHashError), e:
+ except (LayoutInvalid, BadHashError) as e:
# TODO: if this UEB was bad, we'll keep trying to validate it
# over and over again. Only log.err on the first one, or better
# yet skip all but the first
try:
self._node.process_share_hashes(share_hashes)
# adds to self._node.share_hash_tree
- except (BadHashError, NotEnoughHashesError), e:
+ except (BadHashError, NotEnoughHashesError) as e:
f = Failure(e)
self._signal_corruption(f, o["share_hashes"], hashlen)
self.had_corruption = True
# cannot validate)
try:
self._commonshare.process_block_hashes(block_hashes)
- except (BadHashError, NotEnoughHashesError), e:
+ except (BadHashError, NotEnoughHashesError) as e:
f = Failure(e)
hashnums = ",".join([str(n) for n in sorted(block_hashes.keys())])
log.msg(format="hash failure in block_hashes=(%(hashnums)s),"
# gotten them all
try:
self._node.process_ciphertext_hashes(hashes)
- except (BadHashError, NotEnoughHashesError), e:
+ except (BadHashError, NotEnoughHashesError) as e:
f = Failure(e)
hashnums = ",".join([str(n) for n in sorted(hashes.keys())])
log.msg(format="hash failure in ciphertext_hashes=(%(hashnums)s),"
# now clear our received data, to dodge the #1170 spans.py
# complexity bug
self._received = DataSpans()
- except (BadHashError, NotEnoughHashesError), e:
+ except (BadHashError, NotEnoughHashesError) as e:
# rats, we have a corrupt block. Notify our clients that they
# need to look elsewhere, and advise the server. Unlike
# corruption in other parts of the share, this doesn't cause us
import itertools
from zope.interface import implements
from allmydata.interfaces import IDownloadStatus
+import six
class ReadEvent:
def __init__(self, ev, ds):
def __init__(self, storage_index, size):
self.storage_index = storage_index
self.size = size
- self.counter = self.statusid_counter.next()
+ self.counter = six.advance_iterator(self.statusid_counter)
self.helper = False
self.first_timestamp = None
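# For context: six.advance_iterator is just an alias for the builtin
# next(), so the rewrite above behaves identically on Python 2 and 3.
# (statusid_counter is assumed to be an itertools.count here.)
import itertools, six
statusid_counter = itertools.count()
first = six.advance_iterator(statusid_counter)  # 0
second = next(statusid_counter)                 # 1, equivalent spelling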
from allmydata.codec import CRSEncoder
from allmydata.interfaces import IEncoder, IStorageBucketWriter, \
IEncryptedUploadable, IUploadStatus, UploadUnhappinessError
+import six
"""
assert IStorageBucketWriter.providedBy(landlords[k])
self.landlords = landlords.copy()
assert isinstance(servermap, dict)
- for v in servermap.itervalues():
+ for v in six.itervalues(servermap):
assert isinstance(v, set)
self.servermap = servermap.copy()
d.addCallback(_got)
return d
- def _send_segment(self, (shares, shareids), segnum):
+ def _send_segment(self, shares_and_shareids, segnum):
# To generate the URI, we must generate the roothash, so we must
# generate all shares, even if we aren't actually giving them to
# anybody. This means that the set of shares we create will be equal
# to or larger than the set of landlords. If we have any landlord who
# *doesn't* have a share, that's an error.
+ (shares, shareids) = shares_and_shareids
_assert(set(self.landlords.keys()).issubset(set(shareids)),
shareids=shareids, landlords=self.landlords)
start = time.time()
(self,
self.segment_size*(segnum+1),
self.segment_size*self.num_segments,
- 100 * (segnum+1) / self.num_segments,
+ 100 * (segnum+1) // self.num_segments,
),
level=log.OPERATIONAL)
elapsed = time.time() - start
import binascii
import time
+from functools import reduce
now = time.time
from zope.interface import implements
from twisted.internet import defer
from pycryptopp.cipher.aes import AES
from cStringIO import StringIO
+import six
# this wants to live in storage, not here
# this.
def pretty_print_shnum_to_servers(s):
- return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.iteritems() ])
+ return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in six.iteritems(s) ])
class ServerTracker:
def __init__(self, server,
rref = self._server.get_rref()
return rref.callRemote("get_buckets", self.storage_index)
- def _got_reply(self, (alreadygot, buckets)):
+ def _got_reply(self, alreadygot_and_buckets):
#log.msg("%s._got_reply(%s)" % (self, (alreadygot, buckets)))
+ (alreadygot, buckets) = alreadygot_and_buckets
b = {}
- for sharenum, rref in buckets.iteritems():
+ for sharenum, rref in six.iteritems(buckets):
bp = self.wbp_class(rref, self._server, self.sharesize,
self.blocksize,
self.num_segments,
% (self, self._get_progress_message(),
pretty_print_shnum_to_servers(merged),
[', '.join([str_shareloc(k,v)
- for k,v in st.buckets.iteritems()])
+ for k,v in six.iteritems(st.buckets)])
for st in self.use_trackers],
pretty_print_shnum_to_servers(self.preexisting_shares))
self.log(msg, level=log.OPERATIONAL)
self.progress = [0.0, 0.0, 0.0]
self.active = True
self.results = None
- self.counter = self.statusid_counter.next()
+ self.counter = six.advance_iterator(self.statusid_counter)
self.started = time.time()
def get_started(self):
d.addCallback(_done)
return d
- def set_shareholders(self, (upload_trackers, already_serverids), encoder):
+ def set_shareholders(self, trackers_and_serverids, encoder):
"""
@param upload_trackers: a sequence of ServerTracker objects that
have agreed to hold some shares for us (the
serverids for servers that claim to already
have this share
"""
+ (upload_trackers, already_serverids) = trackers_and_serverids
msgtempl = "set_shareholders; upload_trackers is %s, already_serverids is %s"
values = ([', '.join([str_shareloc(k,v)
- for k,v in st.buckets.iteritems()])
+ for k,v in six.iteritems(st.buckets)])
for st in upload_trackers], already_serverids)
self.log(msgtempl % values, level=log.OPERATIONAL)
# record already-present shares in self._results
d.addCallback(self._contacted_helper)
return d
- def _contacted_helper(self, (helper_upload_results, upload_helper)):
+ def _contacted_helper(self, helper_results_and_uploader):
+ (helper_upload_results, upload_helper) = helper_results_and_uploader
now = time.time()
elapsed = now - self._time_contacting_helper_start
self._elapsed_time_contacting_helper = elapsed
from zope.interface import Interface
from foolscap.api import StringConstraint, TupleOf, SetOf, DictOf, Any, \
RemoteInterface, Referenceable
-from old import RIIntroducerSubscriberClient_v1
+from .old import RIIntroducerSubscriberClient_v1
FURL = StringConstraint(1000)
# old introducer protocol (v1):
+from __future__ import print_function
import os
import time
kgf = os.path.join(self.basedir, self.furl_file)
self.keygen_furl = self.tub.registerReference(self.key_generator, furlFile=kgf)
if display_furl:
- print 'key generator at:', self.keygen_furl
+ print('key generator at:', self.keygen_furl)
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.checker import MutableChecker, MutableCheckAndRepairer
from allmydata.mutable.repairer import Repairer
+import six
class BackoffAgent:
return self
- def create_with_keys(self, (pubkey, privkey), contents,
+ def create_with_keys(self, keypair, contents,
version=SDMF_VERSION):
"""Call this to create a brand-new mutable file. It will create the
shares, find homes for them, and upload the initial contents (created
Deferred that fires (with the MutableFileNode instance you should
use) when it completes.
"""
+ (pubkey, privkey) = keypair
self._pubkey, self._privkey = pubkey, privkey
pubkey_s = self._pubkey.serialize()
privkey_s = self._privkey.serialize()
representing the best recoverable version of the file.
"""
d = self._get_version_from_servermap(MODE_READ, servermap, version)
- def _build_version((servermap, their_version)):
+ def _build_version(servermap_and_version):
+ (servermap, their_version) = servermap_and_version
assert their_version in servermap.recoverable_versions()
assert their_version in servermap.make_versionmap()
# get_mutable_version => write intent, so we require that the
# servermap is updated in MODE_WRITE
d = self._get_version_from_servermap(MODE_WRITE, servermap, version)
- def _build_version((servermap, smap_version)):
+ def _build_version(servermap_and_version):
# these should have been set by the servermap update.
+ (servermap, smap_version) = servermap_and_version
assert self._secret_holder
assert self._writekey
start_segments = {} # shnum -> start segment
end_segments = {} # shnum -> end segment
blockhashes = {} # shnum -> blockhash tree
- for (shnum, original_data) in update_data.iteritems():
+ for (shnum, original_data) in six.iteritems(update_data):
data = [d[1] for d in original_data if d[0] == self._version]
# data is [(blockhashes,start,end)..]
from twisted.python import failure
from twisted.internet import defer
from zope.interface import implements
+import six
+from six.moves import filter
# These strings describe the format of the packed structs they help process.
self._required_shares)
assert expected_segment_size == segment_size
- self._block_size = self._segment_size / self._required_shares
+ self._block_size = self._segment_size // self._required_shares
# This is meant to mimic how SDMF files were built before MDMF
# entered the picture: we generate each share in its entirety,
Add the share hash chain to the share.
"""
assert isinstance(sharehashes, dict)
- for h in sharehashes.itervalues():
+ for h in six.itervalues(sharehashes):
assert len(h) == HASH_SIZE
# serialize the sharehashes, then set them.
# and also because it provides a useful amount of bounds checking.
self._num_segments = mathutil.div_ceil(self._data_length,
self._segment_size)
- self._block_size = self._segment_size / self._required_shares
+ self._block_size = self._segment_size // self._required_shares
# We also calculate the share size, to help us with block
# constraints later.
tail_size = self._data_length % self._segment_size
else:
self._tail_block_size = mathutil.next_multiple(tail_size,
self._required_shares)
- self._tail_block_size /= self._required_shares
+ self._tail_block_size //= self._required_shares
# We already know where the sharedata starts; right after the end
# of the header (which is defined as the signable part + the offsets)
self._segment_size = segsize
self._data_length = datalen
- self._block_size = self._segment_size / self._required_shares
+ self._block_size = self._segment_size // self._required_shares
# We can upload empty files, and need to account for this fact
# so as to avoid zero-division and zero-modulo errors.
if datalen > 0:
else:
self._tail_block_size = mathutil.next_multiple(tail_size,
self._required_shares)
- self._tail_block_size /= self._required_shares
+ self._tail_block_size //= self._required_shares
return encoding_parameters
unpack_sdmf_checkstring, \
MDMFSlotWriteProxy, \
SDMFSlotWriteProxy
+import six
KiB = 1024
DEFAULT_MAX_SEGMENT_SIZE = 128 * KiB
self.size = None
self.status = "Not started"
self.progress = 0.0
- self.counter = self.statusid_counter.next()
+ self.counter = six.advance_iterator(self.statusid_counter)
self.started = time.time()
def add_per_server_time(self, server, elapsed):
# Our update process fetched these for us. We need to update
# them in place as publishing happens.
self.blockhashes = {} # (shnum, [blochashes])
- for (i, bht) in blockhashes.iteritems():
+ for (i, bht) in six.iteritems(blockhashes):
# We need to extract the leaves from our old hash tree.
old_segcount = mathutil.div_ceil(version[4],
version[3])
bht = dict(enumerate(bht))
h.set_hashes(bht)
leaves = h[h.get_leaf_index(0):]
- for j in xrange(self.num_segments - len(leaves)):
+ for j in range(self.num_segments - len(leaves)):
leaves.append(None)
assert len(leaves) >= self.num_segments
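# Why the slice above yields the leaves (a hedged sketch): in the
# flattened tree layout, leaves occupy the tail of the list, and
# get_leaf_index(0) is the index of the first leaf. The loop then pads
# with None so the list covers the new, larger segment count.
num_segments = 6                    # assumed new segment count
leaves = ["h0", "h1", "h2", "h3"]   # what h[h.get_leaf_index(0):] returned
for j in range(num_segments - len(leaves)):
    leaves.append(None)
assert len(leaves) >= num_segments  # [h0, h1, h2, h3, None, None]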
# This will eventually hold the block hash chain for each share
# that we publish. We define it this way so that empty publishes
# will still have something to write to the remote slot.
- self.blockhashes = dict([(i, []) for i in xrange(self.total_shares)])
- for i in xrange(self.total_shares):
+ self.blockhashes = dict([(i, []) for i in range(self.total_shares)])
+ for i in range(self.total_shares):
blocks = self.blockhashes[i]
- for j in xrange(self.num_segments):
+ for j in range(self.num_segments):
blocks.append(None)
self.sharehash_leaves = None # eventually [sharehashes]
self.sharehashes = {} # shnum -> [sharehash leaves necessary to
salt = os.urandom(16)
assert self._version == SDMF_VERSION
- for shnum, writers in self.writers.iteritems():
+ for shnum, writers in six.iteritems(self.writers):
for writer in writers:
writer.put_salt(salt)
results, salt = encoded_and_salt
shares, shareids = results
self._status.set_status("Pushing segment")
- for i in xrange(len(shares)):
+ for i in range(len(shares)):
sharedata = shares[i]
shareid = shareids[i]
if self._version == MDMF_VERSION:
def push_encprivkey(self):
encprivkey = self._encprivkey
self._status.set_status("Pushing encrypted private key")
- for shnum, writers in self.writers.iteritems():
+ for shnum, writers in six.iteritems(self.writers):
for writer in writers:
writer.put_encprivkey(encprivkey)
def push_blockhashes(self):
self.sharehash_leaves = [None] * len(self.blockhashes)
self._status.set_status("Building and pushing block hash tree")
- for shnum, blockhashes in self.blockhashes.iteritems():
+ for shnum, blockhashes in six.iteritems(self.blockhashes):
t = hashtree.HashTree(blockhashes)
self.blockhashes[shnum] = list(t)
# set the leaf for future use.
def push_sharehashes(self):
self._status.set_status("Building and pushing share hash chain")
share_hash_tree = hashtree.HashTree(self.sharehash_leaves)
- for shnum in xrange(len(self.sharehash_leaves)):
+ for shnum in range(len(self.sharehash_leaves)):
needed_indices = share_hash_tree.needed_hashes(shnum)
self.sharehashes[shnum] = dict( [ (i, share_hash_tree[i])
for i in needed_indices] )
# - Get the checkstring of the resulting layout; sign that.
# - Push the signature
self._status.set_status("Pushing root hashes and signature")
- for shnum in xrange(self.total_shares):
+ for shnum in range(self.total_shares):
writers = self.writers[shnum]
for writer in writers:
writer.put_root_hash(self.root_hash)
signable = self._get_some_writer().get_signable()
self.signature = self._privkey.sign(signable)
- for (shnum, writers) in self.writers.iteritems():
+ for (shnum, writers) in six.iteritems(self.writers):
for writer in writers:
writer.put_signature(self.signature)
self._status.timings['sign'] = time.time() - started
ds = []
verification_key = self._pubkey.serialize()
- for (shnum, writers) in self.writers.copy().iteritems():
+ for (shnum, writers) in six.iteritems(self.writers.copy()):
for writer in writers:
writer.put_verification_key(verification_key)
self.num_outstanding += 1
# TODO: Precompute this.
shares = []
- for shnum, writers in self.writers.iteritems():
+ for shnum, writers in six.iteritems(self.writers):
shares.extend([x.shnum for x in writers if x.server == server])
known_shnums = set(shares)
surprise_shares -= known_shnums
from allmydata.mutable.common import CorruptShareError, BadShareError, \
UncoordinatedWriteError
from allmydata.mutable.layout import MDMFSlotReadProxy
+import six
class RetrieveStatus:
implements(IRetrieveStatus)
self.size = None
self.status = "Not started"
self.progress = 0.0
- self.counter = self.statusid_counter.next()
+ self.counter = six.advance_iterator(self.statusid_counter)
self.started = time.time()
def get_started(self):
self._active_readers = [] # list of active readers for this dl.
self._block_hash_trees = {} # shnum => hashtree
- for i in xrange(self._total_shares):
+ for i in range(self._total_shares):
# So we don't have to do this later.
self._block_hash_trees[i] = hashtree.IncompleteHashTree(self._num_segments)
try:
bht.set_hashes(blockhashes)
except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
- IndexError), e:
+ IndexError) as e:
raise CorruptShareError(server,
reader.shnum,
"block hash tree failure: %s" % e)
try:
bht.set_hashes(leaves={segnum: blockhash})
except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
- IndexError), e:
+ IndexError) as e:
raise CorruptShareError(server,
reader.shnum,
"block hash tree failure: %s" % e)
self.share_hash_tree.set_hashes(hashes=sharehashes,
leaves={reader.shnum: bht[0]})
except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
- IndexError), e:
+ IndexError) as e:
raise CorruptShareError(server,
reader.shnum,
"corrupt hashes: %s" % e)
+from __future__ import print_function
import sys, time, copy
from zope.interface import implements
from allmydata.mutable.common import MODE_CHECK, MODE_ANYTHING, MODE_WRITE, \
MODE_READ, MODE_REPAIR, CorruptShareError
from allmydata.mutable.layout import SIGNED_PREFIX_LENGTH, MDMFSlotReadProxy
+import six
class UpdateStatus:
implements(IServermapUpdaterStatus)
self.mode = "?"
self.status = "Not started"
self.progress = 0.0
- self.counter = self.statusid_counter.next()
+ self.counter = six.advance_iterator(self.statusid_counter)
self.started = time.time()
self.finished = None
return (self._last_update_mode, self._last_update_time)
def dump(self, out=sys.stdout):
- print >>out, "servermap:"
+ print("servermap:", file=out)
for ( (server, shnum), (verinfo, timestamp) ) in self._known_shares.items():
(seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
offsets_tuple) = verinfo
- print >>out, ("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
+ print(("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
(server.get_name(), shnum,
seqnum, base32.b2a(root_hash)[:4], k, N,
- datalength))
+ datalength)), file=out)
if self._problems:
- print >>out, "%d PROBLEMS" % len(self._problems)
+ print("%d PROBLEMS" % len(self._problems), file=out)
for f in self._problems:
- print >>out, str(f)
+ print(str(f), file=out)
return out
def all_servers(self):
+from __future__ import print_function
import datetime, os.path, re, types, ConfigParser, tempfile
from base64 import b32decode, b32encode
from allmydata.util.assertutil import precondition, _assert
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.encodingutil import get_filesystem_encoding, quote_output
+import six
# Add our application versions to the data that Foolscap's LogPublisher
# reports.
-for thing, things_version in get_package_versions().iteritems():
+for thing, things_version in six.iteritems(get_package_versions()):
app_versions.add_version(thing, str(things_version))
# group 1 will be addr (dotted quad string), group 3 if any will be portnum (string)
self.basedir = abspath_expanduser_unicode(unicode(basedir))
self._portnumfile = os.path.join(self.basedir, self.PORTNUMFILE)
self._tub_ready_observerlist = observer.OneShotObserverList()
- fileutil.make_dirs(os.path.join(self.basedir, "private"), 0700)
+ fileutil.make_dirs(os.path.join(self.basedir, "private"), 0o700)
open(os.path.join(self.basedir, "private", "README"), "w").write(PRIV_README)
# creates self.config
fn = os.path.join(self.basedir, name)
try:
fileutil.write(fn, value, mode)
- except EnvironmentError, e:
+ except EnvironmentError as e:
self.log("Unable to write config file '%s'" % fn)
self.log(e)
# need to send a pid to the foolscap log here.
twlog.msg("My pid: %s" % os.getpid())
try:
- os.chmod("twistd.pid", 0644)
+ os.chmod("twistd.pid", 0o644)
except EnvironmentError:
pass
# Delay until the reactor is running.
def _service_startup_failed(self, failure):
self.log('_startService() failed')
log.err(failure)
- print "Node._startService failed, aborting"
- print failure
+ print("Node._startService failed, aborting")
+ print(failure)
#reactor.stop() # for unknown reasons, reactor.stop() isn't working. [ ] TODO
self.log('calling os.abort()')
twlog.msg('calling os.abort()') # make sure it gets into twistd.log
- print "calling os.abort()"
+ print("calling os.abort()")
os.abort()
def stopService(self):
for o in twlog.theLogPublisher.observers:
# o might be a FileLogObserver's .emit method
if type(o) is type(self.setup_logging): # bound method
- ob = o.im_self
+ ob = o.__self__
if isinstance(ob, twlog.FileLogObserver):
newmeth = types.UnboundMethodType(formatTimeTahoeStyle, ob, ob.__class__)
ob.formatTime = newmeth
from allmydata.unknown import UnknownNode
from allmydata.blacklist import ProhibitedNode
from allmydata import uri
+import six
class NodeMaker:
def create_new_mutable_directory(self, initial_children={}, version=None):
# initial_children must have metadata (i.e. {} instead of None)
- for (name, (node, metadata)) in initial_children.iteritems():
+ for (name, (node, metadata)) in six.iteritems(initial_children):
precondition(isinstance(metadata, dict),
"create_new_mutable_directory requires metadata to be a dict, not None", metadata)
node.raise_error()
+from __future__ import print_function
from twisted.python import usage
from allmydata.scripts.common import BaseOptions
from allmydata.util.keyutil import make_keypair
out = options.stdout
privkey_vs, pubkey_vs = make_keypair()
- print >>out, "private:", privkey_vs
- print >>out, "public:", pubkey_vs
+ print("private:", privkey_vs, file=out)
+ print("public:", pubkey_vs, file=out)
class DerivePubkeyOptions(BaseOptions):
def parseArgs(self, privkey):
from allmydata.util import keyutil
privkey_vs = options.privkey
sk, pubkey_vs = keyutil.parse_privkey(privkey_vs)
- print >>out, "private:", privkey_vs
- print >>out, "public:", pubkey_vs
+ print("private:", privkey_vs, file=out)
+ print("public:", pubkey_vs, file=out)
return 0
class AdminCommand(BaseOptions):
+from __future__ import print_function
import os.path, sys, time, random, stat
must_create = not os.path.exists(dbfile)
try:
db = sqlite3.connect(dbfile)
- except (EnvironmentError, sqlite3.OperationalError), e:
- print >>stderr, "Unable to create/open backupdb file %s: %s" % (dbfile, e)
+ except (EnvironmentError, sqlite3.OperationalError) as e:
+ print("Unable to create/open backupdb file %s: %s" % (dbfile, e), file=stderr)
return None
c = db.cursor()
try:
c.execute("SELECT version FROM version")
version = c.fetchone()[0]
- except sqlite3.DatabaseError, e:
+ except sqlite3.DatabaseError as e:
# this indicates that the file is not a compatible database format.
# Perhaps it was created with an old version, or it might be junk.
- print >>stderr, "backupdb file is unusable: %s" % e
+ print("backupdb file is unusable: %s" % e, file=stderr)
return None
if just_create: # for tests
version = 2
if version == 2:
return BackupDB_v2(sqlite3, db)
- print >>stderr, "Unable to handle backupdb version %s" % version
+ print("Unable to handle backupdb version %s" % version, file=stderr)
return None
class FileResult:
+from __future__ import print_function
import os.path, re, fnmatch
from twisted.python import usage
from allmydata.scripts.common import get_aliases, get_default_nodedir, \
DEFAULT_ALIAS, BaseOptions
from allmydata.util.encodingutil import argv_to_unicode, argv_to_abspath, quote_output
+from allmydata.util.sixutil import map
NODEURL_RE=re.compile("http(s?)://([^:]*)(:([1-9][0-9]*))?")
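# For reference, the groups that regex captures (illustrative match):
m = NODEURL_RE.match("http://127.0.0.1:3456")
# m.group(2) == "127.0.0.1"  -- the host part
# m.group(4) == "3456"       -- the optional port, or None when absent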
# enough to have picked an empty file
pass
else:
- print >>options.stderr, "%s retrieved and written to %s" % \
- (options.from_file, options.to_file)
+ print("%s retrieved and written to %s" % \
+ (options.from_file, options.to_file), file=options.stderr)
return rc
def put(options):
+from __future__ import print_function
import os, sys, urllib
import codecs
self.msg = msg
def display(self, err):
- print >>err, self.msg
+ print(self.msg, file=err)
class UnknownAliasError(TahoeError):
+from __future__ import print_function
import os
from cStringIO import StringIO
try:
c.endheaders()
- except socket_error, err:
+ except socket_error as err:
return BadResponse(url, err)
while True:
def check_http_error(resp, stderr):
if resp.status < 200 or resp.status >= 300:
- print >>stderr, format_http_error("Error during HTTP request", resp)
+ print(format_http_error("Error during HTTP request", resp), file=stderr)
return 1
+from __future__ import print_function
import os, sys
from allmydata.scripts.common import BasedirOptions
if os.path.exists(basedir):
if listdir_unicode(basedir):
- print >>err, "The base directory %s is not empty." % quote_output(basedir)
- print >>err, "To avoid clobbering anything, I am going to quit now."
- print >>err, "Please use a different directory, or empty this one."
+ print("The base directory %s is not empty." % quote_output(basedir), file=err)
+ print("To avoid clobbering anything, I am going to quit now.", file=err)
+ print("Please use a different directory, or empty this one.", file=err)
return -1
# we're willing to use an empty directory
else:
c.close()
from allmydata.util import fileutil
- fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
- print >>out, "Node created in %s" % quote_output(basedir)
+ fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)
+ print("Node created in %s" % quote_output(basedir), file=out)
if not config.get("introducer", ""):
- print >>out, " Please set [client]introducer.furl= in tahoe.cfg!"
- print >>out, " The node cannot connect to a grid without it."
+ print(" Please set [client]introducer.furl= in tahoe.cfg!", file=out)
+ print(" The node cannot connect to a grid without it.", file=out)
if not config.get("nickname", ""):
- print >>out, " Please set [node]nickname= in tahoe.cfg"
+ print(" Please set [node]nickname= in tahoe.cfg", file=out)
return 0
def create_client(config, out=sys.stdout, err=sys.stderr):
if os.path.exists(basedir):
if listdir_unicode(basedir):
- print >>err, "The base directory %s is not empty." % quote_output(basedir)
- print >>err, "To avoid clobbering anything, I am going to quit now."
- print >>err, "Please use a different directory, or empty this one."
+ print("The base directory %s is not empty." % quote_output(basedir), file=err)
+ print("To avoid clobbering anything, I am going to quit now.", file=err)
+ print("Please use a different directory, or empty this one.", file=err)
return -1
# we're willing to use an empty directory
else:
write_node_config(c, config)
c.close()
- print >>out, "Introducer created in %s" % quote_output(basedir)
+ print("Introducer created in %s" % quote_output(basedir), file=out)
return 0
+from __future__ import print_function
# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
from twisted.scripts import trial as twisted_trial
from foolscap.logging import cli as foolscap_cli
from allmydata.scripts.common import BaseOptions
+from allmydata.util.sixutil import map
class DumpOptions(BaseOptions):
out = options.stdout
# check the version, to see if we have a mutable or immutable share
- print >>out, "share filename: %s" % quote_output(options['filename'])
+ print("share filename: %s" % quote_output(options['filename']), file=out)
f = open(options['filename'], "rb")
prefix = f.read(32)
if not options["leases-only"]:
dump_immutable_chk_share(f, out, options)
dump_immutable_lease_info(f, out)
- print >>out
+ print(file=out)
return 0
def dump_immutable_chk_share(f, out, options):
# use a ReadBucketProxy to parse the bucket and find the uri extension
bp = ReadBucketProxy(None, None, '')
offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
- print >>out, "%20s: %d" % ("version", bp._version)
+ print("%20s: %d" % ("version", bp._version), file=out)
seek = offsets['uri_extension']
length = struct.unpack(bp._fieldstruct,
f.read_share_data(seek, bp._fieldsize))[0]
for k in keys1:
if k in unpacked:
dk = display_keys.get(k, k)
- print >>out, "%20s: %s" % (dk, unpacked[k])
- print >>out
+ print("%20s: %s" % (dk, unpacked[k]), file=out)
+ print(file=out)
for k in keys2:
if k in unpacked:
dk = display_keys.get(k, k)
- print >>out, "%20s: %s" % (dk, unpacked[k])
- print >>out
+ print("%20s: %s" % (dk, unpacked[k]), file=out)
+ print(file=out)
for k in keys3:
if k in unpacked:
dk = display_keys.get(k, k)
- print >>out, "%20s: %s" % (dk, unpacked[k])
+ print("%20s: %s" % (dk, unpacked[k]), file=out)
leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
if leftover:
- print >>out
- print >>out, "LEFTOVER:"
+ print(file=out)
+ print("LEFTOVER:", file=out)
for k in sorted(leftover):
- print >>out, "%20s: %s" % (k, unpacked[k])
+ print("%20s: %s" % (k, unpacked[k]), file=out)
# the storage index isn't stored in the share itself, so we depend upon
# knowing the parent directory name to get it
unpacked["needed_shares"],
unpacked["total_shares"], unpacked["size"])
verify_cap = u.to_string()
- print >>out, "%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False))
+ print("%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False)), file=out)
sizes = {}
sizes['data'] = (offsets['plaintext_hash_tree'] -
sizes['validation'] = (offsets['uri_extension'] -
offsets['plaintext_hash_tree'])
sizes['uri-extension'] = len(UEB_data)
- print >>out
- print >>out, " Size of data within the share:"
+ print(file=out)
+ print(" Size of data within the share:", file=out)
for k in sorted(sizes):
- print >>out, "%20s: %s" % (k, sizes[k])
+ print("%20s: %s" % (k, sizes[k]), file=out)
if options['offsets']:
- print >>out
- print >>out, " Section Offsets:"
- print >>out, "%20s: %s" % ("share data", f._data_offset)
+ print(file=out)
+ print(" Section Offsets:", file=out)
+ print("%20s: %s" % ("share data", f._data_offset), file=out)
for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
"block_hashes", "share_hashes", "uri_extension"]:
name = {"data": "block data"}.get(k,k)
offset = f._data_offset + offsets[k]
- print >>out, " %20s: %s (0x%x)" % (name, offset, offset)
- print >>out, "%20s: %s" % ("leases", f._lease_offset)
+ print(" %20s: %s (0x%x)" % (name, offset, offset), file=out)
+ print("%20s: %s" % ("leases", f._lease_offset), file=out)
def dump_immutable_lease_info(f, out):
# display lease information too
- print >>out
+ print(file=out)
leases = list(f.get_leases())
if leases:
for i,lease in enumerate(leases):
when = format_expiration_time(lease.expiration_time)
- print >>out, " Lease #%d: owner=%d, expire in %s" \
- % (i, lease.owner_num, when)
+ print(" Lease #%d: owner=%d, expire in %s" \
+ % (i, lease.owner_num, when), file=out)
else:
- print >>out, " No leases."
+ print(" No leases.", file=out)
def format_expiration_time(expiration_time):
now = time.time()
share_type = "MDMF"
f.close()
- print >>out
- print >>out, "Mutable slot found:"
- print >>out, " share_type: %s" % share_type
- print >>out, " write_enabler: %s" % base32.b2a(WE)
- print >>out, " WE for nodeid: %s" % idlib.nodeid_b2a(nodeid)
- print >>out, " num_extra_leases: %d" % num_extra_leases
- print >>out, " container_size: %d" % container_size
- print >>out, " data_length: %d" % data_length
+ print(file=out)
+ print("Mutable slot found:", file=out)
+ print(" share_type: %s" % share_type, file=out)
+ print(" write_enabler: %s" % base32.b2a(WE), file=out)
+ print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
+ print(" num_extra_leases: %d" % num_extra_leases, file=out)
+ print(" container_size: %d" % container_size, file=out)
+ print(" data_length: %d" % data_length, file=out)
if leases:
for (leasenum, lease) in leases:
- print >>out
- print >>out, " Lease #%d:" % leasenum
- print >>out, " ownerid: %d" % lease.owner_num
+ print(file=out)
+ print(" Lease #%d:" % leasenum, file=out)
+ print(" ownerid: %d" % lease.owner_num, file=out)
when = format_expiration_time(lease.expiration_time)
- print >>out, " expires in %s" % when
- print >>out, " renew_secret: %s" % base32.b2a(lease.renew_secret)
- print >>out, " cancel_secret: %s" % base32.b2a(lease.cancel_secret)
- print >>out, " secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid)
+ print(" expires in %s" % when, file=out)
+ print(" renew_secret: %s" % base32.b2a(lease.renew_secret), file=out)
+ print(" cancel_secret: %s" % base32.b2a(lease.cancel_secret), file=out)
+ print(" secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid), file=out)
else:
- print >>out, "No leases."
- print >>out
+ print("No leases.", file=out)
+ print(file=out)
if share_type == "SDMF":
dump_SDMF_share(m, data_length, options)
try:
pieces = unpack_share(data)
- except NeedMoreDataError, e:
+ except NeedMoreDataError as e:
# retry once with the larger size
size = e.needed_bytes
f = open(options['filename'], "rb")
(ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
ig_datalen, offsets) = unpack_header(data)
- print >>out, " SDMF contents:"
- print >>out, " seqnum: %d" % seqnum
- print >>out, " root_hash: %s" % base32.b2a(root_hash)
- print >>out, " IV: %s" % base32.b2a(IV)
- print >>out, " required_shares: %d" % k
- print >>out, " total_shares: %d" % N
- print >>out, " segsize: %d" % segsize
- print >>out, " datalen: %d" % datalen
- print >>out, " enc_privkey: %d bytes" % len(enc_privkey)
- print >>out, " pubkey: %d bytes" % len(pubkey)
- print >>out, " signature: %d bytes" % len(signature)
+ print(" SDMF contents:", file=out)
+ print(" seqnum: %d" % seqnum, file=out)
+ print(" root_hash: %s" % base32.b2a(root_hash), file=out)
+ print(" IV: %s" % base32.b2a(IV), file=out)
+ print(" required_shares: %d" % k, file=out)
+ print(" total_shares: %d" % N, file=out)
+ print(" segsize: %d" % segsize, file=out)
+ print(" datalen: %d" % datalen, file=out)
+ print(" enc_privkey: %d bytes" % len(enc_privkey), file=out)
+ print(" pubkey: %d bytes" % len(pubkey), file=out)
+ print(" signature: %d bytes" % len(signature), file=out)
share_hash_ids = ",".join(sorted([str(hid)
for hid in share_hash_chain.keys()]))
- print >>out, " share_hash_chain: %s" % share_hash_ids
- print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
+ print(" share_hash_chain: %s" % share_hash_ids, file=out)
+ print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)
# the storage index isn't stored in the share itself, so we depend upon
# knowing the parent directory name to get it
fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
u = SSKVerifierURI(storage_index, fingerprint)
verify_cap = u.to_string()
- print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)
+ print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)
if options['offsets']:
# NOTE: this offset-calculation code is fragile, and needs to be
# merged with MutableShareFile's internals.
- print >>out
- print >>out, " Section Offsets:"
+ print(file=out)
+ print(" Section Offsets:", file=out)
def printoffset(name, value, shift=0):
- print >>out, "%s%20s: %s (0x%x)" % (" "*shift, name, value, value)
+ print("%s%20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
printoffset("first lease", m.HEADER_SIZE)
printoffset("share data", m.DATA_OFFSET)
o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
f.close()
- print >>out
+ print(file=out)
def dump_MDMF_share(m, length, options):
from allmydata.mutable.layout import MDMFSlotReadProxy
(seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
offsets) = verinfo
- print >>out, " MDMF contents:"
- print >>out, " seqnum: %d" % seqnum
- print >>out, " root_hash: %s" % base32.b2a(root_hash)
+ print(" MDMF contents:", file=out)
+ print(" seqnum: %d" % seqnum, file=out)
+ print(" root_hash: %s" % base32.b2a(root_hash), file=out)
#print >>out, " IV: %s" % base32.b2a(IV)
- print >>out, " required_shares: %d" % k
- print >>out, " total_shares: %d" % N
- print >>out, " segsize: %d" % segsize
- print >>out, " datalen: %d" % datalen
- print >>out, " enc_privkey: %d bytes" % len(encprivkey)
- print >>out, " pubkey: %d bytes" % len(pubkey)
- print >>out, " signature: %d bytes" % len(signature)
+ print(" required_shares: %d" % k, file=out)
+ print(" total_shares: %d" % N, file=out)
+ print(" segsize: %d" % segsize, file=out)
+ print(" datalen: %d" % datalen, file=out)
+ print(" enc_privkey: %d bytes" % len(encprivkey), file=out)
+ print(" pubkey: %d bytes" % len(pubkey), file=out)
+ print(" signature: %d bytes" % len(signature), file=out)
share_hash_ids = ",".join([str(hid)
for hid in sorted(share_hash_chain.keys())])
- print >>out, " share_hash_chain: %s" % share_hash_ids
- print >>out, " block_hash_tree: %d nodes" % len(block_hash_tree)
+ print(" share_hash_chain: %s" % share_hash_ids, file=out)
+ print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)
# the storage index isn't stored in the share itself, so we depend upon
# knowing the parent directory name to get it
fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
u = MDMFVerifierURI(storage_index, fingerprint)
verify_cap = u.to_string()
- print >>out, " verify-cap:", quote_output(verify_cap, quotemarks=False)
+ print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)
if options['offsets']:
# NOTE: this offset-calculation code is fragile, and needs to be
# merged with MutableShareFile's internals.
- print >>out
- print >>out, " Section Offsets:"
+ print(file=out)
+ print(" Section Offsets:", file=out)
def printoffset(name, value, shift=0):
- print >>out, "%s%.20s: %s (0x%x)" % (" "*shift, name, value, value)
+ print("%s%.20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
printoffset("first lease", m.HEADER_SIZE, 2)
printoffset("share data", m.DATA_OFFSET, 2)
o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
f.close()
- print >>out
+ print(file=out)
u = uri.from_string(cap)
- print >>out
+ print(file=out)
dump_uri_instance(u, nodeid, secret, out)
def _dump_secrets(storage_index, secret, nodeid, out):
if secret:
crs = hashutil.my_renewal_secret_hash(secret)
- print >>out, " client renewal secret:", base32.b2a(crs)
+ print(" client renewal secret:", base32.b2a(crs), file=out)
frs = hashutil.file_renewal_secret_hash(crs, storage_index)
- print >>out, " file renewal secret:", base32.b2a(frs)
+ print(" file renewal secret:", base32.b2a(frs), file=out)
if nodeid:
renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
- print >>out, " lease renewal secret:", base32.b2a(renew)
+ print(" lease renewal secret:", base32.b2a(renew), file=out)
ccs = hashutil.my_cancel_secret_hash(secret)
- print >>out, " client cancel secret:", base32.b2a(ccs)
+ print(" client cancel secret:", base32.b2a(ccs), file=out)
fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
- print >>out, " file cancel secret:", base32.b2a(fcs)
+ print(" file cancel secret:", base32.b2a(fcs), file=out)
if nodeid:
cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
- print >>out, " lease cancel secret:", base32.b2a(cancel)
+ print(" lease cancel secret:", base32.b2a(cancel), file=out)
def dump_uri_instance(u, nodeid, secret, out, show_header=True):
from allmydata import uri
if isinstance(u, uri.CHKFileURI):
if show_header:
- print >>out, "CHK File:"
- print >>out, " key:", base32.b2a(u.key)
- print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
- print >>out, " size:", u.size
- print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
- print >>out, " storage index:", si_b2a(u.get_storage_index())
+ print("CHK File:", file=out)
+ print(" key:", base32.b2a(u.key), file=out)
+ print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
+ print(" size:", u.size, file=out)
+ print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
_dump_secrets(u.get_storage_index(), secret, nodeid, out)
elif isinstance(u, uri.CHKFileVerifierURI):
if show_header:
- print >>out, "CHK Verifier URI:"
- print >>out, " UEB hash:", base32.b2a(u.uri_extension_hash)
- print >>out, " size:", u.size
- print >>out, " k/N: %d/%d" % (u.needed_shares, u.total_shares)
- print >>out, " storage index:", si_b2a(u.get_storage_index())
+ print("CHK Verifier URI:", file=out)
+ print(" UEB hash:", base32.b2a(u.uri_extension_hash), file=out)
+ print(" size:", u.size, file=out)
+ print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
elif isinstance(u, uri.LiteralFileURI):
if show_header:
- print >>out, "Literal File URI:"
- print >>out, " data:", quote_output(u.data)
+ print("Literal File URI:", file=out)
+ print(" data:", quote_output(u.data), file=out)
elif isinstance(u, uri.WriteableSSKFileURI): # SDMF
if show_header:
- print >>out, "SDMF Writeable URI:"
- print >>out, " writekey:", base32.b2a(u.writekey)
- print >>out, " readkey:", base32.b2a(u.readkey)
- print >>out, " storage index:", si_b2a(u.get_storage_index())
- print >>out, " fingerprint:", base32.b2a(u.fingerprint)
- print >>out
+ print("SDMF Writeable URI:", file=out)
+ print(" writekey:", base32.b2a(u.writekey), file=out)
+ print(" readkey:", base32.b2a(u.readkey), file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+ print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
+ print(file=out)
if nodeid:
we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
- print >>out, " write_enabler:", base32.b2a(we)
- print >>out
+ print(" write_enabler:", base32.b2a(we), file=out)
+ print(file=out)
_dump_secrets(u.get_storage_index(), secret, nodeid, out)
elif isinstance(u, uri.ReadonlySSKFileURI):
if show_header:
- print >>out, "SDMF Read-only URI:"
- print >>out, " readkey:", base32.b2a(u.readkey)
- print >>out, " storage index:", si_b2a(u.get_storage_index())
- print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+ print("SDMF Read-only URI:", file=out)
+ print(" readkey:", base32.b2a(u.readkey), file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+ print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.SSKVerifierURI):
if show_header:
- print >>out, "SDMF Verifier URI:"
- print >>out, " storage index:", si_b2a(u.get_storage_index())
- print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+ print("SDMF Verifier URI:", file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+ print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.WriteableMDMFFileURI): # MDMF
if show_header:
- print >>out, "MDMF Writeable URI:"
- print >>out, " writekey:", base32.b2a(u.writekey)
- print >>out, " readkey:", base32.b2a(u.readkey)
- print >>out, " storage index:", si_b2a(u.get_storage_index())
- print >>out, " fingerprint:", base32.b2a(u.fingerprint)
- print >>out
+ print("MDMF Writeable URI:", file=out)
+ print(" writekey:", base32.b2a(u.writekey), file=out)
+ print(" readkey:", base32.b2a(u.readkey), file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+ print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
+ print(file=out)
if nodeid:
we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
- print >>out, " write_enabler:", base32.b2a(we)
- print >>out
+ print(" write_enabler:", base32.b2a(we), file=out)
+ print(file=out)
_dump_secrets(u.get_storage_index(), secret, nodeid, out)
elif isinstance(u, uri.ReadonlyMDMFFileURI):
if show_header:
- print >>out, "MDMF Read-only URI:"
- print >>out, " readkey:", base32.b2a(u.readkey)
- print >>out, " storage index:", si_b2a(u.get_storage_index())
- print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+ print("MDMF Read-only URI:", file=out)
+ print(" readkey:", base32.b2a(u.readkey), file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+ print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.MDMFVerifierURI):
if show_header:
- print >>out, "MDMF Verifier URI:"
- print >>out, " storage index:", si_b2a(u.get_storage_index())
- print >>out, " fingerprint:", base32.b2a(u.fingerprint)
+ print("MDMF Verifier URI:", file=out)
+ print(" storage index:", si_b2a(u.get_storage_index()), file=out)
+ print(" fingerprint:", base32.b2a(u.fingerprint), file=out)
elif isinstance(u, uri.ImmutableDirectoryURI): # CHK-based directory
if show_header:
- print >>out, "CHK Directory URI:"
+ print("CHK Directory URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.ImmutableDirectoryURIVerifier):
if show_header:
- print >>out, "CHK Directory Verifier URI:"
+ print("CHK Directory Verifier URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.DirectoryURI): # SDMF-based directory
if show_header:
- print >>out, "Directory Writeable URI:"
+ print("Directory Writeable URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.ReadonlyDirectoryURI):
if show_header:
- print >>out, "Directory Read-only URI:"
+ print("Directory Read-only URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.DirectoryURIVerifier):
if show_header:
- print >>out, "Directory Verifier URI:"
+ print("Directory Verifier URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.MDMFDirectoryURI): # MDMF-based directory
if show_header:
- print >>out, "Directory Writeable URI:"
+ print("Directory Writeable URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.ReadonlyMDMFDirectoryURI):
if show_header:
- print >>out, "Directory Read-only URI:"
+ print("Directory Read-only URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
elif isinstance(u, uri.MDMFDirectoryURIVerifier):
if show_header:
- print >>out, "Directory Verifier URI:"
+ print("Directory Verifier URI:", file=out)
dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
else:
- print >>out, "unknown cap type"
+ print("unknown cap type", file=out)
class FindSharesOptions(BaseOptions):
def getSynopsis(self):
d = os.path.join(d, "storage/shares", sharedir)
if os.path.exists(d):
for shnum in listdir_unicode(d):
- print >>out, os.path.join(d, shnum)
+ print(os.path.join(d, shnum), file=out)
return 0
try:
pieces = unpack_share(data)
- except NeedMoreDataError, e:
+ except NeedMoreDataError as e:
# retry once with the larger size
size = e.needed_bytes
f.seek(m.DATA_OFFSET)
pubkey, signature, share_hash_chain, block_hash_tree,
share_data, enc_privkey) = pieces
- print >>out, "SDMF %s %d/%d %d #%d:%s %d %s" % \
+ print("SDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, base32.b2a(root_hash),
- expiration, quote_output(abs_sharefile))
+ expiration, quote_output(abs_sharefile)), file=out)
elif share_type == "MDMF":
from allmydata.mutable.layout import MDMFSlotReadProxy
fake_shnum = 0
verinfo = extract(p.get_verinfo)
(seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
offsets) = verinfo
- print >>out, "MDMF %s %d/%d %d #%d:%s %d %s" % \
+ print("MDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, base32.b2a(root_hash),
- expiration, quote_output(abs_sharefile))
+ expiration, quote_output(abs_sharefile)), file=out)
else:
- print >>out, "UNKNOWN mutable %s" % quote_output(abs_sharefile)
+ print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)
elif struct.unpack(">L", prefix[:4]) == (1,):
# immutable
filesize = unpacked["size"]
ueb_hash = unpacked["UEB_hash"]
- print >>out, "CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
+ print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
ueb_hash, expiration,
- quote_output(abs_sharefile))
+ quote_output(abs_sharefile)), file=out)
else:
- print >>out, "UNKNOWN really-unknown %s" % quote_output(abs_sharefile)
+ print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)
f.close()
si_dir = os.path.join(abbrevdir, si_s)
catalog_shares_one_abbrevdir(si_s, si_dir, now, out,err)
except:
- print >>err, "Error processing %s" % quote_output(abbrevdir)
+ print("Error processing %s" % quote_output(abbrevdir), file=err)
failure.Failure().printTraceback(err)
return 0
describe_share(abs_sharefile, si_s, shnum_s, now,
out)
except:
- print >>err, "Error processing %s" % quote_output(abs_sharefile)
+ print("Error processing %s" % quote_output(abs_sharefile), file=err)
failure.Failure().printTraceback(err)
except:
- print >>err, "Error processing %s" % quote_output(si_dir)
+ print("Error processing %s" % quote_output(si_dir), file=err)
failure.Failure().printTraceback(err)
class CorruptShareOptions(BaseOptions):
def flip_bit(start, end):
offset = random.randrange(start, end)
bit = random.randrange(0, 8)
- print >>out, "[%d..%d): %d.b%d" % (start, end, offset, bit)
+ print("[%d..%d): %d.b%d" % (start, end, offset, bit), file=out)
f = open(fn, "rb+")
f.seek(offset)
d = f.read(1)
twisted_trial.run()
-def fixOptionsClass( (subcmd, shortcut, OptionsClass, desc) ):
+def fixOptionsClass(subcmd_info):
+ (subcmd, shortcut, OptionsClass, desc) = subcmd_info
class FixedOptionsClass(OptionsClass):
def getSynopsis(self):
t = OptionsClass.getSynopsis(self)
return t
def opt_help(self):
- print str(self)
+ print(str(self))
sys.exit(0)
def flogtool(config):
+from __future__ import print_function
import os, sys
from allmydata.scripts.common import BasedirOptions
if os.path.exists(basedir):
if listdir_unicode(basedir):
- print >>err, "The base directory %s is not empty." % quote_output(basedir)
- print >>err, "To avoid clobbering anything, I am going to quit now."
- print >>err, "Please use a different directory, or empty this one."
+ print("The base directory %s is not empty." % quote_output(basedir), file=err)
+ print("To avoid clobbering anything, I am going to quit now.", file=err)
+ print("Please use a different directory, or empty this one.", file=err)
return -1
# we're willing to use an empty directory
else:
+from __future__ import print_function
import os, sys
from cStringIO import StringIO
def opt_version(self):
import allmydata
- print >>self.stdout, allmydata.get_package_versions_string(debug=True)
+ print(allmydata.get_package_versions_string(debug=True), file=self.stdout)
self.no_command_needed = True
def opt_version_and_path(self):
import allmydata
- print >>self.stdout, allmydata.get_package_versions_string(show_paths=True, debug=True)
+ print(allmydata.get_package_versions_string(show_paths=True, debug=True), file=self.stdout)
self.no_command_needed = True
def getSynopsis(self):
try:
config.parseOptions(argv)
- except usage.error, e:
+ except usage.error as e:
if not run_by_human:
raise
c = config
while hasattr(c, 'subOptions'):
c = c.subOptions
- print >>stdout, str(c)
+ print(str(c), file=stdout)
try:
msg = e.args[0].decode(get_io_encoding())
except Exception:
msg = repr(e)
- print >>stdout, "%s: %s\n" % (sys.argv[0], quote_output(msg, quotemarks=False))
+ print("%s: %s\n" % (sys.argv[0], quote_output(msg, quotemarks=False)), file=stdout)
return 1
command = config.subCommand
+from __future__ import print_function
import os, time
from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \
where = options.where
try:
rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
if path == '/':
url = self.make_url(url, ophandle)
resp = do_http("POST", url)
if resp.status not in (200, 302):
- print >>stderr, format_http_error("ERROR", resp)
+ print(format_http_error("ERROR", resp), file=stderr)
return 1
# now we poll for results. We nominally poll at t=1, 5, 10, 30, 60,
# 90, k*120 seconds, but if the poll takes non-zero time, that will
stderr = self.options.stderr
resp = do_http("GET", url)
if resp.status != 200:
- print >>stderr, format_http_error("ERROR", resp)
+ print(format_http_error("ERROR", resp), file=stderr)
return True
jdata = resp.read()
data = simplejson.loads(jdata)
return False
if self.options.get("raw"):
if is_printable_ascii(jdata):
- print >>stdout, jdata
+ print(jdata, file=stdout)
else:
- print >>stderr, "The JSON response contained unprintable characters:\n%s" % quote_output(jdata)
+ print("The JSON response contained unprintable characters:\n%s" % quote_output(jdata), file=stderr)
return True
self.write_results(data)
return True
+from __future__ import print_function
import os, sys, signal, time
from allmydata.scripts.common import BasedirOptions
def start(opts, out=sys.stdout, err=sys.stderr):
basedir = opts['basedir']
- print >>out, "STARTING", quote_output(basedir)
+ print("STARTING", quote_output(basedir), file=out)
if not os.path.isdir(basedir):
- print >>err, "%s does not look like a directory at all" % quote_output(basedir)
+ print("%s does not look like a directory at all" % quote_output(basedir), file=err)
return 1
for fn in listdir_unicode(basedir):
if fn.endswith(u".tac"):
tac = str(fn)
break
else:
- print >>err, "%s does not look like a node directory (no .tac file)" % quote_output(basedir)
+ print("%s does not look like a node directory (no .tac file)" % quote_output(basedir), file=err)
return 1
if "client" in tac:
nodetype = "client"
def stop(config, out=sys.stdout, err=sys.stderr):
basedir = config['basedir']
- print >>out, "STOPPING", quote_output(basedir)
+ print("STOPPING", quote_output(basedir), file=out)
pidfile = os.path.join(basedir, "twistd.pid")
if not os.path.exists(pidfile):
- print >>err, "%s does not look like a running node directory (no twistd.pid)" % quote_output(basedir)
+ print("%s does not look like a running node directory (no twistd.pid)" % quote_output(basedir), file=err)
# we define rc=2 to mean "nothing is running, but it wasn't me who
# stopped it"
return 2
# the user but keep waiting until they give up.
try:
os.kill(pid, signal.SIGKILL)
- except OSError, oserr:
+ except OSError as oserr:
if oserr.errno == 3:
- print oserr.strerror
+ print(oserr.strerror)
# the process didn't exist, so wipe the pid file
os.remove(pidfile)
return 2
try:
os.kill(pid, 0)
except OSError:
- print >>out, "process %d is dead" % pid
+ print("process %d is dead" % pid, file=out)
return
wait -= 1
if wait < 0:
if first_time:
- print >>err, ("It looks like pid %d is still running "
+ print(("It looks like pid %d is still running "
"after %d seconds" % (pid,
- (time.time() - start)))
- print >>err, "I will keep watching it until you interrupt me."
+ (time.time() - start))), file=err)
+ print("I will keep watching it until you interrupt me.", file=err)
wait = 10
first_time = False
else:
- print >>err, "pid %d still running after %d seconds" % \
- (pid, (time.time() - start))
+ print("pid %d still running after %d seconds" % \
+ (pid, (time.time() - start)), file=err)
wait = 10
time.sleep(1)
# we define rc=1 to mean "I think something is still running, sorry"
def restart(config, stdout, stderr):
rc = stop(config, stdout, stderr)
if rc == 2:
- print >>stderr, "ignoring couldn't-stop"
+ print("ignoring couldn't-stop", file=stderr)
rc = 0
if rc:
- print >>stderr, "not restarting"
+ print("not restarting", file=stderr)
return rc
return start(config, stdout, stderr)
precondition(isinstance(basedir, unicode), basedir)
if not os.path.isdir(basedir):
- print >>stderr, "%s does not look like a directory at all" % quote_output(basedir)
+ print("%s does not look like a directory at all" % quote_output(basedir), file=stderr)
return 1
for fn in listdir_unicode(basedir):
if fn.endswith(u".tac"):
tac = str(fn)
break
else:
- print >>stderr, "%s does not look like a node directory (no .tac file)" % quote_output(basedir)
+ print("%s does not look like a node directory (no .tac file)" % quote_output(basedir), file=stderr)
return 1
if "client" not in tac:
- print >>stderr, ("%s looks like it contains a non-client node (%s).\n"
+ print(("%s looks like it contains a non-client node (%s).\n"
"Use 'tahoe start' instead of 'tahoe run'."
- % (quote_output(basedir), tac))
+ % (quote_output(basedir), tac)), file=stderr)
return 1
os.chdir(basedir)
+from __future__ import print_function
import os, sys
from allmydata.scripts.common import BasedirOptions
if os.path.exists(basedir):
if listdir_unicode(basedir):
- print >>err, "The base directory %s is not empty." % quote_output(basedir)
- print >>err, "To avoid clobbering anything, I am going to quit now."
- print >>err, "Please use a different directory, or empty this one."
+ print("The base directory %s is not empty." % quote_output(basedir), file=err)
+ print("To avoid clobbering anything, I am going to quit now.", file=err)
+ print("Please use a different directory, or empty this one.", file=err)
return -1
# we're willing to use an empty directory
else:
+from __future__ import print_function
import os.path
import codecs
stderr = options.stderr
if u":" in alias:
# a single trailing colon will already have been stripped if present
- print >>stderr, "Alias names cannot contain colons."
+ print("Alias names cannot contain colons.", file=stderr)
return 1
if u" " in alias:
- print >>stderr, "Alias names cannot contain spaces."
+ print("Alias names cannot contain spaces.", file=stderr)
return 1
old_aliases = get_aliases(nodedir)
if alias in old_aliases:
- print >>stderr, "Alias %s already exists!" % quote_output(alias)
+ print("Alias %s already exists!" % quote_output(alias), file=stderr)
return 1
aliasfile = os.path.join(nodedir, "private", "aliases")
cap = uri.from_string_dirnode(cap).to_string()
add_line_to_aliasfile(aliasfile, alias, cap)
- print >>stdout, "Alias %s added" % quote_output(alias)
+ print("Alias %s added" % quote_output(alias), file=stdout)
return 0
def create_alias(options):
stderr = options.stderr
if u":" in alias:
# a single trailing colon will already have been stripped if present
- print >>stderr, "Alias names cannot contain colons."
+ print("Alias names cannot contain colons.", file=stderr)
return 1
if u" " in alias:
- print >>stderr, "Alias names cannot contain spaces."
+ print("Alias names cannot contain spaces.", file=stderr)
return 1
old_aliases = get_aliases(nodedir)
if alias in old_aliases:
- print >>stderr, "Alias %s already exists!" % quote_output(alias)
+ print("Alias %s already exists!" % quote_output(alias), file=stderr)
return 1
aliasfile = os.path.join(nodedir, "private", "aliases")
add_line_to_aliasfile(aliasfile, alias, new_uri)
- print >>stdout, "Alias %s created" % (quote_output(alias),)
+ print("Alias %s created" % (quote_output(alias),), file=stdout)
return 0
def list_aliases(options):
rc = 0
for name in alias_names:
try:
- print >>stdout, fmt % (unicode_to_output(name), unicode_to_output(aliases[name].decode('utf-8')))
+ print(fmt % (unicode_to_output(name), unicode_to_output(aliases[name].decode('utf-8'))), file=stdout)
except (UnicodeEncodeError, UnicodeDecodeError):
- print >>stderr, fmt % (quote_output(name), quote_output(aliases[name]))
+ print(fmt % (quote_output(name), quote_output(aliases[name])), file=stderr)
rc = 1
if rc == 1:
- print >>stderr, "\nThis listing included aliases or caps that could not be converted to the terminal" \
- "\noutput encoding. These are shown using backslash escapes and in quotes."
+ print("\nThis listing included aliases or caps that could not be converted to the terminal" \
+ "\noutput encoding. These are shown using backslash escapes and in quotes.", file=stderr)
return rc
+from __future__ import print_function
import os.path
import time
bdbfile = abspath_expanduser_unicode(bdbfile)
self.backupdb = backupdb.get_backupdb(bdbfile, stderr)
if not self.backupdb:
- print >>stderr, "ERROR: Unable to load backup db."
+ print("ERROR: Unable to load backup db.", file=stderr)
return 1
try:
rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
to_url = nodeurl + "uri/%s/" % urllib.quote(rootcap)
if resp.status == 404:
resp = do_http("POST", archives_url + "?t=mkdir")
if resp.status != 200:
- print >>stderr, format_http_error("Unable to create target directory", resp)
+ print(format_http_error("Unable to create target directory", resp), file=stderr)
return 1
# second step: process the tree
elapsed_time = str(end_timestamp - start_timestamp).split('.')[0]
if self.verbosity >= 1:
- print >>stdout, (" %d files uploaded (%d reused), "
+ print((" %d files uploaded (%d reused), "
"%d files skipped, "
"%d directories created (%d reused), "
"%d directories skipped"
self.files_skipped,
self.directories_created,
self.directories_reused,
- self.directories_skipped))
+ self.directories_skipped)), file=stdout)
if self.verbosity >= 2:
- print >>stdout, (" %d files checked, %d directories checked"
+ print((" %d files checked, %d directories checked"
% (self.files_checked,
- self.directories_checked))
- print >>stdout, " backup done, elapsed time: %s" % elapsed_time
+ self.directories_checked)), file=stdout)
+ print(" backup done, elapsed time: %s" % elapsed_time, file=stdout)
# The command exits with code 2 if files or directories were skipped
if self.files_skipped or self.directories_skipped:
def verboseprint(self, msg):
precondition(isinstance(msg, str), msg)
if self.verbosity >= 2:
- print >>self.options.stdout, msg
+ print(msg, file=self.options.stdout)
def warn(self, msg):
precondition(isinstance(msg, str), msg)
- print >>self.options.stderr, msg
+ print(msg, file=self.options.stderr)
def process(self, localpath):
precondition(isinstance(localpath, unicode), localpath)
+from __future__ import print_function
import urllib
import simplejson
nodeurl += "/"
try:
rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
if path == '/':
resp = do_http("POST", url)
if resp.status != 200:
- print >>stderr, format_http_error("ERROR", resp)
+ print(format_http_error("ERROR", resp), file=stderr)
return 1
jdata = resp.read()
if options.get("raw"):
def lineReceived(self, line):
if self.in_error:
- print >>self.stderr, quote_output(line, quotemarks=False)
+ print(quote_output(line, quotemarks=False), file=self.stderr)
return
if line.startswith("ERROR:"):
self.in_error = True
self.streamer.rc = 1
- print >>self.stderr, quote_output(line, quotemarks=False)
+ print(quote_output(line, quotemarks=False), file=self.stderr)
return
d = simplejson.loads(line)
self.num_objects += 1
# non-verbose means print a progress marker every 100 files
if self.num_objects % 100 == 0:
- print >>stdout, "%d objects checked.." % self.num_objects
+ print("%d objects checked.." % self.num_objects, file=stdout)
cr = d["check-results"]
if cr["results"]["healthy"]:
self.files_healthy += 1
# LIT files and directories do not have a "summary" field.
summary = cr.get("summary", "Healthy (LIT)")
- print >>stdout, "%s: %s" % (quote_path(path), quote_output(summary, quotemarks=False))
+ print("%s: %s" % (quote_path(path), quote_output(summary, quotemarks=False)), file=stdout)
# always print out corrupt shares
for shareloc in cr["results"].get("list-corrupt-shares", []):
(serverid, storage_index, sharenum) = shareloc
- print >>stdout, " corrupt: %s" % _quote_serverid_index_share(serverid, storage_index, sharenum)
+ print(" corrupt: %s" % _quote_serverid_index_share(serverid, storage_index, sharenum), file=stdout)
def done(self):
if self.in_error:
return
stdout = self.stdout
- print >>stdout, "done: %d objects checked, %d healthy, %d unhealthy" \
- % (self.num_objects, self.files_healthy, self.files_unhealthy)
+ print("done: %d objects checked, %d healthy, %d unhealthy" \
+ % (self.num_objects, self.files_healthy, self.files_unhealthy), file=stdout)
class DeepCheckAndRepairOutput(LineOnlyReceiver):
delimiter = "\n"
def lineReceived(self, line):
if self.in_error:
- print >>self.stderr, quote_output(line, quotemarks=False)
+ print(quote_output(line, quotemarks=False), file=self.stderr)
return
if line.startswith("ERROR:"):
self.in_error = True
self.streamer.rc = 1
- print >>self.stderr, quote_output(line, quotemarks=False)
+ print(quote_output(line, quotemarks=False), file=self.stderr)
return
d = simplejson.loads(line)
self.num_objects += 1
# non-verbose means print a progress marker every 100 files
if self.num_objects % 100 == 0:
- print >>stdout, "%d objects checked.." % self.num_objects
+ print("%d objects checked.." % self.num_objects, file=stdout)
crr = d["check-and-repair-results"]
if d["storage-index"]:
if crr["pre-repair-results"]["results"]["healthy"]:
summary = "healthy"
else:
summary = "not healthy"
- print >>stdout, "%s: %s" % (quote_path(path), summary)
+ print("%s: %s" % (quote_path(path), summary), file=stdout)
# always print out corrupt shares
prr = crr.get("pre-repair-results", {})
for shareloc in prr.get("results", {}).get("list-corrupt-shares", []):
(serverid, storage_index, sharenum) = shareloc
- print >>stdout, " corrupt: %s" % _quote_serverid_index_share(serverid, storage_index, sharenum)
+ print(" corrupt: %s" % _quote_serverid_index_share(serverid, storage_index, sharenum), file=stdout)
# always print out repairs
if crr["repair-attempted"]:
if crr["repair-successful"]:
- print >>stdout, " repair successful"
+ print(" repair successful", file=stdout)
else:
- print >>stdout, " repair failed"
+ print(" repair failed", file=stdout)
def done(self):
if self.in_error:
return
stdout = self.stdout
- print >>stdout, "done: %d objects checked" % self.num_objects
- print >>stdout, " pre-repair: %d healthy, %d unhealthy" \
+ print("done: %d objects checked" % self.num_objects, file=stdout)
+ print(" pre-repair: %d healthy, %d unhealthy" \
% (self.pre_repair_files_healthy,
- self.pre_repair_files_unhealthy)
- print >>stdout, " %d repairs attempted, %d successful, %d failed" \
+ self.pre_repair_files_unhealthy), file=stdout)
+ print(" %d repairs attempted, %d successful, %d failed" \
% (self.repairs_attempted,
self.repairs_successful,
- (self.repairs_attempted - self.repairs_successful))
- print >>stdout, " post-repair: %d healthy, %d unhealthy" \
+ (self.repairs_attempted - self.repairs_successful)), file=stdout)
+ print(" post-repair: %d healthy, %d unhealthy" \
% (self.post_repair_files_healthy,
- self.post_repair_files_unhealthy)
+ self.post_repair_files_unhealthy), file=stdout)
class DeepCheckStreamer(LineOnlyReceiver):
try:
rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
if path == '/':
url += "&add-lease=true"
resp = do_http("POST", url)
if resp.status not in (200, 302):
- print >>stderr, format_http_error("ERROR", resp)
+ print(format_http_error("ERROR", resp), file=stderr)
return 1
# use Twisted to split this into lines
+from __future__ import print_function
import os.path
import urllib
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.encodingutil import unicode_to_url, listdir_unicode, quote_output, to_str
from allmydata.util.assertutil import precondition
+import six
class MissingSourceError(TahoeError):
self.mutable = d.get("mutable", False) # older nodes don't provide it
self.children_d = dict( [(unicode(name),value)
for (name,value)
- in d["children"].iteritems()] )
+ in six.iteritems(d["children"])] )
self.children = None
def init_from_parsed(self, parsed):
self.mutable = d.get("mutable", False) # older nodes don't provide it
self.children_d = dict( [(unicode(name),value)
for (name,value)
- in d["children"].iteritems()] )
+ in six.iteritems(d["children"])] )
self.children = None
def populate(self, recurse):
self.mutable = d.get("mutable", False) # older nodes don't provide it
self.children_d = dict( [(unicode(name),value)
for (name,value)
- in d["children"].iteritems()] )
+ in six.iteritems(d["children"])] )
self.children = None
def init_from_grid(self, writecap, readcap):
self.mutable = d.get("mutable", False) # older nodes don't provide it
self.children_d = dict( [(unicode(name),value)
for (name,value)
- in d["children"].iteritems()] )
+ in six.iteritems(d["children"])] )
self.children = None
def just_created(self, writecap):
self.stderr = options.stderr
if verbosity >= 2 and not self.progressfunc:
def progress(message):
- print >>self.stderr, message
+ print(message, file=self.stderr)
self.progressfunc = progress
self.caps_only = options["caps-only"]
self.cache = {}
try:
status = self.try_copy()
return status
- except TahoeError, te:
+ except TahoeError as te:
if verbosity >= 2:
Failure().printTraceback(self.stderr)
- print >>self.stderr
+ print(file=self.stderr)
te.display(self.stderr)
return 1
return 1
def to_stderr(self, text):
- print >>self.stderr, text
+ print(text, file=self.stderr)
def get_target_info(self, destination_spec):
rootcap, path = get_alias(self.aliases, destination_spec, None)
def dump_graph(self, s, indent=" "):
for name, child in s.children.items():
- print "%s%s: %r" % (indent, quote_output(name), child)
+ print("%s%s: %r" % (indent, quote_output(name), child))
if isinstance(child, (LocalDirectorySource, TahoeDirectorySource)):
self.dump_graph(child, indent+" ")
def announce_success(self, msg):
if self.verbosity >= 1:
- print >>self.stdout, "Success: %s" % msg
+ print("Success: %s" % msg, file=self.stdout)
return 0
def copy_file(self, source, target):
+from __future__ import print_function
import os, urllib
from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \
nodeurl += "/"
try:
rootcap, path = get_alias(aliases, from_file, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
url = nodeurl + "uri/%s" % urllib.quote(rootcap)
outf.close()
rc = 0
else:
- print >>stderr, format_http_error("Error during GET", resp)
+ print(format_http_error("Error during GET", resp), file=stderr)
rc = 1
return rc
+from __future__ import print_function
import urllib, time
import simplejson
where = where[:-1]
try:
rootcap, path = get_alias(aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
url = nodeurl + "uri/%s" % urllib.quote(rootcap)
url += "?t=json"
resp = do_http("GET", url)
if resp.status == 404:
- print >>stderr, "No such file or directory"
+ print("No such file or directory", file=stderr)
return 2
if resp.status != 200:
- print >>stderr, format_http_error("Error during GET", resp)
+ print(format_http_error("Error during GET", resp), file=stderr)
if resp.status == 0:
return 3
else:
if options['json']:
# The webapi server should always output printable ASCII.
if is_printable_ascii(data):
- print >>stdout, data
+ print(data, file=stdout)
return 0
else:
- print >>stderr, "The JSON response contained unprintable characters:"
- print >>stderr, quote_output(data, quotemarks=False)
+ print("The JSON response contained unprintable characters:", file=stderr)
+ print(quote_output(data, quotemarks=False), file=stderr)
return 1
try:
parsed = simplejson.loads(data)
- except Exception, e:
- print >>stderr, "error: %s" % quote_output(e.args[0], quotemarks=False)
- print >>stderr, "Could not parse JSON response:"
- print >>stderr, quote_output(data, quotemarks=False)
+ except Exception as e:
+ print("error: %s" % quote_output(e.args[0], quotemarks=False), file=stderr)
+ print("Could not parse JSON response:", file=stderr)
+ print(quote_output(data, quotemarks=False), file=stderr)
return 1
nodetype, d = parsed
rc = 0
for (encoding_error, row) in rows:
if encoding_error:
- print >>stderr, (fmt % tuple(row)).rstrip()
+ print((fmt % tuple(row)).rstrip(), file=stderr)
rc = 1
else:
- print >>stdout, (fmt % tuple(row)).rstrip()
+ print((fmt % tuple(row)).rstrip(), file=stdout)
if rc == 1:
- print >>stderr, "\nThis listing included files whose names could not be converted to the terminal" \
- "\noutput encoding. Their names are shown using backslash escapes and in quotes."
+ print("\nThis listing included files whose names could not be converted to the terminal" \
+ "\noutput encoding. Their names are shown using backslash escapes and in quotes.", file=stderr)
if has_unknowns:
- print >>stderr, "\nThis listing included unknown objects. Using a webapi server that supports" \
- "\na later version of Tahoe may help."
+ print("\nThis listing included unknown objects. Using a webapi server that supports" \
+ "\na later version of Tahoe may help.", file=stderr)
return rc
+from __future__ import print_function
import urllib, simplejson
from twisted.protocols.basic import LineOnlyReceiver
where = options.where
try:
rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
if path == '/':
url += "?t=stream-manifest"
resp = do_http("POST", url)
if resp.status not in (200, 302):
- print >>stderr, format_http_error("ERROR", resp)
+ print(format_http_error("ERROR", resp), file=stderr)
return 1
#print "RESP", dir(resp)
# use Twisted to split this into lines
stdout = self.options.stdout
stderr = self.options.stderr
if self.in_error:
- print >>stderr, quote_output(line, quotemarks=False)
+ print(quote_output(line, quotemarks=False), file=stderr)
return
if line.startswith("ERROR:"):
self.in_error = True
self.rc = 1
- print >>stderr, quote_output(line, quotemarks=False)
+ print(quote_output(line, quotemarks=False), file=stderr)
return
try:
d = simplejson.loads(line.decode('utf-8'))
- except Exception, e:
- print >>stderr, "ERROR could not decode/parse %s\nERROR %r" % (quote_output(line), e)
+ except Exception as e:
+ print("ERROR could not decode/parse %s\nERROR %r" % (quote_output(line), e), file=stderr)
else:
if d["type"] in ("file", "directory"):
if self.options["storage-index"]:
si = d.get("storage-index", None)
if si:
- print >>stdout, quote_output(si, quotemarks=False)
+ print(quote_output(si, quotemarks=False), file=stdout)
elif self.options["verify-cap"]:
vc = d.get("verifycap", None)
if vc:
- print >>stdout, quote_output(vc, quotemarks=False)
+ print(quote_output(vc, quotemarks=False), file=stdout)
elif self.options["repair-cap"]:
vc = d.get("repaircap", None)
if vc:
- print >>stdout, quote_output(vc, quotemarks=False)
+ print(quote_output(vc, quotemarks=False), file=stdout)
else:
- print >>stdout, "%s %s" % (quote_output(d["cap"], quotemarks=False),
- quote_path(d["path"], quotemarks=False))
+ print("%s %s" % (quote_output(d["cap"], quotemarks=False),
+ quote_path(d["path"], quotemarks=False)), file=stdout)
def manifest(options):
return ManifestStreamer().run(options)
"largest-immutable-file",
)
width = max([len(k) for k in keys])
- print >>stdout, "Counts and Total Sizes:"
+ print("Counts and Total Sizes:", file=stdout)
for k in keys:
fmt = "%" + str(width) + "s: %d"
if k in data:
value = data[k]
if not k.startswith("count-") and value > 1000:
absize = abbreviate_space_both(value)
- print >>stdout, fmt % (k, data[k]), " ", absize
+ print(fmt % (k, data[k]), " ", absize, file=stdout)
else:
- print >>stdout, fmt % (k, data[k])
+ print(fmt % (k, data[k]), file=stdout)
if data["size-files-histogram"]:
- print >>stdout, "Size Histogram:"
+ print("Size Histogram:", file=stdout)
prevmax = None
maxlen = max([len(str(maxsize))
for (minsize, maxsize, count)
linefmt = minfmt + "-" + maxfmt + " : " + countfmt + " %s"
for (minsize, maxsize, count) in data["size-files-histogram"]:
if prevmax is not None and minsize != prevmax+1:
- print >>stdout, " "*(maxlen-1) + "..."
+ print(" "*(maxlen-1) + "...", file=stdout)
prevmax = maxsize
- print >>stdout, linefmt % (minsize, maxsize, count,
- abbreviate_space_both(maxsize))
+ print(linefmt % (minsize, maxsize, count,
+ abbreviate_space_both(maxsize)), file=stdout)
def stats(options):
return StatsGrabber().run(options)
+from __future__ import print_function
import urllib
from allmydata.scripts.common_http import do_http, check_http_error
if where:
try:
rootcap, path = get_alias(aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
return rc
new_uri = resp.read().strip()
# emit its write-cap
- print >>stdout, quote_output(new_uri, quotemarks=False)
+ print(quote_output(new_uri, quotemarks=False), file=stdout)
return 0
# create a new directory at the given location
resp = do_http("POST", url)
check_http_error(resp, stderr)
new_uri = resp.read().strip()
- print >>stdout, quote_output(new_uri, quotemarks=False)
+ print(quote_output(new_uri, quotemarks=False), file=stdout)
return 0
+from __future__ import print_function
import re
import urllib
nodeurl += "/"
try:
rootcap, from_path = get_alias(aliases, from_file, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
from_url = nodeurl + "uri/%s" % urllib.quote(rootcap)
# figure out the source cap
resp = do_http("GET", from_url + "?t=json")
if not re.search(r'^2\d\d$', str(resp.status)):
- print >>stderr, format_http_error("Error", resp)
+ print(format_http_error("Error", resp), file=stderr)
return 1
data = resp.read()
nodetype, attrs = simplejson.loads(data)
# now get the target
try:
rootcap, path = get_alias(aliases, to_file, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
to_url = nodeurl + "uri/%s" % urllib.quote(rootcap)
status = resp.status
if not re.search(r'^2\d\d$', str(status)):
if status == 409:
- print >>stderr, "Error: You can't overwrite a directory with a file"
+ print("Error: You can't overwrite a directory with a file", file=stderr)
else:
- print >>stderr, format_http_error("Error", resp)
+ print(format_http_error("Error", resp), file=stderr)
if mode == "move":
- print >>stderr, "NOT removing the original"
+ print("NOT removing the original", file=stderr)
return 1
if mode == "move":
# now remove the original
resp = do_http("DELETE", from_url)
if not re.search(r'^2\d\d$', str(resp.status)):
- print >>stderr, format_http_error("Error deleting original after move", resp)
+ print(format_http_error("Error deleting original after move", resp), file=stderr)
return 2
- print >>stdout, "OK"
+ print("OK", file=stdout)
return 0
+from __future__ import print_function
import os
from cStringIO import StringIO
else:
try:
rootcap, path = get_alias(aliases, to_file, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
if path.startswith("/"):
suggestion = to_file.replace(u"/", u"", 1)
- print >>stderr, "Error: The remote filename must not start with a slash"
- print >>stderr, "Please try again, perhaps with %s" % quote_output(suggestion)
+ print("Error: The remote filename must not start with a slash", file=stderr)
+ print("Please try again, perhaps with %s" % quote_output(suggestion), file=stderr)
return 1
url = nodeurl + "uri/%s/" % urllib.quote(rootcap)
if path:
# do_http() can't use stdin directly: for one thing, we need a
# Content-Length field. So we currently must copy it.
if verbosity > 0:
- print >>stderr, "waiting for file data on stdin.."
+ print("waiting for file data on stdin..", file=stderr)
data = stdin.read()
infileobj = StringIO(data)
resp = do_http("PUT", url, infileobj)
if resp.status in (200, 201,):
- print >>stderr, format_http_success(resp)
- print >>stdout, quote_output(resp.read(), quotemarks=False)
+ print(format_http_success(resp), file=stderr)
+ print(quote_output(resp.read(), quotemarks=False), file=stdout)
return 0
- print >>stderr, format_http_error("Error", resp)
+ print(format_http_error("Error", resp), file=stderr)
return 1
+from __future__ import print_function
import urllib
from allmydata.scripts.common_http import do_http, format_http_success, format_http_error
nodeurl += "/"
try:
rootcap, path = get_alias(aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
if not path:
- print >>stderr, """
-'tahoe %s' can only unlink directory entries, so a path must be given.""" % (command,)
+ print("""
+'tahoe %s' can only unlink directory entries, so a path must be given.""" % (command,), file=stderr)
return 1
url = nodeurl + "uri/%s" % urllib.quote(rootcap)
resp = do_http("DELETE", url)
if resp.status in (200,):
- print >>stdout, format_http_success(resp)
+ print(format_http_success(resp), file=stdout)
return 0
- print >>stderr, format_http_error("ERROR", resp)
+ print(format_http_error("ERROR", resp), file=stderr)
return 1
if where:
try:
rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS)
- except UnknownAliasError, e:
+ except UnknownAliasError as e:
e.display(stderr)
return 1
if path == '/':
+from __future__ import print_function
import os
import pickle
def remote_provide(self, provider, nickname):
tubid = self.get_tubid(provider)
if tubid == '<unauth>':
- print "WARNING: failed to get tubid for %s (%s)" % (provider, nickname)
+ print("WARNING: failed to get tubid for %s (%s)" % (provider, nickname))
# don't add to clients to poll (polluting data) don't care about disconnect
return
self.clients[tubid] = provider
def remote_provide(self, provider, nickname):
tubid = self.get_tubid(provider)
if self.verbose:
- print 'connect "%s" [%s]' % (nickname, tubid)
+ print('connect "%s" [%s]' % (nickname, tubid))
provider.notifyOnDisconnect(self.announce_lost_client, tubid)
StatsGatherer.remote_provide(self, provider, nickname)
def announce_lost_client(self, tubid):
- print 'disconnect "%s" [%s]' % (self.nicknames[tubid], tubid)
+ print('disconnect "%s" [%s]' % (self.nicknames[tubid], tubid))
def got_stats(self, stats, tubid, nickname):
- print '"%s" [%s]:' % (nickname, tubid)
+ print('"%s" [%s]:' % (nickname, tubid))
pprint.pprint(stats)
class PickleStatsGatherer(StdOutStatsGatherer):
try:
self.gathered_stats = pickle.load(f)
except Exception:
- print ("Error while attempting to load pickle file %s.\n"
+ print(("Error while attempting to load pickle file %s.\n"
"You may need to restore this file from a backup, or delete it if no backup is available.\n" %
- quote_output(os.path.abspath(self.picklefile)))
+ quote_output(os.path.abspath(self.picklefile))))
raise
f.close()
else:
import allmydata # for __full_version__
from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
+import six
_pyflakes_hush = [si_b2a, si_a2b, storage_index_to_dir] # re-exported
from allmydata.storage.lease import LeaseInfo
from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
# since all shares get the same lease data, we just grab the leases
# from the first share
try:
- shnum, filename = self._get_bucket_shares(storage_index).next()
+ shnum, filename = six.advance_iterator(self._get_bucket_shares(storage_index))
sf = ShareFile(filename)
return sf.get_leases()
except StopIteration:
+from __future__ import print_function
import hotshot.stats, os, random, sys
from pyutil import benchutil, randutil # http://tahoe-lafs.org/trac/pyutil
return self.random_fsnode(), random_metadata()
def init_for_pack(self, N):
- for i in xrange(len(self.children), N):
+ for i in range(len(self.children), N):
name = random_unicode(random.randrange(0, 10))
self.children.append( (name, self.random_child()) )
for (initfunc, func) in [(self.init_for_unpack, self.unpack),
(self.init_for_pack, self.pack),
(self.init_for_unpack, self.unpack_and_repack)]:
- print "benchmarking %s" % (func,)
+ print("benchmarking %s" % (func,))
for N in 16, 512, 2048, 16384:
- print "%5d" % N,
+ print("%5d" % N, end=' ')
benchutil.rep_bench(func, N, initfunc=initfunc, MAXREPS=20, UNITS_PER_SECOND=1000)
benchutil.print_bench_footer(UNITS_PER_SECOND=1000)
- print "(milliseconds)"
+ print("(milliseconds)")
def prof_benchmarks(self):
# This requires pyutil >= v1.3.34.
if __name__ == "__main__":
if '--profile' in sys.argv:
if os.path.exists(PROF_FILE_NAME):
- print "WARNING: profiling results file '%s' already exists -- the profiling results from this run will be added into the profiling results stored in that file and then the sum of them will be printed out after this run." % (PROF_FILE_NAME,)
+ print("WARNING: profiling results file '%s' already exists -- the profiling results from this run will be added into the profiling results stored in that file and then the sum of them will be printed out after this run." % (PROF_FILE_NAME,))
b = B()
b.prof_benchmarks()
b.print_stats()
in a machine-readable logfile.
"""
+from __future__ import print_function
import time, subprocess, md5, os.path, random
from twisted.python import usage
rc = p.returncode
if expected_rc != None and rc != expected_rc:
if stderr:
- print "STDERR:"
- print stderr
+ print("STDERR:")
+ print(stderr)
raise CommandFailed("command '%s' failed: rc=%d" % (cmd, rc))
return stdout, stderr
def cli(self, cmd, *args, **kwargs):
- print "tahoe", cmd, " ".join(args)
+ print("tahoe", cmd, " ".join(args))
stdout, stderr = self.command(self.tahoe, cmd, "-d", self.nodedir,
*args, **kwargs)
if not kwargs.get("ignore_stderr", False) and stderr != "":
return stdout
def stop_old_node(self):
- print "tahoe stop", self.nodedir, "(force)"
+ print("tahoe stop", self.nodedir, "(force)")
self.command(self.tahoe, "stop", self.nodedir, expected_rc=None)
def start_node(self):
- print "tahoe start", self.nodedir
+ print("tahoe start", self.nodedir)
self.command(self.tahoe, "start", self.nodedir)
time.sleep(5)
def stop_node(self):
- print "tahoe stop", self.nodedir
+ print("tahoe stop", self.nodedir)
self.command(self.tahoe, "stop", self.nodedir)
def read_and_check(self, f):
def listdir(self, dirname):
out = self.cli("ls", "testgrid:"+dirname).strip().split("\n")
files = [f.strip() for f in out]
- print " ", files
+ print(" ", files)
return files
def do_test(self):
"""
+from __future__ import print_function
import os, sys, httplib, binascii
import urllib, simplejson, random, time, urlparse
+import six
if sys.argv[1] == "--stats":
statsfiles = sys.argv[2:]
if last_stats:
delta = dict( [ (name,stats[name]-last_stats[name])
for name in stats ] )
- print "THIS SAMPLE:"
+ print("THIS SAMPLE:")
for name in sorted(delta.keys()):
avg = float(delta[name]) / float(DELAY)
- print "%20s: %0.2f per second" % (name, avg)
+ print("%20s: %0.2f per second" % (name, avg))
totals.append(delta)
while len(totals) > MAXSAMPLES:
totals.pop(0)
# now compute average
- print
- print "MOVING WINDOW AVERAGE:"
+ print()
+ print("MOVING WINDOW AVERAGE:")
for name in sorted(delta.keys()):
avg = sum([ s[name] for s in totals]) / (DELAY*len(totals))
- print "%20s %0.2f per second" % (name, avg)
+ print("%20s %0.2f per second" % (name, avg))
last_stats = stats
- print
- print
+ print()
+ print()
time.sleep(DELAY)
stats_out = sys.argv[1]
try:
parsed = simplejson.loads(data)
except ValueError:
- print "URL was", url
- print "DATA was", data
+ print("URL was", url)
+ print("DATA was", data)
raise
nodetype, d = parsed
assert nodetype == "dirnode"
directories_read += 1
children = dict( [(unicode(name),value)
for (name,value)
- in d["children"].iteritems()] )
+ in six.iteritems(d["children"])] )
return children
op = "read"
else:
op = "write"
- print "OP:", op
+ print("OP:", op)
server = random.choice(server_urls)
if op == "read":
pathname = choose_random_descendant(server, root)
- print " reading", pathname
+ print(" reading", pathname)
read_and_discard(server, root, pathname)
files_downloaded += 1
elif op == "write":
pathname = current_writedir + "/" + filename
else:
pathname = filename
- print " writing", pathname
+ print(" writing", pathname)
size = choose_size()
- print " size", size
+ print(" size", size)
generate_and_put(server, root, pathname, size)
files_uploaded += 1
+from __future__ import print_function
import os, shutil, sys, urllib, time, stat
from cStringIO import StringIO
from twisted.internet import defer, reactor, protocol, error
from allmydata.util.encodingutil import get_filesystem_encoding
from foolscap.api import Tub, fireEventually, flushEventualQueue
from twisted.python import log
+import six
class StallableHTTPGetterDiscarder(tw_client.HTTPPageGetter):
full_speed_ahead = False
return
if self._bytes_so_far > 1e6+100:
if not self.stalled:
- print "STALLING"
+ print("STALLING")
self.transport.pauseProducing()
self.stalled = reactor.callLater(10.0, self._resume_speed)
def _resume_speed(self):
- print "RESUME SPEED"
+ print("RESUME SPEED")
self.stalled = None
self.full_speed_ahead = True
self.transport.resumeProducing()
def handleResponseEnd(self):
if self.stalled:
- print "CANCEL"
+ print("CANCEL")
self.stalled.cancel()
self.stalled = None
return tw_client.HTTPPageGetter.handleResponseEnd(self)
def _err(err):
self.failed = err
log.err(err)
- print err
+ print(err)
d.addErrback(_err)
def _done(res):
reactor.stop()
return d
def record_initial_memusage(self):
- print
- print "Client started (no connections yet)"
+ print()
+ print("Client started (no connections yet)")
d = self._print_usage()
d.addCallback(self.stash_stats, "init")
return d
def wait_for_client_connected(self):
- print
- print "Client connecting to other nodes.."
+ print()
+ print("Client connecting to other nodes..")
return self.control_rref.callRemote("wait_for_client_connections",
self.numnodes+1)
form.append('')
form.append('UTF-8')
form.append(sep)
- for name, value in fields.iteritems():
+ for name, value in six.iteritems(fields):
if isinstance(value, tuple):
filename, value = value
form.append('Content-Disposition: form-data; name="%s"; '
def _print_usage(self, res=None):
d = self.control_rref.callRemote("get_memory_usage")
def _print(stats):
- print "VmSize: %9d VmPeak: %9d" % (stats["VmSize"],
- stats["VmPeak"])
+ print("VmSize: %9d VmPeak: %9d" % (stats["VmSize"],
+ stats["VmPeak"]))
return stats
d.addCallback(_print)
return d
def _do_upload(self, res, size, files, uris):
name = '%d' % size
- print
- print "uploading %s" % name
+ print()
+ print("uploading %s" % name)
if self.mode in ("upload", "upload-self"):
files[name] = self.create_data(name, size)
d = self.control_rref.callRemote("upload_from_file_to_uri",
raise ValueError("unknown mode=%s" % self.mode)
def _complete(uri):
uris[name] = uri
- print "uploaded %s" % name
+ print("uploaded %s" % name)
d.addCallback(_complete)
return d
if self.mode not in ("download", "download-GET", "download-GET-slow"):
return
name = '%d' % size
- print "downloading %s" % name
+ print("downloading %s" % name)
uri = uris[name]
if self.mode == "download":
d = self.GET_discard(urllib.quote(url), stall=True)
def _complete(res):
- print "downloaded %s" % name
+ print("downloaded %s" % name)
return res
d.addCallback(_complete)
return d
#d.addCallback(self.stall)
def _done(res):
- print "FINISHING"
+ print("FINISHING")
d.addCallback(_done)
return d
class ClientWatcher(protocol.ProcessProtocol):
ended = False
def outReceived(self, data):
- print "OUT:", data
+ print("OUT:", data)
def errReceived(self, data):
- print "ERR:", data
+ print("ERR:", data)
def processEnded(self, reason):
self.ended = reason
self.d.callback(None)
+from __future__ import print_function
import os, sys
from twisted.internet import reactor, defer
from twisted.python import log
self.download_times = {}
def run(self):
- print "STARTING"
+ print("STARTING")
d = fireEventually()
d.addCallback(lambda res: self.setUp())
d.addCallback(lambda res: self.do_test())
def _err(err):
self.failed = err
log.err(err)
- print err
+ print(err)
d.addErrback(_err)
def _done(res):
reactor.stop()
d.addBoth(_done)
reactor.run()
if self.failed:
- print "EXCEPTION"
- print self.failed
+ print("EXCEPTION")
+ print(self.failed)
sys.exit(1)
def setUp(self):
d = self.tub.getReference(self.control_furl)
def _gotref(rref):
self.client_rref = rref
- print "Got Client Control reference"
+ print("Got Client Control reference")
return self.stall(5)
d.addCallback(_gotref)
return d
return d
def record_times(self, times, key):
- print "TIME (%s): %s up, %s down" % (key, times[0], times[1])
+ print("TIME (%s): %s up, %s down" % (key, times[0], times[1]))
self.upload_times[key], self.download_times[key] = times
def one_test(self, res, name, count, size, mutable):
self.total_rtt = sum(times)
self.average_rtt = sum(times) / len(times)
self.max_rtt = max(times)
- print "num-peers: %d" % len(times)
- print "total-RTT: %f" % self.total_rtt
- print "average-RTT: %f" % self.average_rtt
- print "max-RTT: %f" % self.max_rtt
+ print("num-peers: %d" % len(times))
+ print("total-RTT: %f" % self.total_rtt)
+ print("average-RTT: %f" % self.average_rtt)
+ print("max-RTT: %f" % self.max_rtt)
d.addCallback(_got)
return d
def do_test(self):
- print "doing test"
+ print("doing test")
d = defer.succeed(None)
d.addCallback(self.one_test, "startup", 1, 1000, False) #ignore this one
d.addCallback(self.measure_rtt)
d.addCallback(self.one_test, "10x 200B", 10, 200, False)
def _maybe_do_100x_200B(res):
if self.upload_times["10x 200B"] < 5:
- print "10x 200B test went too fast, doing 100x 200B test"
+ print("10x 200B test went too fast, doing 100x 200B test")
return self.one_test(None, "100x 200B", 100, 200, False)
return
d.addCallback(_maybe_do_100x_200B)
d.addCallback(self.one_test, "10MB", 1, 10*MB, False)
def _maybe_do_100MB(res):
if self.upload_times["10MB"] > 30:
- print "10MB test took too long, skipping 100MB test"
+ print("10MB test took too long, skipping 100MB test")
return
return self.one_test(None, "100MB", 1, 100*MB, False)
d.addCallback(_maybe_do_100MB)
d.addCallback(self.one_test, "10x 200B SSK", 10, 200, "upload")
def _maybe_do_100x_200B_SSK(res):
if self.upload_times["10x 200B SSK"] < 5:
- print "10x 200B SSK test went too fast, doing 100x 200B SSK"
+ print("10x 200B SSK test went too fast, doing 100x 200B SSK")
return self.one_test(None, "100x 200B SSK", 100, 200,
"upload")
return
B = self.upload_times["100x 200B"] / 100
else:
B = self.upload_times["10x 200B"] / 10
- print "upload per-file time: %.3fs" % B
- print "upload per-file times-avg-RTT: %f" % (B / self.average_rtt)
- print "upload per-file times-total-RTT: %f" % (B / self.total_rtt)
+ print("upload per-file time: %.3fs" % B)
+ print("upload per-file times-avg-RTT: %f" % (B / self.average_rtt))
+ print("upload per-file times-total-RTT: %f" % (B / self.total_rtt))
A1 = 1*MB / (self.upload_times["1MB"] - B) # in bytes per second
- print "upload speed (1MB):", self.number(A1, "Bps")
+ print("upload speed (1MB):", self.number(A1, "Bps"))
A2 = 10*MB / (self.upload_times["10MB"] - B)
- print "upload speed (10MB):", self.number(A2, "Bps")
+ print("upload speed (10MB):", self.number(A2, "Bps"))
if "100MB" in self.upload_times:
A3 = 100*MB / (self.upload_times["100MB"] - B)
- print "upload speed (100MB):", self.number(A3, "Bps")
+ print("upload speed (100MB):", self.number(A3, "Bps"))
# download
if "100x 200B" in self.download_times:
B = self.download_times["100x 200B"] / 100
else:
B = self.download_times["10x 200B"] / 10
- print "download per-file time: %.3fs" % B
- print "download per-file times-avg-RTT: %f" % (B / self.average_rtt)
- print "download per-file times-total-RTT: %f" % (B / self.total_rtt)
+ print("download per-file time: %.3fs" % B)
+ print("download per-file times-avg-RTT: %f" % (B / self.average_rtt))
+ print("download per-file times-total-RTT: %f" % (B / self.total_rtt))
A1 = 1*MB / (self.download_times["1MB"] - B) # in bytes per second
- print "download speed (1MB):", self.number(A1, "Bps")
+ print("download speed (1MB):", self.number(A1, "Bps"))
A2 = 10*MB / (self.download_times["10MB"] - B)
- print "download speed (10MB):", self.number(A2, "Bps")
+ print("download speed (10MB):", self.number(A2, "Bps"))
if "100MB" in self.download_times:
A3 = 100*MB / (self.download_times["100MB"] - B)
- print "download speed (100MB):", self.number(A3, "Bps")
+ print("download speed (100MB):", self.number(A3, "Bps"))
if self.DO_MUTABLE_CREATE:
# SSK creation
B = self.upload_times["10x 200B SSK creation"] / 10
- print "create per-file time SSK: %.3fs" % B
+ print("create per-file time SSK: %.3fs" % B)
if self.DO_MUTABLE:
# upload SSK
B = self.upload_times["100x 200B SSK"] / 100
else:
B = self.upload_times["10x 200B SSK"] / 10
- print "upload per-file time SSK: %.3fs" % B
+ print("upload per-file time SSK: %.3fs" % B)
A1 = 1*MB / (self.upload_times["1MB SSK"] - B) # in bytes per second
- print "upload speed SSK (1MB):", self.number(A1, "Bps")
+ print("upload speed SSK (1MB):", self.number(A1, "Bps"))
# download SSK
if "100x 200B SSK" in self.download_times:
B = self.download_times["100x 200B SSK"] / 100
else:
B = self.download_times["10x 200B SSK"] / 10
- print "download per-file time SSK: %.3fs" % B
+ print("download per-file time SSK: %.3fs" % B)
A1 = 1*MB / (self.download_times["1MB SSK"] - B) # in bytes per
# second
- print "download speed SSK (1MB):", self.number(A1, "Bps")
+ print("download speed SSK (1MB):", self.number(A1, "Bps"))
def number(self, value, suffix=""):
scaling = 1
+from __future__ import print_function
import os, random, struct
from zope.interface import implements
from twisted.internet import defer
return self.my_uri.get_size()
try:
data = self.all_contents[self.my_uri.to_string()]
- except KeyError, le:
+ except KeyError as le:
raise NotEnoughSharesError(le, 0, 3)
return len(data)
def get_current_size(self):
# this method as an errback handler, and it will reveal the hidden
# message.
f.trap(WebError)
- print "Web Error:", f.value, ":", f.value.response
+ print("Web Error:", f.value, ":", f.value.response)
return f
def _shouldHTTPError(self, res, which, validator):
class ErrorMixin(WebErrorMixin):
def explain_error(self, f):
if f.check(defer.FirstError):
- print "First Error:", f.value.subFailure
+ print("First Error:", f.value.subFailure)
return f
def corrupt_field(data, offset, size, debug=False):
+from __future__ import print_function
import os, signal, sys, time
from random import randrange
from allmydata.util import fileutil, log
from allmydata.util.encodingutil import unicode_platform, get_filesystem_encoding
+from allmydata.util.sixutil import map
def insecurerandstr(n):
if os.path.exists(dirpath):
msg = ("We were unable to delete a non-ASCII directory %r created by the test. "
"This is liable to cause failures on future builds." % (dirpath,))
- print msg
+ print(msg)
log.err(msg)
self.addCleanup(_cleanup)
os.mkdir(dirpath)
if p.active():
p.cancel()
else:
- print "WEIRDNESS! pending timed call not active!"
+ print("WEIRDNESS! pending timed call not active!")
if required_to_quiesce and active:
self.fail("Reactor was still active when it was required to be quiescent.")
for sharefile, data in shares.items():
open(sharefile, "wb").write(data)
- def delete_share(self, (shnum, serverid, sharefile)):
+ def delete_share(self, share_info):
+ (shnum, serverid, sharefile) = share_info
os.unlink(sharefile)
def delete_shares_numbered(self, uri, shnums):
if i_shnum in shnums:
os.unlink(i_sharefile)
- def corrupt_share(self, (shnum, serverid, sharefile), corruptor_function):
+ def corrupt_share(self, share_info, corruptor_function):
+ (shnum, serverid, sharefile) = share_info
sharedata = open(sharefile, "rb").read()
corruptdata = corruptor_function(sharedata)
open(sharefile, "wb").write(corruptdata)
import random, unittest
from allmydata.util import base62, mathutil
+from allmydata.util.sixutil import map
def insecurerandstr(n):
return ''.join(map(chr, map(random.randrange, [0]*n, [256]*n)))
def _test_ende(self, bs):
ascii=base62.b2a(bs)
bs2=base62.a2b(ascii)
- assert bs2 == bs, "bs2: %s:%s, bs: %s:%s, ascii: %s:%s" % (len(bs2), `bs2`, len(bs), `bs`, len(ascii), `ascii`)
+ assert bs2 == bs, "bs2: %s:%s, bs: %s:%s, ascii: %s:%s" % (len(bs2), repr(bs2), len(bs), repr(bs), len(ascii), repr(ascii))
def test_num_octets_that_encode_to_this_many_chars(self):
return self._test_num_octets_that_encode_to_this_many_chars(2, 1)
from allmydata.immutable.upload import Data
from allmydata.test.common_web import WebRenderingMixin
from allmydata.mutable.publish import MutableData
+import six
class FakeClient:
def get_storage_broker(self):
"This little printing function is only meant for < 26 servers"
shares_chart = {}
names = dict(zip([ss.my_nodeid
- for _,ss in self.g.servers_by_number.iteritems()],
+ for _,ss in six.iteritems(self.g.servers_by_number)],
letters))
for shnum, serverid, _ in self.find_uri_shares(uri):
shares_chart.setdefault(shnum, []).append(names[serverid])
"didn't see 'mqfblse6m5a6dh45isu2cg7oji' in '%s'" % err)
def test_alias(self):
- def s128(c): return base32.b2a(c*(128/8))
- def s256(c): return base32.b2a(c*(256/8))
+ def s128(c): return base32.b2a(c*(128//8))
+ def s256(c): return base32.b2a(c*(256//8))
TA = "URI:DIR2:%s:%s" % (s128("T"), s256("T"))
WA = "URI:DIR2:%s:%s" % (s128("W"), s256("W"))
CA = "URI:DIR2:%s:%s" % (s128("C"), s256("C"))
self.failUnlessRaises(common.UnknownAliasError, ga5, u"C:\\Windows")
def test_alias_tolerance(self):
- def s128(c): return base32.b2a(c*(128/8))
- def s256(c): return base32.b2a(c*(256/8))
+ def s128(c): return base32.b2a(c*(128//8))
+ def s256(c): return base32.b2a(c*(256//8))
TA = "URI:DIR2:%s:%s" % (s128("T"), s256("T"))
aliases = {"present": TA,
"future": "URI-FROM-FUTURE:ooh:aah"}
aliasfile = os.path.join(self.get_clientdir(), "private", "aliases")
d = self.do_cli("create-alias", "tahoe")
- def _done((rc,stdout,stderr)):
+ def _done(args):
+ (rc,stdout,stderr) = args
self.failUnless("Alias 'tahoe' created" in stdout)
self.failIf(stderr)
aliases = get_aliases(self.get_clientdir())
d.addCallback(_stash_urls)
d.addCallback(lambda res: self.do_cli("create-alias", "two")) # dup
- def _check_create_duplicate((rc,stdout,stderr)):
+ def _check_create_duplicate(args):
+ (rc,stdout,stderr) = args
self.failIfEqual(rc, 0)
self.failUnless("Alias 'two' already exists!" in stderr)
aliases = get_aliases(self.get_clientdir())
d.addCallback(_check_create_duplicate)
d.addCallback(lambda res: self.do_cli("add-alias", "added", self.two_uri))
- def _check_add((rc,stdout,stderr)):
+ def _check_add(args):
+ (rc,stdout,stderr) = args
self.failUnlessReallyEqual(rc, 0)
self.failUnless("Alias 'added' added" in stdout)
d.addCallback(_check_add)
# check add-alias with a duplicate
d.addCallback(lambda res: self.do_cli("add-alias", "two", self.two_uri))
- def _check_add_duplicate((rc,stdout,stderr)):
+ def _check_add_duplicate(args):
+ (rc,stdout,stderr) = args
self.failIfEqual(rc, 0)
self.failUnless("Alias 'two' already exists!" in stderr)
aliases = get_aliases(self.get_clientdir())
d.addCallback(_check_add_duplicate)
# check create-alias and add-alias with invalid aliases
- def _check_invalid((rc,stdout,stderr)):
+ def _check_invalid(args):
+ (rc,stdout,stderr) = args
self.failIfEqual(rc, 0)
self.failUnlessIn("cannot contain", stderr)
fileutil.write(aliasfile, old.rstrip())
return self.do_cli("create-alias", "un-corrupted1")
d.addCallback(_remove_trailing_newline_and_create_alias)
- def _check_not_corrupted1((rc,stdout,stderr)):
+ def _check_not_corrupted1(xxx_todo_changeme46):
+ (rc,stdout,stderr) = xxx_todo_changeme46
self.failUnless("Alias 'un-corrupted1' created" in stdout, stdout)
self.failIf(stderr)
# the old behavior was to simply append the new record, causing a
fileutil.write(aliasfile, old.rstrip())
return self.do_cli("add-alias", "un-corrupted2", self.two_uri)
d.addCallback(_remove_trailing_newline_and_add_alias)
- def _check_not_corrupted((rc,stdout,stderr)):
+ def _check_not_corrupted(xxx_todo_changeme47):
+ (rc,stdout,stderr) = xxx_todo_changeme47
self.failUnless("Alias 'un-corrupted2' added" in stdout, stdout)
self.failIf(stderr)
aliases = get_aliases(self.get_clientdir())
raise unittest.SkipTest("A non-ASCII command argument could not be encoded on this platform.")
d = self.do_cli("create-alias", etudes_arg)
- def _check_create_unicode((rc, out, err)):
+ def _check_create_unicode(xxx_todo_changeme48):
+ (rc, out, err) = xxx_todo_changeme48
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessIn("Alias %s created" % quote_output(u"\u00E9tudes"), out)
d.addCallback(_check_create_unicode)
d.addCallback(lambda res: self.do_cli("ls", etudes_arg + ":"))
- def _check_ls1((rc, out, err)):
+ def _check_ls1(xxx_todo_changeme49):
+ (rc, out, err) = xxx_todo_changeme49
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(out, "")
stdin="Blah blah blah"))
d.addCallback(lambda res: self.do_cli("ls", etudes_arg + ":"))
- def _check_ls2((rc, out, err)):
+ def _check_ls2(xxx_todo_changeme50):
+ (rc, out, err) = xxx_todo_changeme50
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(out, "uploaded.txt\n")
d.addCallback(_check_ls2)
d.addCallback(lambda res: self.do_cli("get", etudes_arg + ":uploaded.txt"))
- def _check_get((rc, out, err)):
+ def _check_get(xxx_todo_changeme51):
+ (rc, out, err) = xxx_todo_changeme51
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(out, "Blah blah blah")
d.addCallback(lambda res: self.do_cli("get",
get_aliases(self.get_clientdir())[u"\u00E9tudes"] + "/" + lumiere_arg))
- def _check_get2((rc, out, err)):
+ def _check_get2(xxx_todo_changeme52):
+ (rc, out, err) = xxx_todo_changeme52
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(out, "Let the sunshine In!")
self.basedir = "cli/Ln/ln_without_alias"
self.set_up_grid()
d = self.do_cli("ln", "from", "to")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme53):
+ (rc, out, err) = xxx_todo_changeme53
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Ln/ln_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("ln", "havasu:from", "havasu:to")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme54):
+ (rc, out, err) = xxx_todo_changeme54
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
d.addCallback(_check)
self.failUnlessReallyEqual(out, DATA)
d.addCallback(_downloaded)
d.addCallback(lambda res: self.do_cli("put", "-", stdin=DATA))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessReallyEqual(out, self.readcap))
+ d.addCallback(lambda rc_out_err:
+ self.failUnlessReallyEqual(rc_out_err[1], self.readcap))
return d
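
Lambdas lose tuple unpacking in Python 3 as well, so each lambda (rc, out, err): becomes a one-argument lambda that indexes into the do_cli result tuple: [0] is the exit code, [1] stdout, [2] stderr. A sketch of the converted shape, assuming that tuple layout:

    # do_cli results in these tests are (rc, stdout, stderr) tuples.
    result = (0, "URI:LIT:example-readcap", "")
    get_stdout = lambda rc_out_err: rc_out_err[1]   # valid on both Python 2 and 3
    assert get_stdout(result) == "URI:LIT:example-readcap"
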
def test_unlinked_immutable_from_file(self):
# we make the file small enough to fit in a LIT file, for speed
fileutil.write(rel_fn, "short file")
d = self.do_cli("put", rel_fn)
- def _uploaded((rc, out, err)):
+ def _uploaded(xxx_todo_changeme55):
+ (rc, out, err) = xxx_todo_changeme55
readcap = out
self.failUnless(readcap.startswith("URI:LIT:"), readcap)
self.readcap = readcap
d.addCallback(_uploaded)
d.addCallback(lambda res: self.do_cli("put", "./" + rel_fn))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, self.readcap))
+ d.addCallback(lambda rc_stdout_stderr:
+ self.failUnlessReallyEqual(rc_stdout_stderr[1], self.readcap))
d.addCallback(lambda res: self.do_cli("put", abs_fn))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, self.readcap))
+ d.addCallback(lambda rc_stdout_stderr1:
+ self.failUnlessReallyEqual(rc_stdout_stderr1[1], self.readcap))
# we just have to assume that ~ is handled properly
return d
d.addCallback(lambda res:
self.do_cli("put", rel_fn, "uploaded.txt"))
- def _uploaded((rc, out, err)):
+ def _uploaded(xxx_todo_changeme56):
+ (rc, out, err) = xxx_todo_changeme56
readcap = out.strip()
self.failUnless(readcap.startswith("URI:LIT:"), readcap)
self.failUnlessIn("201 Created", err)
d.addCallback(_uploaded)
d.addCallback(lambda res:
self.do_cli("get", "tahoe:uploaded.txt"))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, DATA))
+ d.addCallback(lambda rc_stdout_stderr2:
+ self.failUnlessReallyEqual(rc_stdout_stderr2[1], DATA))
d.addCallback(lambda res:
self.do_cli("put", "-", "uploaded.txt", stdin=DATA2))
- def _replaced((rc, out, err)):
+ def _replaced(xxx_todo_changeme57):
+ (rc, out, err) = xxx_todo_changeme57
readcap = out.strip()
self.failUnless(readcap.startswith("URI:LIT:"), readcap)
self.failUnlessIn("200 OK", err)
d.addCallback(lambda res:
self.do_cli("put", rel_fn, "subdir/uploaded2.txt"))
d.addCallback(lambda res: self.do_cli("get", "subdir/uploaded2.txt"))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, DATA))
+ d.addCallback(lambda rc_stdout_stderr3:
+ self.failUnlessReallyEqual(rc_stdout_stderr3[1], DATA))
d.addCallback(lambda res:
self.do_cli("put", rel_fn, "tahoe:uploaded3.txt"))
d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded3.txt"))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, DATA))
+ d.addCallback(lambda rc_stdout_stderr4:
+ self.failUnlessReallyEqual(rc_stdout_stderr4[1], DATA))
d.addCallback(lambda res:
self.do_cli("put", rel_fn, "tahoe:subdir/uploaded4.txt"))
d.addCallback(lambda res:
self.do_cli("get", "tahoe:subdir/uploaded4.txt"))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, DATA))
+ d.addCallback(lambda rc_stdout_stderr5:
+ self.failUnlessReallyEqual(rc_stdout_stderr5[1], DATA))
def _get_dircap(res):
self.dircap = get_aliases(self.get_clientdir())["tahoe"]
self.dircap+":./uploaded5.txt"))
d.addCallback(lambda res:
self.do_cli("get", "tahoe:uploaded5.txt"))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, DATA))
+ d.addCallback(lambda rc_stdout_stderr6:
+ self.failUnlessReallyEqual(rc_stdout_stderr6[1], DATA))
d.addCallback(lambda res:
self.do_cli("put", rel_fn,
self.dircap+":./subdir/uploaded6.txt"))
d.addCallback(lambda res:
self.do_cli("get", "tahoe:subdir/uploaded6.txt"))
- d.addCallback(lambda (rc,stdout,stderr):
- self.failUnlessReallyEqual(stdout, DATA))
+ d.addCallback(lambda rc_stdout_stderr7:
+ self.failUnlessReallyEqual(rc_stdout_stderr7[1], DATA))
return d
self.failUnless(self.filecap.startswith("URI:SSK:"), self.filecap)
d.addCallback(_created)
d.addCallback(lambda res: self.do_cli("get", self.filecap))
- d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA))
+ d.addCallback(lambda rc_out_err8: self.failUnlessReallyEqual(rc_out_err8[1], DATA))
d.addCallback(lambda res: self.do_cli("put", "-", self.filecap, stdin=DATA2))
def _replaced(res):
self.failUnlessReallyEqual(self.filecap, out)
d.addCallback(_replaced)
d.addCallback(lambda res: self.do_cli("get", self.filecap))
- d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA2))
+ d.addCallback(lambda rc_out_err9: self.failUnlessReallyEqual(rc_out_err9[1], DATA2))
d.addCallback(lambda res: self.do_cli("put", rel_fn, self.filecap))
def _replaced2(res):
self.failUnlessReallyEqual(self.filecap, out)
d.addCallback(_replaced2)
d.addCallback(lambda res: self.do_cli("get", self.filecap))
- d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA3))
+ d.addCallback(lambda rc_out_err10: self.failUnlessReallyEqual(rc_out_err10[1], DATA3))
return d
d.addCallback(_check2)
d.addCallback(lambda res:
self.do_cli("get", "tahoe:uploaded.txt"))
- d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA2))
+ d.addCallback(lambda rc_out_err11: self.failUnlessReallyEqual(rc_out_err11[1], DATA2))
return d
- def _check_mdmf_json(self, (rc, json, err)):
+ def _check_mdmf_json(self, xxx_todo_changeme165):
+ (rc, json, err) = xxx_todo_changeme165
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(err, "")
self.failUnlessIn('"format": "MDMF"', json)
self.failUnlessIn("URI:MDMF-RO", json)
self.failUnlessIn("URI:MDMF-Verifier", json)
- def _check_sdmf_json(self, (rc, json, err)):
+ def _check_sdmf_json(self, xxx_todo_changeme166):
+ (rc, json, err) = xxx_todo_changeme166
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(err, "")
self.failUnlessIn('"format": "SDMF"', json)
self.failUnlessIn("URI:SSK-RO", json)
self.failUnlessIn("URI:SSK-Verifier", json)
- def _check_chk_json(self, (rc, json, err)):
+ def _check_chk_json(self, xxx_todo_changeme167):
+ (rc, json, err) = xxx_todo_changeme167
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(err, "")
self.failUnlessIn('"format": "CHK"', json)
# unlinked
args = ["put"] + cmdargs + [fn1]
d2 = self.do_cli(*args)
- def _list((rc, out, err)):
+ def _list(xxx_todo_changeme):
+ (rc, out, err) = xxx_todo_changeme
self.failUnlessEqual(rc, 0) # don't allow failure
if filename:
return self.do_cli("ls", "--json", filename)
fn1 = os.path.join(self.basedir, "data")
fileutil.write(fn1, data)
d = self.do_cli("put", "--format=MDMF", fn1)
- def _got_cap((rc, out, err)):
+ def _got_cap(xxx_todo_changeme58):
+ (rc, out, err) = xxx_todo_changeme58
self.failUnlessEqual(rc, 0)
self.cap = out.strip()
d.addCallback(_got_cap)
fileutil.write(fn2, data2)
d.addCallback(lambda ignored:
self.do_cli("put", fn2, self.cap))
- def _got_put((rc, out, err)):
+ def _got_put(xxx_todo_changeme59):
+ (rc, out, err) = xxx_todo_changeme59
self.failUnlessEqual(rc, 0)
self.failUnlessIn(self.cap, out)
d.addCallback(_got_put)
# Now get the cap. We should see the data we just put there.
d.addCallback(lambda ignored:
self.do_cli("get", self.cap))
- def _got_data((rc, out, err)):
+ def _got_data(xxx_todo_changeme60):
+ (rc, out, err) = xxx_todo_changeme60
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(out, data2)
d.addCallback(_got_data)
self.do_cli("put", fn3, self.cap))
d.addCallback(lambda ignored:
self.do_cli("get", self.cap))
- def _got_data3((rc, out, err)):
+ def _got_data3(xxx_todo_changeme61):
+ (rc, out, err) = xxx_todo_changeme61
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(out, data3)
d.addCallback(_got_data3)
fn1 = os.path.join(self.basedir, "data")
fileutil.write(fn1, data)
d = self.do_cli("put", "--format=SDMF", fn1)
- def _got_cap((rc, out, err)):
+ def _got_cap(xxx_todo_changeme62):
+ (rc, out, err) = xxx_todo_changeme62
self.failUnlessEqual(rc, 0)
self.cap = out.strip()
d.addCallback(_got_cap)
fileutil.write(fn2, data2)
d.addCallback(lambda ignored:
self.do_cli("put", fn2, self.cap))
- def _got_put((rc, out, err)):
+ def _got_put(xxx_todo_changeme63):
+ (rc, out, err) = xxx_todo_changeme63
self.failUnlessEqual(rc, 0)
self.failUnlessIn(self.cap, out)
d.addCallback(_got_put)
# Now get the cap. We should see the data we just put there.
d.addCallback(lambda ignored:
self.do_cli("get", self.cap))
- def _got_data((rc, out, err)):
+ def _got_data(xxx_todo_changeme64):
+ (rc, out, err) = xxx_todo_changeme64
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(out, data2)
d.addCallback(_got_data)
self.basedir = "cli/Put/put_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("put", "somefile", "fake:afile")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme65):
+ (rc, out, err) = xxx_todo_changeme65
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
d.addCallback(lambda res:
self.do_cli("put", rel_fn.encode(get_io_encoding()), a_trier_arg))
- def _uploaded((rc, out, err)):
+ def _uploaded(xxx_todo_changeme66):
+ (rc, out, err) = xxx_todo_changeme66
readcap = out.strip()
self.failUnless(readcap.startswith("URI:LIT:"), readcap)
self.failUnlessIn("201 Created", err)
d.addCallback(lambda res:
self.do_cli("get", "tahoe:" + a_trier_arg))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessReallyEqual(out, DATA))
+ d.addCallback(lambda rc_out_err12:
+ self.failUnlessReallyEqual(rc_out_err12[1], DATA))
return d
def test_generate_keypair(self):
d = self.do_cli("admin", "generate-keypair")
- def _done( (stdout, stderr) ):
+ def _done(xxx_todo_changeme67):
+ (stdout, stderr) = xxx_todo_changeme67
lines = [line.strip() for line in stdout.splitlines()]
privkey_bits = lines[0].split()
pubkey_bits = lines[1].split()
def test_derive_pubkey(self):
priv1,pub1 = keyutil.make_keypair()
d = self.do_cli("admin", "derive-pubkey", priv1)
- def _done( (stdout, stderr) ):
+ def _done(xxx_todo_changeme68):
+ (stdout, stderr) = xxx_todo_changeme68
lines = stdout.split("\n")
privkey_line = lines[0].strip()
pubkey_line = lines[1].strip()
d.addCallback(_stash_goodcap)
d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"1share"))
d.addCallback(lambda n:
- self.delete_shares_numbered(n.get_uri(), range(1,10)))
+ self.delete_shares_numbered(n.get_uri(), list(range(1,10))))
d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"0share"))
d.addCallback(lambda n:
- self.delete_shares_numbered(n.get_uri(), range(0,10)))
+ self.delete_shares_numbered(n.get_uri(), list(range(0,10))))
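
The list(range(...)) wrappers here (and in the later delete_shares_numbered and random.sample calls) preserve Python 2 behaviour: on Python 3, range() returns a lazy sequence rather than a list, and the conversion wraps it conservatively wherever a real list might be expected. The effect, in short:

    import sys

    shares_to_delete = list(range(1, 10))        # a concrete list on both Python 2 and 3
    assert shares_to_delete == [1, 2, 3, 4, 5, 6, 7, 8, 9]
    if sys.version_info[0] >= 3:
        assert not isinstance(range(1, 10), list)   # lazy range object, hence the list() wrapper
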
d.addCallback(lambda ign:
self.do_cli("add-alias", "tahoe", self.rooturi))
d.addCallback(lambda ign: self.do_cli("ls"))
- def _check1((rc,out,err)):
+ def _check1(xxx_todo_changeme69):
+ (rc,out,err) = xxx_todo_changeme69
if good_out is None:
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("files whose names could not be converted", err)
self.failUnlessReallyEqual(sorted(out.splitlines()), sorted(["0share", "1share", good_out]))
d.addCallback(_check1)
d.addCallback(lambda ign: self.do_cli("ls", "missing"))
- def _check2((rc,out,err)):
+ def _check2(xxx_todo_changeme70):
+ (rc,out,err) = xxx_todo_changeme70
self.failIfEqual(rc, 0)
self.failUnlessReallyEqual(err.strip(), "No such file or directory")
self.failUnlessReallyEqual(out, "")
d.addCallback(_check2)
d.addCallback(lambda ign: self.do_cli("ls", "1share"))
- def _check3((rc,out,err)):
+ def _check3(xxx_todo_changeme71):
+ (rc,out,err) = xxx_todo_changeme71
self.failIfEqual(rc, 0)
self.failUnlessIn("Error during GET: 410 Gone", err)
self.failUnlessIn("UnrecoverableFileError:", err)
d.addCallback(_check3)
d.addCallback(lambda ign: self.do_cli("ls", "0share"))
d.addCallback(_check3)
- def _check4((rc, out, err)):
+ def _check4(xxx_todo_changeme72):
+ (rc, out, err) = xxx_todo_changeme72
if good_out is None:
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("files whose names could not be converted", err)
d.addCallback(lambda ign: self.do_cli("ls", "-l", self.rooturi + ":./" + good_arg))
d.addCallback(_check4)
- def _check5((rc, out, err)):
+ def _check5(xxx_todo_changeme73):
# listing a raw filecap should not explode, but it will have no
# metadata, just the size
+ (rc, out, err) = xxx_todo_changeme73
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual("-r-- %d -" % len(small), out.strip())
d.addCallback(lambda ign: self.do_cli("ls", "-l", self.goodcap))
d.addCallback(lambda ign: self.rootnode.move_child_to(u"g\u00F6\u00F6d", self.rootnode, u"good"))
d.addCallback(lambda ign: self.do_cli("ls"))
- def _check1_ascii((rc,out,err)):
+ def _check1_ascii(xxx_todo_changeme74):
+ (rc,out,err) = xxx_todo_changeme74
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(sorted(out.splitlines()), sorted(["0share", "1share", "good"]))
d.addCallback(_check1_ascii)
- def _check4_ascii((rc, out, err)):
+ def _check4_ascii(xxx_todo_changeme75):
# listing a file (as dir/filename) should have the edge metadata,
# including the filename
+ (rc, out, err) = xxx_todo_changeme75
self.failUnlessReallyEqual(rc, 0)
self.failUnlessIn("good", out)
self.failIfIn("-r-- %d -" % len(small), out,
return self.rootnode.create_subdirectory(u"unknown", initial_children=kids,
mutable=False)
d.addCallback(_create_unknown)
- def _check6((rc, out, err)):
+ def _check6(xxx_todo_changeme76):
# listing a directory referencing an unknown object should print
# an extra message to stderr
+ (rc, out, err) = xxx_todo_changeme76
self.failUnlessReallyEqual(rc, 0)
self.failUnlessIn("?r-- ? - unknownchild-imm\n", out)
self.failUnlessIn("included unknown objects", err)
d.addCallback(lambda ign: self.do_cli("ls", "-l", "unknown"))
d.addCallback(_check6)
- def _check7((rc, out, err)):
+ def _check7(xxx_todo_changeme77):
# listing an unknown cap directly should print an extra message
# to stderr (currently this only works if the URI starts with 'URI:'
# after any 'ro.' or 'imm.' prefix, otherwise it will be confused
# with an alias).
+ (rc, out, err) = xxx_todo_changeme77
self.failUnlessReallyEqual(rc, 0)
self.failUnlessIn("?r-- ? -\n", out)
self.failUnlessIn("included unknown objects", err)
self.basedir = "cli/List/list_without_alias"
self.set_up_grid()
d = self.do_cli("ls")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme78):
+ (rc, out, err) = xxx_todo_changeme78
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/List/list_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("ls", "nonexistent:")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme79):
+ (rc, out, err) = xxx_todo_changeme79
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessIn("nonexistent", err)
d3 = n.add_file(u"immutable", immutable_data)
ds = [d1, d2, d3]
dl = defer.DeferredList(ds)
- def _made_files((r1, r2, r3)):
+ def _made_files(xxx_todo_changeme38):
+ (r1, r2, r3) = xxx_todo_changeme38
self.failUnless(r1[0])
self.failUnless(r2[0])
self.failUnless(r3[0])
d = self._create_directory_structure()
d.addCallback(lambda ignored:
self.do_cli("ls", self._dircap))
- def _got_ls((rc, out, err)):
+ def _got_ls(xxx_todo_changeme80):
+ (rc, out, err) = xxx_todo_changeme80
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(err, "")
self.failUnlessIn("immutable", out)
d = self._create_directory_structure()
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", self._dircap))
- def _got_json((rc, out, err)):
+ def _got_json(xxx_todo_changeme81):
+ (rc, out, err) = xxx_todo_changeme81
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(err, "")
self.failUnlessIn(self._mdmf_uri, out)
# (we should be able to rename files)
d.addCallback(lambda res:
self.do_cli("mv", "tahoe:file1", "tahoe:file3"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessIn("OK", out, "mv didn't rename a file"))
+ d.addCallback(lambda rc_out_err13:
+ self.failUnlessIn("OK", rc_out_err13[1], "mv didn't rename a file"))
# do mv file3 file2
# (This should succeed without issue)
d.addCallback(lambda res:
self.do_cli("mv", "tahoe:file3", "tahoe:file2"))
# Out should contain "OK" to show that the transfer worked.
- d.addCallback(lambda (rc,out,err):
- self.failUnlessIn("OK", out, "mv didn't output OK after mving"))
+ d.addCallback(lambda rc_out_err14:
+ self.failUnlessIn("OK", rc_out_err14[1], "mv didn't output OK after mving"))
# Next, make a remote directory.
d.addCallback(lambda res:
# client should support this)
d.addCallback(lambda res:
self.do_cli("mv", "tahoe:file2", "tahoe:directory"))
- d.addCallback(lambda (rc, out, err):
+ d.addCallback(lambda rc_out_err15:
self.failUnlessIn(
- "Error: You can't overwrite a directory with a file", err,
+ "Error: You can't overwrite a directory with a file", rc_out_err15[2],
"mv shouldn't overwrite directories" ))
# mv file2 directory/
d.addCallback(lambda res:
self.do_cli("mv", "tahoe:file2", "tahoe:directory/"))
# We should see an "OK"...
- d.addCallback(lambda (rc, out, err):
- self.failUnlessIn("OK", out,
+ d.addCallback(lambda rc_out_err16:
+ self.failUnlessIn("OK", rc_out_err16[1],
"mv didn't mv a file into a directory"))
# ... and be able to GET the file
d.addCallback(lambda res:
self.do_cli("get", "tahoe:directory/file2", self.basedir + "new"))
- d.addCallback(lambda (rc, out, err):
+ d.addCallback(lambda rc_out_err17:
self.failUnless(os.path.exists(self.basedir + "new"),
"mv didn't write the destination file"))
# ... and not find the file where it was before.
d.addCallback(lambda res:
self.do_cli("get", "tahoe:file2", "file2"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessIn("404", err,
+ d.addCallback(lambda rc_out_err18:
+ self.failUnlessIn("404", rc_out_err18[2],
"mv left the source file intact"))
# Let's build:
# We should have just some_file in tahoe:directory3
d.addCallback(lambda res:
self.do_cli("get", "tahoe:directory3/some_file", "some_file"))
- d.addCallback(lambda (rc, out, err):
- self.failUnless("404" not in err,
+ d.addCallback(lambda rc_out_err19:
+ self.failUnless("404" not in rc_out_err19[2],
"mv didn't handle nested directories correctly"))
d.addCallback(lambda res:
self.do_cli("get", "tahoe:directory3/directory", "directory"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessIn("404", err,
+ d.addCallback(lambda rc_out_err20:
+ self.failUnlessIn("404", rc_out_err20[2],
"mv moved the wrong thing"))
return d
# do mv file1 file2
d.addCallback(lambda res:
self.do_cli("mv", "tahoe:file1", "tahoe:file2"))
- def _check( (rc, out, err) ):
+ def _check(xxx_todo_changeme82):
+ (rc, out, err) = xxx_todo_changeme82
self.failIfIn("OK", out, "mv printed 'OK' even though the DELETE failed")
self.failUnlessEqual(rc, 2)
d.addCallback(_check)
self.basedir = "cli/Mv/mv_without_alias"
self.set_up_grid()
d = self.do_cli("mv", "afile", "anotherfile")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme83):
+ (rc, out, err) = xxx_todo_changeme83
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Mv/mv_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("mv", "fake:afile", "fake:anotherfile")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme84):
+ (rc, out, err) = xxx_todo_changeme84
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessIn("fake", err)
d.addCallback(lambda res: self.do_cli("cp", fn1_arg, "tahoe:"))
d.addCallback(lambda res: self.do_cli("get", "tahoe:" + artonwall_arg))
- d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA1))
+ d.addCallback(lambda rc_out_err21: self.failUnlessReallyEqual(rc_out_err21[1], DATA1))
d.addCallback(lambda res: self.do_cli("cp", fn2, "tahoe:"))
d.addCallback(lambda res: self.do_cli("get", "tahoe:Metallica"))
- d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA2))
+ d.addCallback(lambda rc_out_err22: self.failUnlessReallyEqual(rc_out_err22[1], DATA2))
d.addCallback(lambda res: self.do_cli("ls", "tahoe:"))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme85):
+ (rc, out, err) = xxx_todo_changeme85
try:
unicode_to_output(u"\u00C4rtonwall")
except UnicodeEncodeError:
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda ign: self.do_cli("put", fn1))
- def _put_file((rc, out, err)):
+ def _put_file(xxx_todo_changeme86):
+ (rc, out, err) = xxx_todo_changeme86
self.failUnlessReallyEqual(rc, 0)
self.failUnlessIn("200 OK", err)
# keep track of the filecap
# Let's try copying this to the disk using the filecap
# cp FILECAP filename
d.addCallback(lambda ign: self.do_cli("cp", self.filecap, fn2))
- def _copy_file((rc, out, err)):
+ def _copy_file(xxx_todo_changeme87):
+ (rc, out, err) = xxx_todo_changeme87
self.failUnlessReallyEqual(rc, 0)
results = fileutil.read(fn2)
self.failUnlessReallyEqual(results, DATA1)
# Test with ./ (see #761)
# cp FILECAP localdir
d.addCallback(lambda ign: self.do_cli("cp", self.filecap, outdir))
- def _resp((rc, out, err)):
+ def _resp(xxx_todo_changeme88):
+ (rc, out, err) = xxx_todo_changeme88
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error: you must specify a destination filename",
err)
# Create a directory, linked at tahoe:test
d.addCallback(lambda ign: self.do_cli("mkdir", "tahoe:test"))
- def _get_dir((rc, out, err)):
+ def _get_dir(xxx_todo_changeme89):
+ (rc, out, err) = xxx_todo_changeme89
self.failUnlessReallyEqual(rc, 0)
self.dircap = out.strip()
d.addCallback(_get_dir)
# Upload a file to the directory
d.addCallback(lambda ign:
self.do_cli("put", fn1, "tahoe:test/test_file"))
- d.addCallback(lambda (rc, out, err): self.failUnlessReallyEqual(rc, 0))
+ d.addCallback(lambda rc_out_err23: self.failUnlessReallyEqual(rc_out_err23[0], 0))
# cp DIRCAP/filename localdir
d.addCallback(lambda ign:
self.do_cli("cp", self.dircap + "/test_file", outdir))
- def _get_resp((rc, out, err)):
+ def _get_resp(xxx_todo_changeme90):
+ (rc, out, err) = xxx_todo_changeme90
self.failUnlessReallyEqual(rc, 0)
results = fileutil.read(os.path.join(outdir, "test_file"))
self.failUnlessReallyEqual(results, DATA1)
# cp -r DIRCAP/filename filename2
d.addCallback(lambda ign:
self.do_cli("cp", self.dircap + "/test_file", fn3))
- def _get_resp2((rc, out, err)):
+ def _get_resp2(xxx_todo_changeme91):
+ (rc, out, err) = xxx_todo_changeme91
self.failUnlessReallyEqual(rc, 0)
results = fileutil.read(fn3)
self.failUnlessReallyEqual(results, DATA1)
# cp --verbose filename3 dircap:test_file
d.addCallback(lambda ign:
self.do_cli("cp", "--verbose", '--recursive', self.basedir, self.dircap))
- def _test_for_wrong_indices((rc, out, err)):
+ def _test_for_wrong_indices(xxx_todo_changeme92):
+ (rc, out, err) = xxx_todo_changeme92
self.failUnless('examining 1 of 1\n' in err)
d.addCallback(_test_for_wrong_indices)
return d
self.basedir = "cli/Cp/cp_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("cp", "fake:file1", "fake:file2")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme93):
+ (rc, out, err) = xxx_todo_changeme93
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
d.addCallback(_check)
d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:test/" + artonwall_arg))
d.addCallback(lambda res: self.do_cli("cp", "-r", "tahoe:test", "tahoe:test2"))
d.addCallback(lambda res: self.do_cli("ls", "tahoe:test2"))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme94):
+ (rc, out, err) = xxx_todo_changeme94
try:
unicode_to_output(u"\u00C4rtonwall")
except UnicodeEncodeError:
self.do_cli("put", "--mutable", test_txt_path, "tahoe:test/test.txt"))
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test/test.txt"))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme95):
+ (rc, out, err) = xxx_todo_changeme95
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(out, test_txt_contents)
d.addCallback(_check)
# file we've just uploaded.
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test/test.txt"))
- def _get_test_txt_uris((rc, out, err)):
+ def _get_test_txt_uris(xxx_todo_changeme96):
+ (rc, out, err) = xxx_todo_changeme96
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
# If we get test.txt now, we should see the new data.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test/test.txt"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, new_txt_contents))
+ d.addCallback(lambda rc_out_err24:
+ self.failUnlessEqual(rc_out_err24[1], new_txt_contents))
# If we get the json of the new file, we should see that the old
# uri is there
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test/test.txt"))
- def _check_json((rc, out, err)):
+ def _check_json(xxx_todo_changeme97):
+ (rc, out, err) = xxx_todo_changeme97
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
# should give us the new contents.
d.addCallback(lambda ignored:
self.do_cli("get", self.rw_uri))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, new_txt_contents))
+ d.addCallback(lambda rc_out_err25:
+ self.failUnlessEqual(rc_out_err25[1], new_txt_contents))
# Now copy the old test.txt without an explicit destination
# file. tahoe cp will match it to the existing file and
# overwrite it appropriately.
self.do_cli("cp", test_txt_path, "tahoe:test"))
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test/test.txt"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, test_txt_contents))
+ d.addCallback(lambda rc_out_err26:
+ self.failUnlessEqual(rc_out_err26[1], test_txt_contents))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test/test.txt"))
d.addCallback(_check_json)
d.addCallback(lambda ignored:
self.do_cli("get", self.rw_uri))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, test_txt_contents))
+ d.addCallback(lambda rc_out_err27:
+ self.failUnlessEqual(rc_out_err27[1], test_txt_contents))
# Now we'll make a more complicated directory structure.
# test2/
self.do_cli("put", imm_test_txt_path, "tahoe:test2/imm2"))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test2"))
- def _process_directory_json((rc, out, err)):
+ def _process_directory_json(xxx_todo_changeme98):
+ (rc, out, err) = xxx_todo_changeme98
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
# We expect that mutable1 and mutable2 are overwritten in-place,
# so they'll retain their URIs but have different content.
- def _process_file_json((rc, out, err), fn):
+ def _process_file_json(xxx_todo_changeme99, fn):
+ (rc, out, err) = xxx_todo_changeme99
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "filenode")
for fn in ("mutable1", "mutable2"):
d.addCallback(lambda ignored, fn=fn:
self.do_cli("get", "tahoe:test2/%s" % fn))
- d.addCallback(lambda (rc, out, err), fn=fn:
- self.failUnlessEqual(out, fn * 1000))
+ d.addCallback(lambda rc_and_out_and_err, fn=fn:
+ self.failUnlessEqual(rc_and_out_and_err[1], fn * 1000))
d.addCallback(lambda ignored, fn=fn:
self.do_cli("ls", "--json", "tahoe:test2/%s" % fn))
d.addCallback(_process_file_json, fn=fn)
# should be different.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test2/imm1"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, "imm1" * 1000))
+ d.addCallback(lambda rc_out_err28:
+ self.failUnlessEqual(rc_out_err28[1], "imm1" * 1000))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test2/imm1"))
d.addCallback(_process_file_json, fn="imm1")
# imm3 should have been created.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test2/imm3"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, "imm3" * 1000))
+ d.addCallback(lambda rc_out_err29:
+ self.failUnlessEqual(rc_out_err29[1], "imm3" * 1000))
# imm2 should be exactly as we left it, since our newly-copied
# directory didn't contain an imm2 entry.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test2/imm2"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, imm_test_txt_contents))
+ d.addCallback(lambda rc_out_err30:
+ self.failUnlessEqual(rc_out_err30[1], imm_test_txt_contents))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test2/imm2"))
- def _process_imm2_json((rc, out, err)):
+ def _process_imm2_json(xxx_todo_changeme100):
+ (rc, out, err) = xxx_todo_changeme100
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "filenode")
d = self.do_cli("create-alias", "tahoe:")
d.addCallback(lambda ignored:
self.do_cli("put", "--mutable", test_file_path))
- def _get_test_uri((rc, out, err)):
+ def _get_test_uri(xxx_todo_changeme101):
+ (rc, out, err) = xxx_todo_changeme101
self.failUnlessEqual(rc, 0)
# this should be a write uri
self._test_write_uri = out
d.addCallback(_get_test_uri)
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", self._test_write_uri))
- def _process_test_json((rc, out, err)):
+ def _process_test_json(xxx_todo_changeme102):
+ (rc, out, err) = xxx_todo_changeme102
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
# Now we'll link the readonly URI into the tahoe: alias.
d.addCallback(lambda ignored:
self.do_cli("ln", self._test_read_uri, "tahoe:test_file.txt"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(rc, 0))
+ d.addCallback(lambda rc_out_err31:
+ self.failUnlessEqual(rc_out_err31[0], 0))
# Let's grab the json of that to make sure that we did it right.
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:"))
- def _process_tahoe_json((rc, out, err)):
+ def _process_tahoe_json(xxx_todo_changeme103):
+ (rc, out, err) = xxx_todo_changeme103
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
# place of that one. We should get an error.
d.addCallback(lambda ignored:
self.do_cli("cp", replacement_file_path, "tahoe:test_file.txt"))
- def _check_error_message((rc, out, err)):
+ def _check_error_message(xxx_todo_changeme104):
+ (rc, out, err) = xxx_todo_changeme104
self.failUnlessEqual(rc, 1)
self.failUnlessIn("replace or update requested with read-only cap", err)
d.addCallback(_check_error_message)
# Make extra sure that that didn't work.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test_file.txt"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, test_file_contents))
+ d.addCallback(lambda rc_out_err32:
+ self.failUnlessEqual(rc_out_err32[1], test_file_contents))
d.addCallback(lambda ignored:
self.do_cli("get", self._test_read_uri))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, test_file_contents))
+ d.addCallback(lambda rc_out_err33:
+ self.failUnlessEqual(rc_out_err33[1], test_file_contents))
# Now we'll do it without an explicit destination.
d.addCallback(lambda ignored:
self.do_cli("cp", test_file_path, "tahoe:"))
d.addCallback(_check_error_message)
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test_file.txt"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, test_file_contents))
+ d.addCallback(lambda rc_out_err34:
+ self.failUnlessEqual(rc_out_err34[1], test_file_contents))
d.addCallback(lambda ignored:
self.do_cli("get", self._test_read_uri))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(out, test_file_contents))
+ d.addCallback(lambda rc_out_err35:
+ self.failUnlessEqual(rc_out_err35[1], test_file_contents))
# Now we'll link a readonly file into a subdirectory.
d.addCallback(lambda ignored:
self.do_cli("mkdir", "tahoe:testdir"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(rc, 0))
+ d.addCallback(lambda rc_out_err36:
+ self.failUnlessEqual(rc_out_err36[0], 0))
d.addCallback(lambda ignored:
self.do_cli("ln", self._test_read_uri, "tahoe:test/file2.txt"))
- d.addCallback(lambda (rc, out, err):
- self.failUnlessEqual(rc, 0))
+ d.addCallback(lambda rc_out_err37:
+ self.failUnlessEqual(rc_out_err37[0], 0))
test_dir_path = os.path.join(self.basedir, "test")
fileutil.make_dirs(test_dir_path)
d.addCallback(_check_error_message)
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test"))
- def _got_testdir_json((rc, out, err)):
+ def _got_testdir_json(xxx_todo_changeme105):
+ (rc, out, err) = xxx_todo_changeme105
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: do_backup())
- def _check0((rc, out, err)):
+ def _check0(xxx_todo_changeme106):
+ (rc, out, err) = xxx_todo_changeme106
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
fu, fr, fs, dc, dr, ds = self.count_output(out)
d.addCallback(_check0)
d.addCallback(lambda res: self.do_cli("ls", "--uri", "tahoe:backups"))
- def _check1((rc, out, err)):
+ def _check1(xxx_todo_changeme107):
+ (rc, out, err) = xxx_todo_changeme107
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.split("\n")
self.failUnlessReallyEqual(sorted(childnames), ["Archives", "Latest"])
d.addCallback(_check1)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest"))
- def _check2((rc, out, err)):
+ def _check2(xxx_todo_changeme108):
+ (rc, out, err) = xxx_todo_changeme108
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(sorted(out.split()), ["empty", "parent"])
d.addCallback(_check2)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest/empty"))
- def _check2a((rc, out, err)):
+ def _check2a(xxx_todo_changeme109):
+ (rc, out, err) = xxx_todo_changeme109
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(out.strip(), "")
d.addCallback(_check2a)
d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
- def _check3((rc, out, err)):
+ def _check3(xxx_todo_changeme110):
+ (rc, out, err) = xxx_todo_changeme110
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(out, "foo")
d.addCallback(_check3)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
- def _check4((rc, out, err)):
+ def _check4(xxx_todo_changeme111):
+ (rc, out, err) = xxx_todo_changeme111
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.old_archives = out.split()
d.addCallback(self.stall, 1.1)
d.addCallback(lambda res: do_backup())
- def _check4a((rc, out, err)):
+ def _check4a(xxx_todo_changeme112):
# second backup should reuse everything, if the backupdb is
# available
+ (rc, out, err) = xxx_todo_changeme112
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
fu, fr, fs, dc, dr, ds = self.count_output(out)
d.addCallback(self.stall, 1.1)
d.addCallback(lambda res: do_backup(verbose=True))
- def _check4b((rc, out, err)):
+ def _check4b(xxx_todo_changeme113):
# we should check all files, and re-use all of them. None of
# the directories should have been changed, so we should
# re-use all of them too.
+ (rc, out, err) = xxx_todo_changeme113
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
fu, fr, fs, dc, dr, ds = self.count_output(out)
d.addCallback(_check4b)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
- def _check5((rc, out, err)):
+ def _check5(xxx_todo_changeme114):
+ (rc, out, err) = xxx_todo_changeme114
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.new_archives = out.split()
self.writeto("empty", "imagine nothing being here")
return do_backup()
d.addCallback(_modify)
- def _check5a((rc, out, err)):
+ def _check5a(xxx_todo_changeme115):
# second backup should reuse bar.txt (if backupdb is available),
# and upload the rest. None of the directories can be reused.
+ (rc, out, err) = xxx_todo_changeme115
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
fu, fr, fs, dc, dr, ds = self.count_output(out)
self.failUnlessReallyEqual(ds, 0)
d.addCallback(_check5a)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
- def _check6((rc, out, err)):
+ def _check6(xxx_todo_changeme116):
+ (rc, out, err) = xxx_todo_changeme116
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.new_archives = out.split()
self.old_archives[0])
d.addCallback(_check6)
d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
- def _check7((rc, out, err)):
+ def _check7(xxx_todo_changeme117):
+ (rc, out, err) = xxx_todo_changeme117
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(out, "FOOF!")
# the old snapshot should not be modified
return self.do_cli("get", "tahoe:backups/Archives/%s/parent/subdir/foo.txt" % self.old_archives[0])
d.addCallback(_check7)
- def _check8((rc, out, err)):
+ def _check8(xxx_todo_changeme118):
+ (rc, out, err) = xxx_todo_changeme118
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(out, "foo")
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("backup", "--verbose", source, "tahoe:test"))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme119):
+ (rc, out, err) = xxx_todo_changeme119
self.failUnlessReallyEqual(rc, 2)
foo2 = os.path.join(source, "foo2.txt")
self.failUnlessReallyEqual(err, "WARNING: cannot backup symlink '%s'\n" % foo2)
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:test"))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme120):
+ (rc, out, err) = xxx_todo_changeme120
self.failUnlessReallyEqual(rc, 2)
self.failUnlessReallyEqual(err, "WARNING: permission denied on file %s\n" % os.path.join(source, "foo.txt"))
# This is necessary for the temp files to be correctly removed
def _cleanup(self):
- os.chmod(os.path.join(source, "foo.txt"), 0644)
+ os.chmod(os.path.join(source, "foo.txt"), 0o644)
d.addCallback(_cleanup)
d.addErrback(_cleanup)
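
The chmod literals switch to the 0o prefix because bare-zero octal (0644) is a SyntaxError on Python 3; the 0o form is also accepted from Python 2.6 on, so it is the portable spelling, and the numeric value is unchanged:

    import stat

    # Same number, new spelling: the old 0644 literal no longer parses on Python 3.
    assert 0o644 == 420
    assert 0o644 == stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH   # rw-r--r--
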
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:test"))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme121):
+ (rc, out, err) = xxx_todo_changeme121
self.failUnlessReallyEqual(rc, 2)
self.failUnlessReallyEqual(err, "WARNING: permission denied on directory %s\n" % os.path.join(source, "test"))
# This is necessary for the temp files to be correctly removed
def _cleanup(self):
- os.chmod(os.path.join(source, "test"), 0655)
+ os.chmod(os.path.join(source, "test"), 0o655)
d.addCallback(_cleanup)
d.addErrback(_cleanup)
return d
self.set_up_grid()
source = os.path.join(self.basedir, "file1")
d = self.do_cli('backup', source, source)
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme122):
+ (rc, out, err) = xxx_todo_changeme122
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.set_up_grid()
source = os.path.join(self.basedir, "file1")
d = self.do_cli("backup", source, "nonexistent:" + source)
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme123):
+ (rc, out, err) = xxx_todo_changeme123
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessIn("nonexistent", err)
d.addCallback(_stash_uri)
d.addCallback(lambda ign: self.do_cli("check", self.uri))
- def _check1((rc, out, err)):
+ def _check1(xxx_todo_changeme124):
+ (rc, out, err) = xxx_todo_changeme124
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(_check1)
d.addCallback(lambda ign: self.do_cli("check", "--raw", self.uri))
- def _check2((rc, out, err)):
+ def _check2(xxx_todo_changeme125):
+ (rc, out, err) = xxx_todo_changeme125
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
data = simplejson.loads(out)
d.addCallback(_stash_lit_uri)
d.addCallback(lambda ign: self.do_cli("check", self.lit_uri))
- def _check_lit((rc, out, err)):
+ def _check_lit(xxx_todo_changeme126):
+ (rc, out, err) = xxx_todo_changeme126
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(_check_lit)
d.addCallback(lambda ign: self.do_cli("check", "--raw", self.lit_uri))
- def _check_lit_raw((rc, out, err)):
+ def _check_lit_raw(xxx_todo_changeme127):
+ (rc, out, err) = xxx_todo_changeme127
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
data = simplejson.loads(out)
d.addCallback(_clobber_shares)
d.addCallback(lambda ign: self.do_cli("check", "--verify", self.uri))
- def _check3((rc, out, err)):
+ def _check3(xxx_todo_changeme128):
+ (rc, out, err) = xxx_todo_changeme128
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(_check3)
d.addCallback(lambda ign: self.do_cli("check", "--verify", "--raw", self.uri))
- def _check3_raw((rc, out, err)):
+ def _check3_raw(xxx_todo_changeme129):
+ (rc, out, err) = xxx_todo_changeme129
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
data = simplejson.loads(out)
d.addCallback(lambda ign:
self.do_cli("check", "--verify", "--repair", self.uri))
- def _check4((rc, out, err)):
+ def _check4(xxx_todo_changeme130):
+ (rc, out, err) = xxx_todo_changeme130
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(lambda ign:
self.do_cli("check", "--verify", "--repair", self.uri))
- def _check5((rc, out, err)):
+ def _check5(xxx_todo_changeme131):
+ (rc, out, err) = xxx_todo_changeme131
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(_stash_uri, "mutable")
d.addCallback(lambda ign: self.do_cli("deep-check", self.rooturi))
- def _check1((rc, out, err)):
+ def _check1(xxx_todo_changeme132):
+ (rc, out, err) = xxx_todo_changeme132
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(lambda ign: self.do_cli("deep-check", "--verbose",
self.rooturi))
- def _check2((rc, out, err)):
+ def _check2(xxx_todo_changeme133):
+ (rc, out, err) = xxx_todo_changeme133
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(_check2)
d.addCallback(lambda ign: self.do_cli("stats", self.rooturi))
- def _check_stats((rc, out, err)):
+ def _check_stats(xxx_todo_changeme134):
+ (rc, out, err) = xxx_todo_changeme134
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(lambda ign:
self.do_cli("deep-check", "--verbose", self.rooturi))
- def _check3((rc, out, err)):
+ def _check3(xxx_todo_changeme135):
+ (rc, out, err) = xxx_todo_changeme135
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(lambda ign:
self.do_cli("deep-check", "--verbose", "--verify",
self.rooturi))
- def _check4((rc, out, err)):
+ def _check4(xxx_todo_changeme136):
+ (rc, out, err) = xxx_todo_changeme136
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
d.addCallback(lambda ign:
self.do_cli("deep-check", "--raw",
self.rooturi))
- def _check5((rc, out, err)):
+ def _check5(xxx_todo_changeme137):
+ (rc, out, err) = xxx_todo_changeme137
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
self.do_cli("deep-check",
"--verbose", "--verify", "--repair",
self.rooturi))
- def _check6((rc, out, err)):
+ def _check6(xxx_todo_changeme138):
+ (rc, out, err) = xxx_todo_changeme138
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
fn.add_file(u"subfile", upload.Data(DATA+"2", "")))
d.addCallback(lambda ign:
self.delete_shares_numbered(self.uris["subdir"],
- range(10)))
+ list(range(10))))
# root
# rootg\u00F6\u00F6d/
# root/subfile
d.addCallback(lambda ign: self.do_cli("manifest", self.rooturi))
- def _manifest_failed((rc, out, err)):
+ def _manifest_failed(xxx_todo_changeme139):
+ (rc, out, err) = xxx_todo_changeme139
self.failIfEqual(rc, 0)
self.failUnlessIn("ERROR: UnrecoverableFileError", err)
# the fatal directory should still show up, as the last line
d.addCallback(_manifest_failed)
d.addCallback(lambda ign: self.do_cli("deep-check", self.rooturi))
- def _deep_check_failed((rc, out, err)):
+ def _deep_check_failed(xxx_todo_changeme140):
+ (rc, out, err) = xxx_todo_changeme140
self.failIfEqual(rc, 0)
self.failUnlessIn("ERROR: UnrecoverableFileError", err)
# we want to make sure that the error indication is the last
self.basedir = "cli/Check/check_without_alias"
self.set_up_grid()
d = self.do_cli("check")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme141):
+ (rc, out, err) = xxx_todo_changeme141
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Check/check_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("check", "nonexistent:")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme142):
+ (rc, out, err) = xxx_todo_changeme142
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessIn("nonexistent", err)
d.addCallback(_stash_uri)
d = c0.create_dirnode()
d.addCallback(_stash_uri)
-
+
d.addCallback(lambda ign: self.do_cli("check", self.uriList[0], self.uriList[1]))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme143):
+ (rc, out, err) = xxx_todo_changeme143
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
#Ensure healthy appears for each uri
- self.failUnlessIn("Healthy", out[:len(out)/2])
- self.failUnlessIn("Healthy", out[len(out)/2:])
+ self.failUnlessIn("Healthy", out[:len(out)//2])
+ self.failUnlessIn("Healthy", out[len(out)//2:])
d.addCallback(_check)
-
+
d.addCallback(lambda ign: self.do_cli("check", self.uriList[0], "nonexistent:"))
- def _check2((rc, out, err)):
+ def _check2(xxx_todo_changeme144):
+ (rc, out, err) = xxx_todo_changeme144
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("Healthy", out)
self.failUnlessIn("error:", err)
self.failUnlessIn("nonexistent", err)
d.addCallback(_check2)
-
+
return d
d = c0.upload(upload.Data(DATA, convergence=""))
def _stash_bad(ur):
self.uri_1share = ur.get_uri()
- self.delete_shares_numbered(ur.get_uri(), range(1,10))
+ self.delete_shares_numbered(ur.get_uri(), list(range(1,10)))
d.addCallback(_stash_bad)
# the download is abandoned as soon as it's clear that we won't get
in_pending_msg = "ran out of shares: complete= pending=Share(sh0-on-fob7vqgd) overdue= unused= need 3"
d.addCallback(lambda ign: self.do_cli("get", self.uri_1share))
- def _check1((rc, out, err)):
+ def _check1(xxx_todo_changeme145):
+ (rc, out, err) = xxx_todo_changeme145
self.failIfEqual(rc, 0)
self.failUnless("410 Gone" in err, err)
self.failUnlessIn("NotEnoughSharesError: ", err)
targetf = os.path.join(self.basedir, "output")
d.addCallback(lambda ign: self.do_cli("get", self.uri_1share, targetf))
- def _check2((rc, out, err)):
+ def _check2(xxx_todo_changeme146):
+ (rc, out, err) = xxx_todo_changeme146
self.failIfEqual(rc, 0)
self.failUnless("410 Gone" in err, err)
self.failUnlessIn("NotEnoughSharesError: ", err)
"endheaders", _socket_error)
d = self.do_cli("mkdir")
- def _check_invalid((rc,stdout,stderr)):
+ def _check_invalid(xxx_todo_changeme147):
+ (rc,stdout,stderr) = xxx_todo_changeme147
self.failIfEqual(rc, 0)
self.failUnlessIn("Error trying to connect to http://127.0.0.1", stderr)
d.addCallback(_check_invalid)
self.basedir = "cli/Get/get_without_alias"
self.set_up_grid()
d = self.do_cli('get', 'file')
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme148):
+ (rc, out, err) = xxx_todo_changeme148
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Get/get_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("get", "nonexistent:file")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme149):
+ (rc, out, err) = xxx_todo_changeme149
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessIn("nonexistent", err)
self.basedir = "cli/Manifest/manifest_without_alias"
self.set_up_grid()
d = self.do_cli("manifest")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme150):
+ (rc, out, err) = xxx_todo_changeme150
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Manifest/manifest_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("manifest", "nonexistent:")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme151):
+ (rc, out, err) = xxx_todo_changeme151
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessIn("nonexistent", err)
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("mkdir", "test"))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme152):
+ (rc, out, err) = xxx_todo_changeme152
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessIn("URI:", out)
self.basedir = os.path.dirname(self.mktemp())
self.set_up_grid()
d = self.do_cli("create-alias", "tahoe")
- def _check((rc, out, err), st):
+ def _check(xxx_todo_changeme153, st):
+ (rc, out, err) = xxx_todo_changeme153
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessIn(st, out)
self.basedir = os.path.dirname(self.mktemp())
self.set_up_grid()
d = self.do_cli("mkdir", "--format=SDMF")
- def _check((rc, out, err), st):
+ def _check(xxx_todo_changeme154, st):
+ (rc, out, err) = xxx_todo_changeme154
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessIn(st, out)
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("mkdir", motorhead_arg))
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme155):
+ (rc, out, err) = xxx_todo_changeme155
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(err, "")
self.failUnlessIn("URI:", out)
self.basedir = "cli/Mkdir/mkdir_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("mkdir", "havasu:")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme156):
+ (rc, out, err) = xxx_todo_changeme156
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Unlink/%s_without_alias" % (self.command,)
self.set_up_grid()
d = self.do_cli(self.command, "afile")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme157):
+ (rc, out, err) = xxx_todo_changeme157
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Unlink/%s_with_nonexistent_alias" % (self.command,)
self.set_up_grid()
d = self.do_cli(self.command, "nonexistent:afile")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme158):
+ (rc, out, err) = xxx_todo_changeme158
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessIn("nonexistent", err)
self._create_test_file()
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda ign: self.do_cli("put", self.datafile, "tahoe:test"))
- def _do_unlink((rc, out, err)):
+ def _do_unlink(xxx_todo_changeme159):
+ (rc, out, err) = xxx_todo_changeme159
self.failUnlessReallyEqual(rc, 0)
self.failUnless(out.startswith("URI:"), out)
return self.do_cli(self.command, out.strip('\n'))
d.addCallback(_do_unlink)
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme160):
+ (rc, out, err) = xxx_todo_changeme160
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("'tahoe %s'" % (self.command,), err)
self.failUnlessIn("path must be given", err)
# make sure we can get stats on an empty directory too
d.addCallback(lambda ign: self.do_cli("stats", self.rooturi))
- def _check_stats((rc, out, err)):
+ def _check_stats(xxx_todo_changeme161):
+ (rc, out, err) = xxx_todo_changeme161
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
lines = out.splitlines()
self.basedir = "cli/Stats/stats_without_alias"
self.set_up_grid()
d = self.do_cli("stats")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme162):
+ (rc, out, err) = xxx_todo_changeme162
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Stats/stats_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("stats", "havasu:")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme163):
+ (rc, out, err) = xxx_todo_changeme163
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Webopen/webopen_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("webopen", "fake:")
- def _check((rc, out, err)):
+ def _check(xxx_todo_changeme164):
+ (rc, out, err) = xxx_todo_changeme164
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
self.failUnlessReallyEqual(out, "")
self.basedir = "cli/Webopen/webopen"
self.set_up_grid()
d = self.do_cli("create-alias", "alias:")
- def _check_alias((rc, out, err)):
+ def _check_alias(xxx_todo_changeme39):
+ (rc, out, err) = xxx_todo_changeme39
self.failUnlessReallyEqual(rc, 0, repr((rc, out, err)))
self.failUnlessIn("Alias 'alias' created", out)
self.failUnlessReallyEqual(err, "")
self.alias_uri = get_aliases(self.get_clientdir())["alias"]
d.addCallback(_check_alias)
d.addCallback(lambda res: self.do_cli("webopen", "alias:"))
- def _check_webopen((rc, out, err)):
+ def _check_webopen(xxx_todo_changeme40):
+ (rc, out, err) = xxx_todo_changeme40
self.failUnlessReallyEqual(rc, 0, repr((rc, out, err)))
self.failUnlessReallyEqual(out, "")
self.failUnlessReallyEqual(err, "")
assert params == (size, required_shares, max_shares)
log.msg("params: %s" % (params,))
d = enc.encode(data0s)
- def _done_encoding_all((shares, shareids)):
+ def _done_encoding_all(xxx_todo_changeme1):
+ (shares, shareids) = xxx_todo_changeme1
self.failUnlessEqual(len(shares), max_shares)
self.shares = shares
self.shareids = shareids
d.addCallback(_done_encoding_all)
if fewer_shares is not None:
# also validate that the desired_shareids= parameter works
- desired_shareids = random.sample(range(max_shares), fewer_shares)
+ desired_shareids = random.sample(list(range(max_shares)), fewer_shares)
d.addCallback(lambda res: enc.encode(data0s, desired_shareids))
- def _check_fewer_shares((some_shares, their_shareids)):
+ def _check_fewer_shares(res):
+ (some_shares, their_shareids) = res
self.failUnlessEqual(tuple(their_shareids), tuple(desired_shareids))
d.addCallback(_check_fewer_shares)
- def _decode((shares, shareids)):
+ def _decode(res):
+ (shares, shareids) = res
dec = CRSDecoder()
dec.set_params(*params)
d1 = dec.decode(shares, shareids)
+from __future__ import print_function
import time
import os.path
left = p["remaining-sleep-time"]
self.failUnless(isinstance(left, float), left)
self.failUnless(left > 0.0, left)
- except Exception, e:
+ except Exception as e:
did_check_progress[0] = e
else:
did_check_progress[0] = True
# our buildslaves vary too much in their speeds and load levels,
# and many of them only manage to hit 7% usage when our target is
# 50%. So don't assert anything about the results, just log them.
- print
- print "crawler: got %d%% percent when trying for 50%%" % percent
- print "crawler: got %d full cycles" % c.cycles
+ print()
+ print("crawler: got %d%% percent when trying for 50%%" % percent)
+ print("crawler: got %d full cycles" % c.cycles)
d.addCallback(_done)
return d
d.addCallback(self.decode_json)
return d
- def decode_json(self, (s,url)):
+ def decode_json(self, res):
+ (s,url) = res
try:
data = simplejson.loads(s)
except ValueError:
continue
try:
yield simplejson.loads(unit)
- except ValueError, le:
+ except ValueError as le:
le.args = tuple(le.args + (unit,))
raise
def do_web_stream_manifest(self, ignored):
d = self.web(self.root, method="POST", t="stream-manifest")
- d.addCallback(lambda (output,url):
- self._check_streamed_manifest(output))
+ d.addCallback(lambda output_url:
+ self._check_streamed_manifest(output_url[0]))
return d
def _check_streamed_manifest(self, output):
d = self._run_cli(["--node-directory", basedir,
"manifest",
self.root_uri])
- def _check((out,err)):
+ def _check(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
lines = [l for l in out.split("\n") if l]
self.failUnlessEqual(len(lines), 8)
"manifest",
"--raw",
self.root_uri])
- def _check((out,err)):
+ def _check(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
# this should be the same as the POST t=stream-manifest output
self._check_streamed_manifest(out)
"manifest",
"--storage-index",
self.root_uri])
- def _check((out,err)):
+ def _check(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
self._check_manifest_storage_index(out)
d.addCallback(_check)
"manifest",
"--verify-cap",
self.root_uri])
- def _check((out,err)):
+ def _check(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
lines = [l for l in out.split("\n") if l]
self.failUnlessEqual(len(lines), 3)
"manifest",
"--repair-cap",
self.root_uri])
- def _check((out,err)):
+ def _check(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
lines = [l for l in out.split("\n") if l]
self.failUnlessEqual(len(lines), 3)
d = self._run_cli(["--node-directory", basedir,
"stats",
self.root_uri])
- def _check3((out,err)):
+ def _check3(res):
+ (out,err) = res
lines = [l.strip() for l in out.split("\n") if l]
self.failUnless("count-immutable-files: 1" in lines)
self.failUnless("count-mutable-files: 1" in lines)
"stats",
"--raw",
self.root_uri])
- def _check4((out,err)):
+ def _check4(res):
+ (out,err) = res
data = simplejson.loads(out)
self.failUnlessEqual(data["count-immutable-files"], 1)
self.failUnlessEqual(data["count-immutable-files"], 1)
self._run_cli(["debug", "corrupt-share", sharefile])
def _delete_most_shares(self, node):
- self.delete_shares_numbered(node.get_uri(), range(1,10))
+ self.delete_shares_numbered(node.get_uri(), list(range(1,10)))
def check_is_healthy(self, cr, where):
self.failUnlessEqual(cr.get_version_counter_recoverable(), 1, where)
self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 0, where)
return cr
- except Exception, le:
+ except Exception as le:
le.args = tuple(le.args + (where,))
raise
def _start_deepcheck(ignored):
return self.web(self.root, method="POST", t="stream-deep-check")
d.addCallback(_start_deepcheck)
- def _check( (output, url) ):
+ def _check(res):
+ (output, url) = res
units = list(self.parse_streamed_json(output))
self.failUnlessEqual(len(units), 2+COUNT+1)
d.addCallback(_check)
from allmydata.nodemaker import NodeMaker
from base64 import b32decode
import allmydata.test.common_util as testutil
+import six
class MemAccum:
implements(IConsumer)
"largest-directory-children": 2,
"largest-immutable-file": 0,
}
- for k,v in expected.iteritems():
+ for k,v in six.iteritems(expected):
self.failUnlessReallyEqual(stats[k], v,
"stats[%s] was %s, not %s" %
(k, stats[k], v))
{ 'tahoe': {'linkcrtime': "bogus"}}))
d.addCallback(lambda res: n.get_metadata_for(u"c2"))
def _has_good_linkcrtime(metadata):
- self.failUnless(metadata.has_key('tahoe'))
- self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
+ self.failUnless('tahoe' in metadata)
+ self.failUnless('linkcrtime' in metadata['tahoe'])
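+ # (dict.has_key() does not exist on Python 3; the `in` operator works on both)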
self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
d.addCallback(_has_good_linkcrtime)
def _check_children(self, children):
# Are all the expected child nodes there?
- self.failUnless(children.has_key(u'file1'))
- self.failUnless(children.has_key(u'file2'))
- self.failUnless(children.has_key(u'file3'))
+ self.failUnless(u'file1' in children)
+ self.failUnless(u'file2' in children)
+ self.failUnless(u'file3' in children)
# Are the metadata for child 3 right?
file3_rocap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
(101, 316, 216),
(317, 1000, 684),
(1001, 3162, 99),
- (3162277660169L, 10000000000000L, 1),
+ (3162277660169, 10000000000000, 1),
])
class UCWEingMutableFileNode(MutableFileNode):
+from __future__ import print_function
# system-level upload+download roundtrip test, but using shares created from
# a previous run. This asserts that the current code is capable of decoding
n = self.c0.create_node_from_uri(immutable_uri)
c = MemoryConsumer()
- d = n.read(c, 0L, 10L)
+ d = n.read(c, 0, 10)
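+ # (the L suffix on integer literals is gone in Python 3; plain ints are
+ # unbounded, so these offsets and lengths behave the same as the old longs)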
d.addCallback(lambda c: len("".join(c.chunks)))
d.addCallback(lambda size: self.failUnlessEqual(size, 10))
return d
n._cnode._node._build_guessed_tables(u.max_segment_size)
con1 = MemoryConsumer()
con2 = MemoryConsumer()
- d = n.read(con1, 0L, 20)
- d2 = n.read(con2, 140L, 20)
+ d = n.read(con1, 0, 20)
+ d2 = n.read(con2, 140, 20)
# con2 will be cancelled, so d2 should fail with DownloadStopped
def _con2_should_not_succeed(res):
self.fail("the second read should not have succeeded")
n._cnode._node._build_guessed_tables(u.max_segment_size)
con1 = MemoryConsumer()
con2 = MemoryConsumer()
- d = n.read(con1, 0L, 20)
- d2 = n.read(con2, 140L, 20)
+ d = n.read(con1, 0, 20)
+ d2 = n.read(con2, 140, 20)
# con2 should wait for con1 to fail and then con2 should succeed.
# In particular, we should not lose progress. If this test fails,
# it will fail with a timeout error.
n = self.c0.create_node_from_uri(immutable_uri)
cn = n._cnode
(d,c) = cn.get_segment(0)
- def _got_segment((offset,data,decodetime)):
+ def _got_segment(res):
+ (offset,data,decodetime) = res
self.failUnlessEqual(offset, 0)
self.failUnlessEqual(len(data), len(plaintext))
d.addCallback(_got_segment)
return d
d.addCallback(_uploaded)
def _show_results(ign):
- print
- print ("of [0:%d], corruption ignored in %s" %
- (len(self.sh0_orig), undetected.dump()))
+ print()
+ print(("of [0:%d], corruption ignored in %s" %
+ (len(self.sh0_orig), undetected.dump())))
if self.catalog_detection:
d.addCallback(_show_results)
# of [0:2070], corruption ignored in len=1133:
from allmydata.util.consumer import download_to_data
from allmydata.interfaces import IStorageBucketWriter, IStorageBucketReader
from allmydata.test.no_network import GridTestMixin
+import six
class LostPeerError(Exception):
pass
if _TMP % K != 0:
_TMP += (K - (_TMP % K))
TAIL_SEGSIZE = _TMP
- _TMP = SIZE / SEGSIZE
+ _TMP = SIZE // SEGSIZE
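+ # (// keeps floor-division semantics; a bare / returns a float on Python 3,
+ # and NUM_SEGMENTS must stay an integer)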
if SIZE % SEGSIZE != 0:
_TMP += 1
NUM_SEGMENTS = _TMP
def test_reject_insufficient(self):
dl = []
- for k in self.mindict.iterkeys():
+ for k in six.iterkeys(self.mindict):
insuffdict = self.mindict.copy()
del insuffdict[k]
d = self._test_reject(insuffdict)
def test_accept_optional(self):
dl = []
- for k in self.optional_consistent.iterkeys():
+ for k in six.iterkeys(self.optional_consistent):
mydict = self.mindict.copy()
mydict[k] = self.optional_consistent[k]
d = self._test_accept(mydict)
def test_reject_optional(self):
dl = []
- for k in self.optional_inconsistent.iterkeys():
+ for k in six.iterkeys(self.optional_inconsistent):
for v in self.optional_inconsistent[k]:
mydict = self.mindict.copy()
mydict[k] = v
+from __future__ import print_function
lumiere_nfc = u"lumi\u00E8re"
Artonwall_nfc = u"\u00C4rtonwall.mp3"
import platform
if len(sys.argv) != 2:
- print "Usage: %s lumi<e-grave>re" % sys.argv[0]
+ print("Usage: %s lumi<e-grave>re" % sys.argv[0])
sys.exit(1)
if sys.platform == "win32":
try:
from allmydata.windows.fixups import initialize
except ImportError:
- print "set PYTHONPATH to the src directory"
+ print("set PYTHONPATH to the src directory")
sys.exit(1)
initialize()
- print
- print "class MyWeirdOS(EncodingUtil, unittest.TestCase):"
- print " uname = '%s'" % ' '.join(platform.uname())
- print " argv = %s" % repr(sys.argv[1])
- print " platform = '%s'" % sys.platform
- print " filesystem_encoding = '%s'" % sys.getfilesystemencoding()
- print " io_encoding = '%s'" % sys.stdout.encoding
+ print()
+ print("class MyWeirdOS(EncodingUtil, unittest.TestCase):")
+ print(" uname = '%s'" % ' '.join(platform.uname()))
+ print(" argv = %s" % repr(sys.argv[1]))
+ print(" platform = '%s'" % sys.platform)
+ print(" filesystem_encoding = '%s'" % sys.getfilesystemencoding())
+ print(" io_encoding = '%s'" % sys.stdout.encoding)
try:
tmpdir = tempfile.mkdtemp()
for fname in TEST_FILENAMES:
else:
dirlist = os.listdir(tmpdir)
- print " dirlist = %s" % repr(dirlist)
+ print(" dirlist = %s" % repr(dirlist))
except:
- print " # Oops, I cannot write filenames containing non-ascii characters"
- print
+ print(" # Oops, I cannot write filenames containing non-ascii characters")
+ print()
shutil.rmtree(tmpdir)
sys.exit(0)
try:
os.mkdir(lumiere_nfc)
- except EnvironmentError, e:
+ except EnvironmentError as e:
raise unittest.SkipTest("%r\nIt is possible that the filesystem on which this test is being run "
"does not support Unicode, even though the platform does." % (e,))
expected_root = [
('loop',
- [0, True, 0600, 1, self.FALL_OF_BERLIN_WALL, 'alice', 'alice', '??']),
+ [0, True, 0o600, 1, self.FALL_OF_BERLIN_WALL, 'alice', 'alice', '??']),
('immutable',
- [23, False, 0600, 1, self.TURN_OF_MILLENIUM, 'alice', 'alice', '??']),
+ [23, False, 0o600, 1, self.TURN_OF_MILLENIUM, 'alice', 'alice', '??']),
('mutable',
# timestamp should be 0 if no timestamp metadata is present
- [0, False, 0600, 1, 0, 'alice', 'alice', '??'])]
+ [0, False, 0o600, 1, 0, 'alice', 'alice', '??'])]
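+ # (octal literals require the 0o prefix on Python 3; 0o600 is also accepted by Python 2.6+)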
d.addCallback(lambda root: self._compareDirLists(root, expected_root))
# this should succeed
try:
iht.set_hashes(chain, leaves={0: tagged_hash("tag", "0")})
- except hashtree.BadHashError, e:
+ except hashtree.BadHashError as e:
self.fail("bad hash: %s" % e)
self.failUnlessEqual(ht.get_leaf(0), tagged_hash("tag", "0"))
# this should succeed
try:
iht.set_hashes(chain, leaves={4: tagged_hash("tag", "4")})
- except hashtree.BadHashError, e:
+ except hashtree.BadHashError as e:
self.fail("bad hash: %s" % e)
done = []
d = self._set_up(False, "test_5_overdue_immutable")
def _reduce_max_outstanding_requests_and_download(ign):
- self._hang_shares(range(5))
+ self._hang_shares(list(range(5)))
n = self.c0.create_node_from_uri(self.uri)
n._cnode._maybe_create_download_node()
self._sf = n._cnode._node._sharefinder
return d
def _shuffled(self, num_shnums):
- shnums = range(10)
+ shnums = list(range(10))
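+ # (range() returns a lazy, immutable sequence on Python 3; materialize it as a
+ # list so random.shuffle() can reorder it in place)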
random.shuffle(shnums)
return shnums[:num_shnums]
# test the Tahoe code...
def _then_delete_8(ign):
self.restore_all_shares(self.shares)
- self.delete_shares_numbered(self.uri, range(8))
+ self.delete_shares_numbered(self.uri, list(range(8)))
d.addCallback(_then_delete_8)
d.addCallback(lambda ign:
self.shouldFail(NotEnoughSharesError, "download-2",
removed."""
d = self.startup("immutable/Test/download_from_only_3_remaining_shares")
d.addCallback(lambda ign:
- self.delete_shares_numbered(self.uri, range(7)))
+ self.delete_shares_numbered(self.uri, list(range(7))))
d.addCallback(self._download_and_check_plaintext)
def _after_download(num_reads):
#print num_reads
enough shares out there."""
d = self.startup("download_abort_if_too_many_missing_shares")
d.addCallback(lambda ign:
- self.delete_shares_numbered(self.uri, range(8)))
+ self.delete_shares_numbered(self.uri, list(range(8))))
d.addCallback(lambda ign:
self.shouldFail(NotEnoughSharesError, "delete 8",
"Last failure: None",
from allmydata.client import Client as TahoeClient
from allmydata.util import pollmixin, keyutil, idlib, fileutil
import allmydata.test.common_util as testutil
+import six
class LoggingMultiService(service.MultiService):
def log(self, msg, **kw):
seqnum_counter = itertools.count(1)
def realseq():
- return seqnum_counter.next(), str(os.randint(1,100000))
+ return six.advance_iterator(seqnum_counter), str(os.randint(1,100000))
def make_ann(furl):
ann = { "anonymous-storage-FURL": furl,
+from __future__ import print_function
import os, re, base64
from cStringIO import StringIO
from twisted.trial import unittest
from allmydata.test.test_download import PausingConsumer, \
PausingAndStoppingConsumer, StoppingConsumer, \
ImmediatelyStoppingConsumer
+import six
def eventuaaaaaly(res=None):
d = fireEventually(res)
version=MDMF_VERSION)
def _check_server_write_counts(ignored):
sb = self.nodemaker.storage_broker
- for server in sb.servers.itervalues():
+ for server in six.itervalues(sb.servers):
self.failUnlessEqual(server.get_rref().queries, 1)
d.addCallback(_check_server_write_counts)
return d
# 10 shares
self.failUnlessEqual(len(sm.update_data), 10)
# one version
- for data in sm.update_data.itervalues():
+ for data in six.itervalues(sm.update_data):
self.failUnlessEqual(len(data), 1)
d.addCallback(_check_servermap)
return d
return output
def dump_servermap(self, servermap):
- print "SERVERMAP", servermap
- print "RECOVERABLE", [self.abbrev_verinfo(v)
- for v in servermap.recoverable_versions()]
- print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
- print "available", self.abbrev_verinfo_dict(servermap.shares_available())
+ print("SERVERMAP", servermap)
+ print("RECOVERABLE", [self.abbrev_verinfo(v)
+ for v in servermap.recoverable_versions()])
+ print("BEST", self.abbrev_verinfo(servermap.best_recoverable_version()))
+ print("available", self.abbrev_verinfo_dict(servermap.shares_available()))
def do_download(self, servermap, version=None):
if version is None:
N = self._fn.get_total_shares()
d = defer.succeed(None)
d.addCallback(corrupt, self._storage, "pubkey",
- shnums_to_corrupt=range(0, N-k))
+ shnums_to_corrupt=list(range(0, N-k)))
d.addCallback(lambda res: self.make_servermap())
def _do_retrieve(servermap):
self.failUnless(servermap.get_problems())
else:
d = defer.succeed(None)
d.addCallback(lambda ignored:
- corrupt(None, self._storage, offset, range(5)))
+ corrupt(None, self._storage, offset, list(range(5))))
d.addCallback(lambda ignored:
self.make_servermap())
def _do_retrieve(servermap):
# On 8 of the shares, corrupt the beginning of the share data.
# The signature check during the servermap update won't catch this.
d.addCallback(lambda ignored:
- corrupt(None, self._storage, "share_data", range(8)))
+ corrupt(None, self._storage, "share_data", list(range(8))))
# On 2 of the shares, corrupt the end of the share data.
# The signature check during the servermap update won't catch
# this either, and the retrieval process will have to process
# the choice of server for share[0].
d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
- def _got_key( (pubkey, privkey) ):
+ def _got_key(res):
+ (pubkey, privkey) = res
nm.key_generator = SameKeyGenerator(pubkey, privkey)
pubkey_s = pubkey.serialize()
privkey_s = privkey.serialize()
d.addCallback(lambda res: n.download_best_version())
d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
def _explain_error(f):
- print f
+ print(f)
if f.check(NotEnoughServersError):
- print "first_error:", f.value.first_error
+ print("first_error:", f.value.first_error)
return f
d.addErrback(_explain_error)
return d
def test_filehandle_read(self):
self.basedir = "mutable/FileHandle/test_filehandle_read"
chunk_size = 10
- for i in xrange(0, len(self.test_data), chunk_size):
+ for i in range(0, len(self.test_data), chunk_size):
data = self.uploadable.read(chunk_size)
data = "".join(data)
start = i
def test_datahandle_read(self):
chunk_size = 10
- for i in xrange(0, len(self.test_data), chunk_size):
+ for i in range(0, len(self.test_data), chunk_size):
data = self.uploadable.read(chunk_size)
data = "".join(data)
start = i
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
- for i in xrange(0, len(self.data), 10000):
+ for i in range(0, len(self.data), 10000):
d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
d2.addCallback(lambda ignored:
self.failUnlessEqual(self.data, "".join(c.chunks)))
d.addCallback(lambda ignored: "".join(c.chunks))
def _check(results):
if results != expected:
- print
- print "got: %s ... %s" % (results[:20], results[-20:])
- print "exp: %s ... %s" % (expected[:20], expected[-20:])
+ print()
+ print("got: %s ... %s" % (results[:20], results[-20:]))
+ print("exp: %s ... %s" % (expected[:20], expected[-20:]))
self.fail("results[%s] != expected" % name)
return version # daisy-chained to next call
d.addCallback(_check)
node.download_best_version())
def _check(results):
if results != expected:
- print
- print "got: %s ... %s" % (results[:20], results[-20:])
- print "exp: %s ... %s" % (expected[:20], expected[-20:])
+ print()
+ print("got: %s ... %s" % (results[:20], results[-20:]))
+ print("exp: %s ... %s" % (expected[:20], expected[-20:]))
self.fail("results != expected")
d.addCallback(_check)
return d
SEGSIZE = 128*1024
if got != expected:
- print "differences:"
+ print("differences:")
for segnum in range(len(expected)//SEGSIZE):
start = segnum * SEGSIZE
end = (segnum+1) * SEGSIZE
got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
if got_ends != exp_ends:
- print "expected[%d]: %s" % (start, exp_ends)
- print "got [%d]: %s" % (start, got_ends)
+ print("expected[%d]: %s" % (start, exp_ends))
+ print("got [%d]: %s" % (start, got_ends))
if expspans != gotspans:
- print "expected: %s" % expspans
- print "got : %s" % gotspans
+ print("expected: %s" % expspans)
+ print("got : %s" % gotspans)
open("EXPECTED","wb").write(expected)
open("GOT","wb").write(got)
- print "wrote data to EXPECTED and GOT"
+ print("wrote data to EXPECTED and GOT")
self.fail("didn't get expected data")
def test_replace_locations(self):
# exercise fencepost conditions
SEGSIZE = 128*1024
- suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
+ suspects = list(range(SEGSIZE-3, SEGSIZE+1))+list(range(2*SEGSIZE-3, 2*SEGSIZE+1))
letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
d0 = self.do_upload_mdmf()
def _run(ign):
expected = self.data
d = defer.succeed(None)
for offset in suspects:
- new_data = letters.next()*2 # "AA", then "BB", etc
+ new_data = six.advance_iterator(letters)*2 # "AA", then "BB", etc
expected = expected[:offset]+new_data+expected[offset+2:]
d.addCallback(lambda ign:
self.mdmf_node.get_best_mutable_version())
def test_replace_locations_max_shares(self):
# exercise fencepost conditions
SEGSIZE = 128*1024
- suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
+ suspects = list(range(SEGSIZE-3, SEGSIZE+1))+list(range(2*SEGSIZE-3, 2*SEGSIZE+1))
letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
d0 = self.do_upload_mdmf()
def _run(ign):
expected = self.data
d = defer.succeed(None)
for offset in suspects:
- new_data = letters.next()*2 # "AA", then "BB", etc
+ new_data = six.advance_iterator(letters)*2 # "AA", then "BB", etc
expected = expected[:offset]+new_data+expected[offset+2:]
d.addCallback(lambda ign:
self.mdmf_max_shares_node.get_best_mutable_version())
privdir = os.path.join(basedir, "private")
st = os.stat(privdir)
bits = stat.S_IMODE(st[stat.ST_MODE])
- self.failUnless(bits & 0001 == 0, bits)
+ self.failUnless(bits & 0o001 == 0, bits)
@patch("foolscap.logging.log.setLogDir")
def test_logdir_is_str(self, mock_setLogDir):
+from __future__ import print_function
# -*- coding: utf-8 -*-
from allmydata.test import common
from allmydata.monitor import Monitor
self.failIfBigger(delta_reads, MAX_DELTA_READS)
try:
judgement(vr)
- except unittest.FailTest, e:
+ except unittest.FailTest as e:
# FailTest just uses e.args[0] == str
new_arg = str(e.args[0]) + "\nvr.data is: " + str(vr.as_dict())
e.args = (new_arg,)
def _show_results(ign):
f = open("test_each_byte_output", "w")
for i in sorted(results.keys()):
- print >>f, "%d: %s" % (i, results[i])
+ print("%d: %s" % (i, results[i]), file=f)
f.close()
- print "Please look in _trial_temp/test_each_byte_output for results"
+ print("Please look in _trial_temp/test_each_byte_output for results")
d.addCallback(_show_results)
return d
# previously-deleted share #2.
d.addCallback(lambda ignored:
- self.delete_shares_numbered(self.uri, range(3, 10+1)))
+ self.delete_shares_numbered(self.uri, list(range(3, 10+1))))
d.addCallback(lambda ignored: download_to_data(self.c1_filenode))
d.addCallback(lambda newdata:
self.failUnlessEqual(newdata, common.TEST_DATA))
self.set_up_grid(num_clients=2)
d = self.upload_and_stash()
d.addCallback(lambda ignored:
- self.delete_shares_numbered(self.uri, range(7)))
+ self.delete_shares_numbered(self.uri, list(range(7))))
d.addCallback(lambda ignored: self._stash_counts())
d.addCallback(lambda ignored:
self.c0_filenode.check_and_repair(Monitor(),
# previously-deleted share #2.
d.addCallback(lambda ignored:
- self.delete_shares_numbered(self.uri, range(3, 10+1)))
+ self.delete_shares_numbered(self.uri, list(range(3, 10+1))))
d.addCallback(lambda ignored: download_to_data(self.c1_filenode))
d.addCallback(lambda newdata:
self.failUnlessEqual(newdata, common.TEST_DATA))
# distributing the shares widely enough to satisfy the default
# happiness setting.
def _delete_some_servers(ignored):
- for i in xrange(7):
+ for i in range(7):
self.g.remove_server(self.g.servers_by_number[i].my_nodeid)
assert len(self.g.servers_by_number) == 3
# downloading and has the right contents. This can't work
# unless it has already repaired the previously-corrupted share.
def _then_delete_7_and_try_a_download(unused=None):
- shnums = range(10)
+ shnums = list(range(10))
shnums.remove(shnum)
random.shuffle(shnums)
for sharenum in shnums[:7]:
if not same:
try:
same = os.path.samefile(root_from_cwd, root_to_check)
- except AttributeError, e:
+ except AttributeError as e:
e # hush pyflakes
if not same:
+from __future__ import print_function
import re, struct, traceback, time, calendar
from stat import S_IFREG, S_IFDIR
"%s was supposed to raise SFTPError(%r), not SFTPError(%r): %s" %
(which, expected_code, res.value.code, res))
else:
- print '@' + '@'.join(s)
+ print('@' + '@'.join(s))
self.fail("%s was supposed to raise SFTPError(%r), not get %r" %
(which, expected_code, res))
d.addBoth(_done)
gross = u"gro\u00DF".encode("utf-8")
expected_root = [
- ('empty_lit_dir', r'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0555}),
- (gross, r'-rw-rw-rw- .* 1010 .* '+gross+'$', {'permissions': S_IFREG | 0666, 'size': 1010}),
+ ('empty_lit_dir', r'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0o555}),
+ (gross, r'-rw-rw-rw- .* 1010 .* '+gross+'$', {'permissions': S_IFREG | 0o666, 'size': 1010}),
# The fall of the Berlin wall may have been on 9th or 10th November 1989 depending on the gateway's timezone.
#('loop', r'drwxrwxrwx .* 0 Nov (09|10) 1989 loop$', {'permissions': S_IFDIR | 0777}),
- ('loop', r'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0777}),
- ('mutable', r'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0666}),
- ('readonly', r'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0444}),
- ('small', r'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0666, 'size': 10}),
- ('small2', r'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0666, 'size': 26}),
- ('tiny_lit_dir', r'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0555}),
+ ('loop', r'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0o777}),
+ ('mutable', r'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0o666}),
+ ('readonly', r'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0o444}),
+ ('small', r'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0o666, 'size': 10}),
+ ('small2', r'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0o666, 'size': 26}),
+ ('tiny_lit_dir', r'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0o555}),
('unknown', r'\?--------- .* 0 .* unknown$', {'permissions': 0}),
]
# The UTC epoch may either be in Jan 1 1970 or Dec 31 1969 depending on the gateway's timezone.
expected_tiny_lit = [
- ('short', r'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0444, 'size': 8}),
+ ('short', r'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0o444, 'size': 8}),
]
d.addCallback(lambda ign: self.handler.openDirectory("tiny_lit_dir"))
d.addCallback(lambda res: self._compareDirLists(res, expected_tiny_lit))
d.addCallback(lambda ign: self.handler.getAttrs("small", True))
- d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
+ d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10}))
d.addCallback(lambda ign: self.handler.setAttrs("small", {}))
d.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
d.addCallback(lambda ign: self.handler.getAttrs("small", True))
- d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
+ d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "setAttrs size",
rf.readChunk, 11, 1))
d2.addCallback(lambda ign: rf.getAttrs())
- d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
+ d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10}))
d2.addCallback(lambda ign: self.handler.getAttrs("small", followLinks=0))
- d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
+ d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10}))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied",
rf.readChunk, 1011, 1))
d2.addCallback(lambda ign: rf.getAttrs())
- d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 1010}))
+ d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 1010}))
d2.addCallback(lambda ign: self.handler.getAttrs(gross, followLinks=0))
- d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 1010}))
+ d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 1010}))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied",
d2.addCallback(lambda ign: wf.writeChunk(13, "abc"))
d2.addCallback(lambda ign: wf.getAttrs())
- d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 16}))
+ d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16}))
d2.addCallback(lambda ign: self.handler.getAttrs("newfile", followLinks=0))
- d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 16}))
+ d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16}))
d2.addCallback(lambda ign: wf.setAttrs({}))
def _write_mutable_setattr(wf):
d2 = wf.writeChunk(8, "read-only link from parent")
- d2.addCallback(lambda ign: self.handler.setAttrs("mutable", {'permissions': 0444}))
+ d2.addCallback(lambda ign: self.handler.setAttrs("mutable", {'permissions': 0o444}))
d2.addCallback(lambda ign: self.root.get(u"mutable"))
d2.addCallback(lambda node: self.failUnless(node.is_readonly()))
d2.addCallback(lambda ign: wf.getAttrs())
- d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0666))
+ d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666))
d2.addCallback(lambda ign: self.handler.getAttrs("mutable", followLinks=0))
- d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0444))
+ d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444))
d2.addCallback(lambda ign: wf.close())
return d2
def _write_mutable2_setattr(wf):
d2 = wf.writeChunk(7, "2")
- d2.addCallback(lambda ign: wf.setAttrs({'permissions': 0444, 'size': 8}))
+ d2.addCallback(lambda ign: wf.setAttrs({'permissions': 0o444, 'size': 8}))
# The link isn't made read-only until the file is closed.
d2.addCallback(lambda ign: self.root.get(u"mutable2"))
d2.addCallback(lambda node: self.failIf(node.is_readonly()))
d2.addCallback(lambda ign: wf.getAttrs())
- d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0444))
+ d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444))
d2.addCallback(lambda ign: self.handler.getAttrs("mutable2", followLinks=0))
- d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0666))
+ d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(lambda ign: self.handler.makeDirectory("newdir", {'ext_foo': 'bar', 'ctime': 42}))
d.addCallback(lambda ign: self.root.get_child_and_metadata(u"newdir"))
- def _got( (child, metadata) ):
+ def _got(res):
+ (child, metadata) = res
self.failUnless(IDirectoryNode.providedBy(child))
self.failUnless(child.is_mutable())
# FIXME
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "makeDirectory newdir2 permissions:0444 denied",
self.handler.makeDirectory, "newdir2",
- {'permissions': 0444}))
+ {'permissions': 0o444}))
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
from allmydata.test.common_web import WebRenderingMixin
from allmydata.test.no_network import NoNetworkServer
from allmydata.web.storage import StorageStatus, remove_prefix
+import six
class Marker:
pass
self.failUnlessIn('maximum-mutable-share-size', sv1)
def allocate(self, ss, storage_index, sharenums, size, canary=None):
- renew_secret = hashutil.tagged_hash("blah", "%d" % self._lease_secret.next())
- cancel_secret = hashutil.tagged_hash("blah", "%d" % self._lease_secret.next())
+ renew_secret = hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret))
+ cancel_secret = hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret))
if not canary:
canary = FakeCanary()
return ss.remote_allocate_buckets(storage_index,
def test_remove_incoming(self):
ss = self.create("test_remove_incoming")
- already, writers = self.allocate(ss, "vid", range(3), 10)
+ already, writers = self.allocate(ss, "vid", list(range(3)), 10)
for i,wb in writers.items():
wb.remote_write(0, "%10d" % i)
wb.remote_close()
self.failIfEqual(ss.allocated_size(), 0)
# Now abort the writers.
- for writer in writers.itervalues():
+ for writer in six.itervalues(writers):
writer.remote_abort()
self.failUnlessEqual(ss.allocated_size(), 0)
# now there should be ALLOCATED=1001+12+72=1085 bytes allocated, and
# 5000-1085=3915 free, therefore we can fit 39 100byte shares
- already3,writers3 = self.allocate(ss,"vid3", range(100), 100, canary)
+ already3,writers3 = self.allocate(ss,"vid3", list(range(100)), 100, canary)
self.failUnlessEqual(len(writers3), 39)
self.failUnlessEqual(len(ss._active_writers), 39)
def test_leases(self):
ss = self.create("test_leases")
canary = FakeCanary()
- sharenums = range(5)
+ sharenums = list(range(5))
size = 100
- rs0,cs0 = (hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()),
- hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()))
+ rs0,cs0 = (hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)),
+ hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)))
already,writers = ss.remote_allocate_buckets("si0", rs0, cs0,
sharenums, size, canary)
self.failUnlessEqual(len(already), 0)
self.failUnlessEqual(len(leases), 1)
self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs0]))
- rs1,cs1 = (hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()),
- hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()))
+ rs1,cs1 = (hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)),
+ hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)))
already,writers = ss.remote_allocate_buckets("si1", rs1, cs1,
sharenums, size, canary)
for wb in writers.values():
wb.remote_close()
# take out a second lease on si1
- rs2,cs2 = (hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()),
- hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()))
+ rs2,cs2 = (hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)),
+ hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)))
already,writers = ss.remote_allocate_buckets("si1", rs2, cs2,
sharenums, size, canary)
self.failUnlessEqual(len(already), 5)
self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs1, rs2]))
# and a third lease, using add-lease
- rs2a,cs2a = (hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()),
- hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()))
+ rs2a,cs2a = (hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)),
+ hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)))
ss.remote_add_lease("si1", rs2a, cs2a)
leases = list(ss.get_leases("si1"))
self.failUnlessEqual(len(leases), 3)
"ss should not have a 'remote_cancel_lease' method/attribute")
# test overlapping uploads
- rs3,cs3 = (hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()),
- hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()))
- rs4,cs4 = (hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()),
- hashutil.tagged_hash("blah", "%d" % self._lease_secret.next()))
+ rs3,cs3 = (hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)),
+ hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)))
+ rs4,cs4 = (hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)),
+ hashutil.tagged_hash("blah", "%d" % six.advance_iterator(self._lease_secret)))
already,writers = ss.remote_allocate_buckets("si3", rs3, cs3,
sharenums, size, canary)
self.failUnlessEqual(len(already), 0)
def test_bad_magic(self):
ss = self.create("test_bad_magic")
- self.allocate(ss, "si1", "we1", self._lease_secret.next(), set([0]), 10)
+ self.allocate(ss, "si1", "we1", six.advance_iterator(self._lease_secret), set([0]), 10)
fn = os.path.join(ss.sharedir, storage_index_to_dir("si1"), "0")
f = open(fn, "rb+")
f.seek(0)
def test_container_size(self):
ss = self.create("test_container_size")
- self.allocate(ss, "si1", "we1", self._lease_secret.next(),
+ self.allocate(ss, "si1", "we1", six.advance_iterator(self._lease_secret),
set([0,1,2]), 100)
read = ss.remote_slot_readv
rstaraw = ss.remote_slot_testv_and_readv_and_writev
def test_allocate(self):
ss = self.create("test_allocate")
- self.allocate(ss, "si1", "we1", self._lease_secret.next(),
+ self.allocate(ss, "si1", "we1", six.advance_iterator(self._lease_secret),
set([0,1,2]), 100)
read = ss.remote_slot_readv
def test_remove(self):
ss = self.create("test_remove")
- self.allocate(ss, "si1", "we1", self._lease_secret.next(),
+ self.allocate(ss, "si1", "we1", six.advance_iterator(self._lease_secret),
set([0,1,2]), 100)
readv = ss.remote_slot_readv
writev = ss.remote_slot_testv_and_readv_and_writev
self.block = "aa"
self.salt = "a" * 16
self.block_hash = "a" * 32
- self.block_hash_tree = [self.block_hash for i in xrange(6)]
+ self.block_hash_tree = [self.block_hash for i in range(6)]
self.share_hash = self.block_hash
- self.share_hash_chain = dict([(i, self.share_hash) for i in xrange(6)])
+ self.share_hash_chain = dict([(i, self.share_hash) for i in range(6)])
self.signature = "foobarbaz"
self.verification_key = "vvvvvv"
self.encprivkey = "private"
self.root_hash = self.block_hash
self.salt_hash = self.root_hash
- self.salt_hash_tree = [self.salt_hash for i in xrange(6)]
+ self.salt_hash_tree = [self.salt_hash for i in range(6)]
self.block_hash_tree_s = self.serialize_blockhashes(self.block_hash_tree)
self.share_hash_chain_s = self.serialize_sharehashes(self.share_hash_chain)
# blockhashes and salt hashes are serialized in the same way,
# Now we'll build the offsets.
sharedata = ""
if not tail_segment and not empty:
- for i in xrange(6):
+ for i in range(6):
sharedata += self.salt + self.block
elif tail_segment:
- for i in xrange(5):
+ for i in range(5):
sharedata += self.salt + self.block
sharedata += self.salt + "a"
# and the verification key
data += self.verification_key
# Then we'll add in gibberish until we get to the right point.
- nulls = "".join([" " for i in xrange(len(data), share_data_offset)])
+ nulls = "".join([" " for i in range(len(data), share_data_offset)])
data += nulls
# Then the share data
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
# Check that every method equals what we expect it to.
d = defer.succeed(None)
- def _check_block_and_salt((block, salt)):
+ def _check_block_and_salt(res):
+ (block, salt) = res
self.failUnlessEqual(block, self.block)
self.failUnlessEqual(salt, self.salt)
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mr.get_block_and_salt(i))
d.addCallback(_check_block_and_salt)
d.addCallback(lambda ignored:
mr.get_encoding_parameters())
- def _check_encoding_parameters((k, n, segsize, datalen)):
+ def _check_encoding_parameters(res):
+ (k, n, segsize, datalen) = res
self.failUnlessEqual(k, 3)
self.failUnlessEqual(n, 10)
self.failUnlessEqual(segsize, 6)
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
d = mr.get_encoding_parameters()
- def _check_encoding_parameters((k, n, segment_size, datalen)):
+ def _check_encoding_parameters(res):
+ (k, n, segment_size, datalen) = res
self.failUnlessEqual(k, 3)
self.failUnlessEqual(n, 10)
self.failUnlessEqual(segment_size, 6)
# is working appropriately.
mw = self._make_new_mw("si1", 0)
- for i in xrange(6):
+ for i in range(6):
mw.put_block(self.block, i, self.salt)
mw.put_encprivkey(self.encprivkey)
mw.put_blockhashes(self.block_hash_tree)
def test_private_key_after_share_hash_chain(self):
mw = self._make_new_mw("si1", 0)
d = defer.succeed(None)
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mw.put_block(self.block, i, self.salt))
d.addCallback(lambda ignored:
mw = self._make_new_mw("si1", 0)
d = defer.succeed(None)
# Put everything up to and including the verification key.
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mw.put_block(self.block, i, self.salt))
d.addCallback(lambda ignored:
self.failIf(result)
def _write_share(mw):
- for i in xrange(6):
+ for i in range(6):
mw.put_block(self.block, i, self.salt)
mw.put_encprivkey(self.encprivkey)
mw.put_blockhashes(self.block_hash_tree)
mw = self._make_new_mw("si1", 0)
mw.set_checkstring("this is a lie")
- for i in xrange(6):
+ for i in range(6):
mw.put_block(self.block, i, self.salt)
mw.put_encprivkey(self.encprivkey)
mw.put_blockhashes(self.block_hash_tree)
SHARE_HASH_CHAIN_SIZE
written_block_size = 2 + len(self.salt)
written_block = self.block + self.salt
- for i in xrange(6):
+ for i in range(6):
mw.put_block(self.block, i, self.salt)
mw.put_encprivkey(self.encprivkey)
self.failUnlessEqual(len(results), 2)
result, ign = results
self.failUnless(result, "publish failed")
- for i in xrange(6):
+ for i in range(6):
self.failUnlessEqual(read("si1", [0], [(expected_sharedata_offset + (i * written_block_size), written_block_size)]),
{0: [written_block]})
# more than 6
# blocks into each share.
d = defer.succeed(None)
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mw.put_block(self.block, i, self.salt))
d.addCallback(lambda ignored:
# a block hash tree, and a share hash tree. Otherwise, we'll see
# failures that match what we are looking for, but are caused by
# the constraints imposed on operation ordering.
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mw.put_block(self.block, i, self.salt))
d.addCallback(lambda ignored:
self.shouldFail(LayoutInvalid, "test blocksize too large",
None,
mw.put_block, invalid_block, 0, self.salt))
- for i in xrange(5):
+ for i in range(5):
d.addCallback(lambda ignored, i=i:
mw.put_block(self.block, i, self.salt))
# Try to put an invalid tail segment
mw0 = self._make_new_mw("si0", 0)
# Write some shares
d = defer.succeed(None)
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mw0.put_block(self.block, i, self.salt))
# Write a share using the mutable writer, and make sure that the
# reader knows how to read everything back to us.
d = defer.succeed(None)
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mw.put_block(self.block, i, self.salt))
d.addCallback(lambda ignored:
mw.finish_publishing())
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
- def _check_block_and_salt((block, salt)):
+ def _check_block_and_salt(res):
+ (block, salt) = res
self.failUnlessEqual(block, self.block)
self.failUnlessEqual(salt, self.salt)
- for i in xrange(6):
+ for i in range(6):
d.addCallback(lambda ignored, i=i:
mr.get_block_and_salt(i))
d.addCallback(_check_block_and_salt)
d.addCallback(lambda ignored:
mr.get_encoding_parameters())
- def _check_encoding_parameters((k, n, segsize, datalen)):
+ def _check_encoding_parameters(res):
+ (k, n, segsize, datalen) = res
self.failUnlessEqual(k, 3)
self.failUnlessEqual(n, 10)
self.failUnlessEqual(segsize, 6)
d.addCallback(_make_mr, 123)
d.addCallback(lambda mr:
mr.get_block_and_salt(0))
- def _check_block_and_salt((block, salt)):
+ def _check_block_and_salt(res):
+ (block, salt) = res
self.failUnlessEqual(block, self.block)
self.failUnlessEqual(salt, self.salt)
self.failUnlessEqual(self.rref.read_count, 1)
d.addCallback(_make_mr, 123)
d.addCallback(lambda mr:
mr.get_block_and_salt(0))
- def _check_block_and_salt((block, salt)):
+ def _check_block_and_salt(res):
+ (block, salt) = res
self.failUnlessEqual(block, self.block * 6)
self.failUnlessEqual(salt, self.salt)
# TODO: Fix the read routine so that it reads only the data
+from __future__ import print_function
import os, re, sys, time, simplejson
from cStringIO import StringIO
# TODO: move this to common or common_util
from allmydata.test.test_runner import RunBinTahoeMixin
+import six
LARGE_DATA = """
This is some data to publish to the remote grid.., which needs to be large
base32.b2a(storage_index))
self.failUnless(expected in output)
except unittest.FailTest:
- print
- print "dump-share output was:"
- print output
+ print()
+ print("dump-share output was:")
+ print(output)
raise
d.addCallback(_test_debug)
return d1
d.addCallback(_check_download_2)
- def _check_download_3((res, newnode)):
+ def _check_download_3(results):
+ (res, newnode) = results
self.failUnlessEqual(res, DATA)
# replace the data
log.msg("starting replace1")
self.failUnless("Subscription Summary: storage: 5" in res)
self.failUnless("tahoe.css" in res)
except unittest.FailTest:
- print
- print "GET %s output was:" % self.introweb_url
- print res
+ print()
+ print("GET %s output was:" % self.introweb_url)
+ print(res)
raise
d.addCallback(_check)
# make sure it serves the CSS too
self.failUnlessEqual(data["announcement_distinct_hosts"],
{"storage": 1})
except unittest.FailTest:
- print
- print "GET %s?t=json output was:" % self.introweb_url
- print res
+ print()
+ print("GET %s?t=json output was:" % self.introweb_url)
+ print(res)
raise
d.addCallback(_check_json)
return d
"largest-directory-children": 3,
"largest-immutable-file": 112,
}
- for k,v in expected.iteritems():
+ for k,v in six.iteritems(expected):
self.failUnlessEqual(stats[k], v,
"stats[%s] was %s, not %s" %
(k, stats[k], v))
form.append('')
form.append('UTF-8')
form.append(sep)
- for name, value in fields.iteritems():
+ for name, value in six.iteritems(fields):
if isinstance(value, tuple):
filename, value = value
form.append('Content-Disposition: form-data; name="%s"; '
newargs = nodeargs + [verb] + list(args)
return self._run_cli(newargs, stdin=stdin)
- def _check_ls((out,err), expected_children, unexpected_children=[]):
+ def _check_ls(res, expected_children, unexpected_children=[]):
+ (out,err) = res
self.failUnlessEqual(err, "")
for s in expected_children:
self.failUnless(s in out, (s,out))
for s in unexpected_children:
self.failIf(s in out, (s,out))
- def _check_ls_root((out,err)):
+ def _check_ls_root(res):
+ (out,err) = res
self.failUnless("personal" in out)
self.failUnless("s2-ro" in out)
self.failUnless("s2-rw" in out)
d.addCallback(_check_ls, ["personal", "s2-ro", "s2-rw"])
d.addCallback(run, "list-aliases")
- def _check_aliases_1((out,err)):
+ def _check_aliases_1(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
self.failUnlessEqual(out.strip(" \n"), "tahoe: %s" % private_uri)
d.addCallback(_check_aliases_1)
# new files
d.addCallback(lambda res: os.unlink(root_file))
d.addCallback(run, "list-aliases")
- def _check_aliases_2((out,err)):
+ def _check_aliases_2(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
self.failUnlessEqual(out, "")
d.addCallback(_check_aliases_2)
d.addCallback(run, "mkdir")
- def _got_dir( (out,err) ):
+ def _got_dir(res):
+ (out,err) = res
self.failUnless(uri.from_string_dirnode(out.strip()))
return out.strip()
d.addCallback(_got_dir)
d.addCallback(lambda newcap: run(None, "add-alias", "tahoe", newcap))
d.addCallback(run, "list-aliases")
- def _check_aliases_3((out,err)):
+ def _check_aliases_3(res):
+ (out,err) = res
self.failUnlessEqual(err, "")
self.failUnless("tahoe: " in out)
d.addCallback(_check_aliases_3)
- def _check_empty_dir((out,err)):
+ def _check_empty_dir(res):
+ (out,err) = res
self.failUnlessEqual(out, "")
self.failUnlessEqual(err, "")
d.addCallback(run, "ls")
d.addCallback(_check_empty_dir)
- def _check_missing_dir((out,err)):
+ def _check_missing_dir(res):
# TODO: check that rc==2
+ (out,err) = res
self.failUnlessEqual(out, "")
self.failUnlessEqual(err, "No such file or directory\n")
d.addCallback(run, "ls", "bogus")
datas.append(data)
open(fn,"wb").write(data)
- def _check_stdout_against((out,err), filenum=None, data=None):
+ def _check_stdout_against(res, filenum=None, data=None):
+ (out,err) = res
self.failUnlessEqual(err, "")
if filenum is not None:
self.failUnlessEqual(out, datas[filenum])
# test all both forms of put: from a file, and from stdin
# tahoe put bar FOO
d.addCallback(run, "put", files[0], "tahoe-file0")
- def _put_out((out,err)):
+ def _put_out(res):
+ (out,err) = res
self.failUnless("URI:LIT:" in out, out)
self.failUnless("201 Created" in err, err)
uri0 = out.strip()
return run(None, "get", uri0)
d.addCallback(_put_out)
- d.addCallback(lambda (out,err): self.failUnlessEqual(out, datas[0]))
+ d.addCallback(lambda out_err: self.failUnlessEqual(out_err[0], datas[0]))
d.addCallback(run, "put", files[1], "subdir/tahoe-file1")
# tahoe put bar tahoe:FOO
d.addCallback(run, "put", files[2], "tahoe:file2")
d.addCallback(run, "put", "--format=SDMF", files[3], "tahoe:file3")
- def _check_put_mutable((out,err)):
+ def _check_put_mutable(res):
+ (out,err) = res
self._mutable_file3_uri = out.strip()
d.addCallback(_check_put_mutable)
d.addCallback(run, "get", "tahoe:file3")
d.addCallback(_check_stdout_against, 1)
outfile0 = os.path.join(self.basedir, "outfile0")
d.addCallback(run, "get", "file2", outfile0)
- def _check_outfile0((out,err)):
+ def _check_outfile0(res):
+ (out,err) = res
data = open(outfile0,"rb").read()
self.failUnlessEqual(data, "data to be uploaded: file2\n")
d.addCallback(_check_outfile0)
outfile1 = os.path.join(self.basedir, "outfile0")
d.addCallback(run, "get", "tahoe:subdir/tahoe-file1", outfile1)
- def _check_outfile1((out,err)):
+ def _check_outfile1(res):
+ (out,err) = res
data = open(outfile1,"rb").read()
self.failUnlessEqual(data, "data to be uploaded: file1\n")
d.addCallback(_check_outfile1)
d.addCallback(_check_ls, [], ["tahoe-file0", "file2"])
d.addCallback(run, "ls", "-l")
- def _check_ls_l((out,err)):
+ def _check_ls_l(res):
+ (out,err) = res
lines = out.split("\n")
for l in lines:
if "tahoe-file-stdin" in l:
d.addCallback(_check_ls_l)
d.addCallback(run, "ls", "--uri")
- def _check_ls_uri((out,err)):
+ def _check_ls_uri(res):
+ (out,err) = res
lines = out.split("\n")
for l in lines:
if "file3" in l:
d.addCallback(_check_ls_uri)
d.addCallback(run, "ls", "--readonly-uri")
- def _check_ls_rouri((out,err)):
+ def _check_ls_rouri(res):
+ (out,err) = res
lines = out.split("\n")
for l in lines:
if "file3" in l:
# copy from tahoe into disk
target_filename = os.path.join(self.basedir, "file-out")
d.addCallback(run, "cp", "tahoe:file4", target_filename)
- def _check_cp_out((out,err)):
+ def _check_cp_out(res):
+ (out,err) = res
self.failUnless(os.path.exists(target_filename))
got = open(target_filename,"rb").read()
self.failUnlessEqual(got, datas[4])
# copy from disk to disk (silly case)
target2_filename = os.path.join(self.basedir, "file-out-copy")
d.addCallback(run, "cp", target_filename, target2_filename)
- def _check_cp_out2((out,err)):
+ def _check_cp_out2(res):
+ (out,err) = res
self.failUnless(os.path.exists(target2_filename))
got = open(target2_filename,"rb").read()
self.failUnlessEqual(got, datas[4])
# copy from tahoe into disk, overwriting an existing file
d.addCallback(run, "cp", "tahoe:file3", target_filename)
- def _check_cp_out3((out,err)):
+ def _check_cp_out3(res):
+ (out,err) = res
self.failUnless(os.path.exists(target_filename))
got = open(target_filename,"rb").read()
self.failUnlessEqual(got, datas[3])
# and back out again
dn_copy = os.path.join(self.basedir, "dir1-copy")
d.addCallback(run, "cp", "--verbose", "-r", "tahoe:dir1", dn_copy)
- def _check_cp_r_out((out,err)):
+ def _check_cp_r_out(res):
+ (out,err) = res
def _cmp(name):
old = open(os.path.join(dn, name), "rb").read()
newfn = os.path.join(dn_copy, name)
# and again, only writing filecaps
dn_copy2 = os.path.join(self.basedir, "dir1-copy-capsonly")
d.addCallback(run, "cp", "-r", "--caps-only", "tahoe:dir1", dn_copy2)
- def _check_capsonly((out,err)):
+ def _check_capsonly(res):
# these should all be LITs
+ (out,err) = res
x = open(os.path.join(dn_copy2, "subdir2", "rfile4")).read()
y = uri.from_string_filenode(x)
self.failUnlessEqual(y.data, "rfile4")
d = self.run_bintahoe(['debug', 'trial', '--reporter=verbose',
'allmydata.test.trialtest'])
- def _check_failure( (out, err, rc) ):
+ def _check_failure(res):
+ (out, err, rc) = res
self.failUnlessEqual(rc, 1)
lines = out.split('\n')
_check_for_line(lines, "[SKIPPED]", "test_skip")
# the --quiet argument regression-tests a problem in finding which arguments to pass to trial
d.addCallback(lambda ign: self.run_bintahoe(['--quiet', 'debug', 'trial', '--reporter=verbose',
'allmydata.test.trialtest.Success']))
- def _check_success( (out, err, rc) ):
+ def _check_success(res):
+ (out, err, rc) = res
self.failUnlessEqual(rc, 0)
lines = out.split('\n')
_check_for_line(lines, "[SKIPPED]", "test_skip")
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.server import storage_index_to_dir
from allmydata.client import Client
+from functools import reduce
+import six
MiB = 1024*1024
d = self.shouldFail(UploadUnhappinessError, "first_error_all",
"server selection failed",
upload_data, self.u, DATA)
- def _check((f,)):
+ def _check(res):
+ (f,) = res
self.failUnlessIn("placed 0 shares out of 100 total", str(f.value))
# there should also be a 'last failure was' message
self.failUnlessIn("ServerError", str(f.value))
d = self.shouldFail(UploadUnhappinessError, "second_error_all",
"server selection failed",
upload_data, self.u, DATA)
- def _check((f,)):
+ def _check(res):
+ (f,) = res
self.failUnlessIn("placed 10 shares out of 100 total", str(f.value))
# there should also be a 'last failure was' message
self.failUnlessIn("ServerError", str(f.value))
n = len(pool)
if r > n:
return
- indices = range(r)
+ indices = list(range(r))
yield tuple(pool[i] for i in indices)
while True:
- for i in reversed(range(r)):
+ for i in reversed(list(range(r))):
if indices[i] != i + n - r:
break
else:
if len(servertoshnums) < h:
return False
# print "servertoshnums: ", servertoshnums, h, k
- for happysetcombo in combinations(servertoshnums.iterkeys(), h):
+ for happysetcombo in combinations(six.iterkeys(servertoshnums), h):
# print "happysetcombo: ", happysetcombo
for subsetcombo in combinations(happysetcombo, k):
shnums = reduce(set.union, [ servertoshnums[s] for s in subsetcombo ])
assert self.g, "I tried to find a grid at self.g, but failed"
servertoshnums = {} # k: server, v: set(shnum)
- for i, c in self.g.servers_by_number.iteritems():
+ for i, c in six.iteritems(self.g.servers_by_number):
for (dirp, dirns, fns) in os.walk(c.sharedir):
for fn in fns:
try:
d = selector.get_shareholders(broker, sh, storage_index,
share_size, block_size, num_segments,
10, 3, 4)
- def _have_shareholders((upload_trackers, already_servers)):
+ def _have_shareholders(res):
+ (upload_trackers, already_servers) = res
assert servers_to_break <= len(upload_trackers)
- for index in xrange(servers_to_break):
+ for index in range(servers_to_break):
tracker = list(upload_trackers)[index]
for share in tracker.buckets.keys():
tracker.buckets[share].abort()
self._add_server_with_share(server_number=1, share_number=2))
# Copy all of the other shares to server number 2
def _copy_shares(ign):
- for i in xrange(0, 10):
+ for i in range(0, 10):
self._copy_share_to_server(i, 2)
d.addCallback(_copy_shares)
# Remove the first server, and add a placeholder with share 0
readonly=True))
# Copy all of the other shares to server number 2
def _copy_shares(ign):
- for i in xrange(1, 10):
+ for i in range(1, 10):
self._copy_share_to_server(i, 2)
d.addCallback(_copy_shares)
# Remove server 0, and add another in its place
self._add_server_with_share(server_number=2, share_number=0,
readonly=True))
def _copy_shares(ign):
- for i in xrange(1, 10):
+ for i in range(1, 10):
self._copy_share_to_server(i, 2)
d.addCallback(_copy_shares)
d.addCallback(lambda ign:
# return the first argument unchanged.
self.failUnlessEqual(shares, merge_servers(shares, set([])))
trackers = []
- for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
+ for (i, server) in [(i, "server%d" % i) for i in range(5, 9)]:
t = FakeServerTracker(server, [i])
trackers.append(t)
expected = {
shares3 = {}
trackers = []
expected = {}
- for (i, server) in [(i, "server%d" % i) for i in xrange(10)]:
+ for (i, server) in [(i, "server%d" % i) for i in range(10)]:
shares3[i] = set([server])
t = FakeServerTracker(server, [i])
trackers.append(t)
# FakeServerTracker whose job is to hold those instance variables to
# test that part.
trackers = []
- for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
+ for (i, server) in [(i, "server%d" % i) for i in range(5, 9)]:
t = FakeServerTracker(server, [i])
trackers.append(t)
# Recall that test1 is a server layout with servers_of_happiness
def test_shares_by_server(self):
- test = dict([(i, set(["server%d" % i])) for i in xrange(1, 5)])
+ test = dict([(i, set(["server%d" % i])) for i in range(1, 5)])
sbs = shares_by_server(test)
self.failUnlessEqual(set([1]), sbs["server1"])
self.failUnlessEqual(set([2]), sbs["server2"])
d.addCallback(lambda ign:
self._add_server(4))
def _copy_shares(ign):
- for i in xrange(1, 10):
+ for i in range(1, 10):
self._copy_share_to_server(i, 1)
d.addCallback(_copy_shares)
d.addCallback(lambda ign:
self.basedir = self.mktemp()
d = self._setup_and_upload()
def _setup(ign):
- for i in xrange(1, 11):
+ for i in range(1, 11):
self._add_server(server_number=i)
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
c = self.g.clients[0]
d.addCallback(lambda ign:
self._setup_and_upload())
def _then(ign):
- for i in xrange(1, 11):
+ for i in range(1, 11):
self._add_server(server_number=i)
self._add_server(server_number=11, readonly=True)
self._add_server(server_number=12, readonly=True)
self._setup_and_upload())
def _next(ign):
- for i in xrange(1, 11):
+ for i in range(1, 11):
self._add_server(server_number=i)
# Copy all of the shares to server 9, since that will be
# the first one that the selector sees.
- for i in xrange(10):
+ for i in range(10):
self._copy_share_to_server(i, 9)
# Remove server 0, and its contents
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
self.basedir = self.mktemp()
d = self._setup_and_upload()
def _then(ign):
- for i in xrange(1, 11):
+ for i in range(1, 11):
self._add_server(server_number=i, readonly=True)
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
c = self.g.clients[0]
self._add_server_with_share(server_number=8, share_number=4)
self._add_server_with_share(server_number=5, share_number=5)
self._add_server_with_share(server_number=10, share_number=7)
- for i in xrange(4):
+ for i in range(4):
self._copy_share_to_server(i, 2)
return self.g.clients[0]
d.addCallback(_server_setup)
+from __future__ import print_function
+import six
+from six.moves import filter
def foo(): pass # keep the line number constant
self.failUnlessEqual(hr(foo), "<foo() at test_util.py:2>")
self.failUnlessEqual(hr(self.test_repr),
"<bound method HumanReadable.test_repr of <allmydata.test.test_util.HumanReadable testMethod=test_repr>>")
- self.failUnlessEqual(hr(1L), "1")
+ self.failUnlessEqual(hr(1), "1")
self.failUnlessEqual(hr(10**40),
"100000000000000000...000000000000000000")
self.failUnlessEqual(hr(self), "<allmydata.test.test_util.HumanReadable testMethod=test_repr>")
self.failUnlessEqual(hr({1:2}), "{1:2}")
try:
raise ValueError
- except Exception, e:
+ except Exception as e:
self.failUnless(
hr(e) == "<ValueError: ()>" # python-2.4
or hr(e) == "ValueError()") # python-2.5
try:
raise ValueError("oops")
- except Exception, e:
+ except Exception as e:
self.failUnless(
hr(e) == "<ValueError: 'oops'>" # python-2.4
or hr(e) == "ValueError('oops',)") # python-2.5
try:
raise NoArgumentException
- except Exception, e:
+ except Exception as e:
self.failUnless(
hr(e) == "<NoArgumentException>" # python-2.4
or hr(e) == "NoArgumentException()") # python-2.5
def should_assert(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
- except AssertionError, e:
+ except AssertionError as e:
return str(e)
- except Exception, e:
+ except Exception as e:
self.fail("assert failed with non-AssertionError: %s" % e)
self.fail("assert was not caught")
def should_not_assert(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
- except AssertionError, e:
+ except AssertionError as e:
self.fail("assertion fired when it should not have: %s" % e)
- except Exception, e:
+ except Exception as e:
self.fail("assertion (which shouldn't have failed) failed with non-AssertionError: %s" % e)
return # we're happy
self.failUnlessEqual("postcondition: othermsg: 'message2' <type 'str'>", m)
class FileUtil(unittest.TestCase):
- def mkdir(self, basedir, path, mode=0777):
+ def mkdir(self, basedir, path, mode=0o777):
fn = os.path.join(basedir, path)
fileutil.make_dirs(fn, mode)
d = os.path.join(basedir, "doomed")
self.mkdir(d, "a/b")
self.touch(d, "a/b/1.txt")
- self.touch(d, "a/b/2.txt", 0444)
+ self.touch(d, "a/b/2.txt", 0o444)
self.touch(d, "a/b/3.txt", 0)
self.mkdir(d, "a/c")
self.touch(d, "a/c/1.txt")
- self.touch(d, "a/c/2.txt", 0444)
+ self.touch(d, "a/c/2.txt", 0o444)
self.touch(d, "a/c/3.txt", 0)
- os.chmod(os.path.join(d, "a/c"), 0444)
+ os.chmod(os.path.join(d, "a/c"), 0o444)
self.mkdir(d, "a/d")
self.touch(d, "a/d/1.txt")
- self.touch(d, "a/d/2.txt", 0444)
+ self.touch(d, "a/d/2.txt", 0o444)
self.touch(d, "a/d/3.txt", 0)
os.chmod(os.path.join(d, "a/d"), 0)
d[fake3] = fake7
d[3] = 7
d[3] = 8
- self.failUnless(filter(lambda x: x is 8, d.itervalues()))
- self.failUnless(filter(lambda x: x is fake7, d.itervalues()))
+ self.failUnless(filter(lambda x: x is 8, six.itervalues(d)))
+ self.failUnless(filter(lambda x: x is fake7, six.itervalues(d)))
# The real 7 should have been ejected by the d[3] = 8.
- self.failUnless(not filter(lambda x: x is 7, d.itervalues()))
- self.failUnless(filter(lambda x: x is fake3, d.iterkeys()))
- self.failUnless(filter(lambda x: x is 3, d.iterkeys()))
+ self.failUnless(not filter(lambda x: x is 7, six.itervalues(d)))
+ self.failUnless(filter(lambda x: x is fake3, six.iterkeys(d)))
+ self.failUnless(filter(lambda x: x is 3, six.iterkeys(d)))
d[fake3] = 8
d.clear()
fake7 = EqButNotIs(7)
d[fake3] = fake7
d[3] = 8
- self.failUnless(filter(lambda x: x is 8, d.itervalues()))
- self.failUnless(filter(lambda x: x is fake7, d.itervalues()))
+ self.failUnless(filter(lambda x: x is 8, six.itervalues(d)))
+ self.failUnless(filter(lambda x: x is fake7, six.itervalues(d)))
# The real 7 should have been ejected by the d[3] = 8.
- self.failUnless(not filter(lambda x: x is 7, d.itervalues()))
- self.failUnless(filter(lambda x: x is fake3, d.iterkeys()))
- self.failUnless(filter(lambda x: x is 3, d.iterkeys()))
+ self.failUnless(not filter(lambda x: x is 7, six.itervalues(d)))
+ self.failUnless(filter(lambda x: x is fake3, six.iterkeys(d)))
+ self.failUnless(filter(lambda x: x is 3, six.iterkeys(d)))
d[fake3] = 8
def test_all(self):
self.failUnlessEqual(d.get(3, "default"), "default")
self.failUnlessEqual(sorted(list(d.items())),
[(1, "b"), (2, "a")])
- self.failUnlessEqual(sorted(list(d.iteritems())),
+ self.failUnlessEqual(sorted(list(six.iteritems(d))),
[(1, "b"), (2, "a")])
self.failUnlessEqual(sorted(d.keys()), [1, 2])
self.failUnlessEqual(sorted(d.values()), ["a", "b"])
self.failUnlessEqual(d.get("c", 5), 5)
self.failUnlessEqual(sorted(list(d.items())),
[("a", 1), ("b", 2)])
- self.failUnlessEqual(sorted(list(d.iteritems())),
+ self.failUnlessEqual(sorted(list(six.iteritems(d))),
[("a", 1), ("b", 2)])
self.failUnlessEqual(sorted(d.keys()), ["a", "b"])
self.failUnlessEqual(sorted(d.values()), [1, 2])
- self.failUnless(d.has_key("a"))
- self.failIf(d.has_key("c"))
+ self.failUnless("a" in d)
+ self.failIf("c" in d)
x = d.setdefault("c", 3)
self.failUnlessEqual(x, 3)
s.add(i, 1)
return s
- def __contains__(self, (start,length)):
+ def __contains__(self, span):
+ (start, length) = span
for i in range(start, start+length):
if i not in self._have:
return False
s1 = Spans(3, 4) # 3,4,5,6
self._check1(s1)
- s1 = Spans(3L, 4L) # 3,4,5,6
+ s1 = Spans(3, 4) # 3,4,5,6
self._check1(s1)
s2 = Spans(s1)
self.failUnlessEqual(ds.get(2, 4), "fear")
ds = klass()
- ds.add(2L, "four")
- ds.add(3L, "ea")
- self.failUnlessEqual(ds.get(2L, 4L), "fear")
+ ds.add(2, "four")
+ ds.add(3, "ea")
+ self.failUnlessEqual(ds.get(2, 4), "fear")
def do_scan(self, klass):
p_added = set(range(start, end))
b = base()
if DEBUG:
- print
- print dump(b), which
+ print()
+ print(dump(b), which)
add = klass(); add.add(start, S[start:end])
- print dump(add)
+ print(dump(add))
b.add(start, S[start:end])
if DEBUG:
- print dump(b)
+ print(dump(b))
# check that the new span is there
d = b.get(start, end-start)
self.failUnlessEqual(d, S[start:end], which)
try:
check_requirement("foolscap[secure_connections] >= 0.6.0", {"foolscap": ("0.6.1+", "", None)})
# succeeding is ok
- except PackagingError, e:
+ except PackagingError as e:
self.failUnlessIn("could not parse", str(e))
self.failUnlessRaises(PackagingError, check_requirement,
+from __future__ import print_function
import os.path, re, urllib, time, cgi
import simplejson
from StringIO import StringIO
HTTPClientHEADFactory
from allmydata.client import Client, SecretHolder
from allmydata.introducer import IntroducerNode
+import six
# create a fake uploader/downloader, and a couple of fake dirnodes, then
# create a webserver that works against them
u"blockingfile", u"empty", u"n\u00fc.txt", u"quux.txt", u"sub"])
kids = dict( [(unicode(name),value)
for (name,value)
- in data[1]["children"].iteritems()] )
+ in six.iteritems(data[1]["children"])] )
self.failUnlessEqual(kids[u"sub"][0], "dirnode")
self.failUnlessIn("metadata", kids[u"sub"][1])
self.failUnlessIn("tahoe", kids[u"sub"][1]["metadata"])
form.append('')
form.append('UTF-8')
form.append(sep)
- for name, value in fields.iteritems():
+ for name, value in six.iteritems(fields):
if isinstance(value, tuple):
filename, value = value
form.append('Content-Disposition: form-data; name="%s"; '
# serverids[] keys are strings, since that's what JSON does, but
# we'd really like them to be ints
self.failUnlessEqual(data["serverids"]["0"], "phwrsjte")
- self.failUnless(data["serverids"].has_key("1"),
+ self.failUnless("1" in data["serverids"],
str(data["serverids"]))
self.failUnlessEqual(data["serverids"]["1"], "cmpuvkjm",
str(data["serverids"]))
headers = {"range": "bytes=1-10"}
d = self.GET(self.public_url + "/foo/bar.txt", headers=headers,
return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(int(status), 206)
- self.failUnless(headers.has_key("content-range"))
+ self.failUnless("content-range" in headers)
self.failUnlessReallyEqual(headers["content-range"][0],
"bytes 1-10/%d" % len(self.BAR_CONTENTS))
self.failUnlessReallyEqual(res, self.BAR_CONTENTS[1:11])
length = len(self.BAR_CONTENTS)
d = self.GET(self.public_url + "/foo/bar.txt", headers=headers,
return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(int(status), 206)
- self.failUnless(headers.has_key("content-range"))
+ self.failUnless("content-range" in headers)
self.failUnlessReallyEqual(headers["content-range"][0],
"bytes 5-%d/%d" % (length-1, length))
self.failUnlessReallyEqual(res, self.BAR_CONTENTS[5:])
length = len(self.BAR_CONTENTS)
d = self.GET(self.public_url + "/foo/bar.txt", headers=headers,
return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(int(status), 206)
- self.failUnless(headers.has_key("content-range"))
+ self.failUnless("content-range" in headers)
self.failUnlessReallyEqual(headers["content-range"][0],
"bytes %d-%d/%d" % (length-5, length-1, length))
self.failUnlessReallyEqual(res, self.BAR_CONTENTS[-5:])
headers = {"range": "bytes=1-10"}
d = self.HEAD(self.public_url + "/foo/bar.txt", headers=headers,
return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(res, "")
self.failUnlessReallyEqual(int(status), 206)
- self.failUnless(headers.has_key("content-range"))
+ self.failUnless("content-range" in headers)
self.failUnlessReallyEqual(headers["content-range"][0],
"bytes 1-10/%d" % len(self.BAR_CONTENTS))
d.addCallback(_got)
length = len(self.BAR_CONTENTS)
d = self.HEAD(self.public_url + "/foo/bar.txt", headers=headers,
return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(int(status), 206)
- self.failUnless(headers.has_key("content-range"))
+ self.failUnless("content-range" in headers)
self.failUnlessReallyEqual(headers["content-range"][0],
"bytes 5-%d/%d" % (length-1, length))
d.addCallback(_got)
length = len(self.BAR_CONTENTS)
d = self.HEAD(self.public_url + "/foo/bar.txt", headers=headers,
return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(int(status), 206)
- self.failUnless(headers.has_key("content-range"))
+ self.failUnless("content-range" in headers)
self.failUnlessReallyEqual(headers["content-range"][0],
"bytes %d-%d/%d" % (length-5, length-1, length))
d.addCallback(_got)
headers = {"range": "BOGUS=fizbop-quarnak"}
d = self.GET(self.public_url + "/foo/bar.txt", headers=headers,
return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(int(status), 200)
- self.failUnless(not headers.has_key("content-range"))
+ self.failUnless("content-range" not in headers)
self.failUnlessReallyEqual(res, self.BAR_CONTENTS)
d.addCallback(_got)
return d
def test_HEAD_FILEURL(self):
d = self.HEAD(self.public_url + "/foo/bar.txt", return_response=True)
- def _got((res, status, headers)):
+ def _got(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(res, "")
self.failUnlessReallyEqual(headers["content-length"][0],
str(len(self.BAR_CONTENTS)))
uri = "/uri/%s" % self._bar_txt_uri
d = self.GET(uri, return_response=True)
# extract the ETag
- d.addCallback(lambda (data, code, headers):
- headers['etag'][0])
+ d.addCallback(lambda data_code_headers:
+ data_code_headers[2]['etag'][0])
# do a GET that's supposed to match the ETag
d.addCallback(lambda etag:
self.GET(uri, return_response=True,
headers={"If-None-Match": etag}))
# make sure it short-circuited (304 instead of 200)
- d.addCallback(lambda (data, code, headers):
- self.failUnlessEqual(int(code), http.NOT_MODIFIED))
+ d.addCallback(lambda data_code_headers1:
+ self.failUnlessEqual(int(data_code_headers1[1]), http.NOT_MODIFIED))
return d
d.addCallback(_check_match)
def _no_etag(uri, t):
target = "/uri/%s?t=%s" % (uri, t)
d = self.GET(target, return_response=True, followRedirect=True)
- d.addCallback(lambda (data, code, headers):
- self.failIf("etag" in headers, target))
+ d.addCallback(lambda data_code_headers2:
+ self.failIf("etag" in data_code_headers2[2], target))
return d
def _yes_etag(uri, t):
target = "/uri/%s?t=%s" % (uri, t)
d = self.GET(target, return_response=True, followRedirect=True)
- d.addCallback(lambda (data, code, headers):
- self.failUnless("etag" in headers, target))
+ d.addCallback(lambda data_code_headers3:
+ self.failUnless("etag" in data_code_headers3[2], target))
return d
d.addCallback(lambda ign: _yes_etag(self._bar_txt_uri, ""))
def test_GET_FILEURL_save(self):
d = self.GET(self.public_url + "/foo/bar.txt?filename=bar.txt&save=true",
return_response=True)
- def _got((res, statuscode, headers)):
+ def _got(result):
+ (res, statuscode, headers) = result
content_disposition = headers["content-disposition"][0]
self.failUnless(content_disposition == 'attachment; filename="bar.txt"', content_disposition)
self.failUnlessIsBarDotTxt(res)
"largest-directory-children": 8,
"largest-immutable-file": 19,
}
- for k,v in expected.iteritems():
+ for k,v in six.iteritems(expected):
self.failUnlessReallyEqual(stats[k], v,
"stats[%s] was %s, not %s" %
(k, stats[k], v))
return d
def dump_root(self):
- print "NODEWALK"
+ print("NODEWALK")
w = webish.DirnodeWalkerMixin()
def visitor(childpath, childnode, metadata):
- print childpath
+ print(childpath)
d = w.walk(self.public_root, visitor)
return d
self.failUnlessEqual(parsed[0], "dirnode")
children = dict( [(unicode(name),value)
for (name,value)
- in parsed[1]["children"].iteritems()] )
+ in six.iteritems(parsed[1]["children"])] )
self.failUnlessIn(u"new.txt", children)
new_json = children[u"new.txt"]
self.failUnlessEqual(new_json[0], "filenode")
d.addCallback(lambda res:
self.HEAD(self.public_url + "/foo/new.txt",
return_response=True))
- def _got_headers((res, status, headers)):
+ def _got_headers(result):
+ (res, status, headers) = result
self.failUnlessReallyEqual(res, "")
self.failUnlessReallyEqual(headers["content-length"][0],
str(len(NEW2_CONTENTS)))
# will be rather terse and unhelpful. addErrback this method to the
# end of your chain to get more information out of these errors.
if f.check(error.Error):
- print "web.error.Error:"
- print f
- print f.value.response
+ print("web.error.Error:")
+ print(f)
+ print(f.value.response)
return f
def test_POST_upload_replace(self):
for line in res.splitlines()
if line]
except ValueError:
- print "response is:", res
- print "undecodeable line was '%s'" % line
+ print("response is:", res)
+ print("undecodeable line was '%s'" % line)
raise
self.failUnlessReallyEqual(len(units), 5+1)
# should be parent-first
d.addCallback(lambda ign:
self.delete_shares_numbered(self.uris["subdir"],
- range(1, 10)))
+ list(range(1, 10))))
# root
# root/good
d.addCallback(lambda ign: c0.upload(upload.Data(DATA, convergence="")))
def _stash_bad(ur):
self.fileurls["1share"] = "uri/" + urllib.quote(ur.get_uri())
- self.delete_shares_numbered(ur.get_uri(), range(1,10))
+ self.delete_shares_numbered(ur.get_uri(), list(range(1,10)))
u = uri.from_string(ur.get_uri())
u.key = testutil.flip_bit(u.key, 0)
u = n.get_uri()
url = self.fileurls["dir-1share"] = "uri/" + urllib.quote(u) + "/"
self.fileurls["dir-1share-json"] = url + "?t=json"
- self.delete_shares_numbered(u, range(1,10))
+ self.delete_shares_numbered(u, list(range(1,10)))
d.addCallback(_mangle_dirnode_1share)
d.addCallback(lambda ign: c0.create_dirnode())
def _mangle_dirnode_0share(n):
u = n.get_uri()
url = self.fileurls["dir-0share"] = "uri/" + urllib.quote(u) + "/"
self.fileurls["dir-0share-json"] = url + "?t=json"
- self.delete_shares_numbered(u, range(0,10))
+ self.delete_shares_numbered(u, list(range(0,10)))
d.addCallback(_mangle_dirnode_0share)
# NotEnoughSharesError should be reported sensibly, with a
summary, and 'misc/coverage2html.py' will produce a more useful HTML report.
"""
+from __future__ import print_function
from twisted.trial.reporter import TreeReporter, VerboseTextReporter
def stop_coverage(self):
cov.stop()
cov.save()
- print "Coverage results written to .coverage"
+ print("Coverage results written to .coverage")
def printSummary(self):
# for twisted-2.5.x
self.stop_coverage()
# trigger exceptions. So it is a guide to what methods are invoked on a
# Reporter.
def __init__(self, *args, **kwargs):
- print "START HERE"
+ print("START HERE")
self.r = TreeReporter(*args, **kwargs)
self.shouldStop = self.r.shouldStop
self.separator = self.r.separator
def write(self, *args):
if not self._starting2:
self._starting2 = True
- print "FIRST WRITE"
+ print("FIRST WRITE")
return self.r.write(*args)
def startTest(self, *args, **kwargs):
return self.r.writeln(*args, **kwargs)
def printSummary(self, *args, **kwargs):
- print "PRINT SUMMARY"
+ print("PRINT SUMMARY")
return self.r.printSummary(*args, **kwargs)
def wasSuccessful(self, *args, **kwargs):
else:
error = MustBeReadonlyError(kind + " used in a read-only context", name)
- except BadURIError, e:
+ except BadURIError as e:
error = e
return UnknownURI(u, error=error)
if s < 120:
return _plural(s, "second")
if s < 3*HOUR:
- return _plural(s/60, "minute")
+ return _plural(s//60, "minute")
if s < 2*DAY:
- return _plural(s/HOUR, "hour")
+ return _plural(s//HOUR, "hour")
if s < 2*MONTH:
- return _plural(s/DAY, "day")
+ return _plural(s//DAY, "day")
if s < 4*YEAR:
- return _plural(s/MONTH, "month")
- return _plural(s/YEAR, "year")
+ return _plural(s//MONTH, "month")
+ return _plural(s//YEAR, "year")
def abbreviate_space(s, SI=True):
if s is None:
"""
from allmydata.util.humanreadable import hr
+from allmydata.util.sixutil import map
def _assert(___cond=False, *___args, **___kwargs):
if ___cond:
msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
- raise AssertionError, "".join(msgbuf)
+ raise AssertionError("".join(msgbuf))
def precondition(___cond=False, *___args, **___kwargs):
if ___cond:
msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
- raise AssertionError, "".join(msgbuf)
+ raise AssertionError("".join(msgbuf))
def postcondition(___cond=False, *___args, **___kwargs):
if ___cond:
msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
- raise AssertionError, "".join(msgbuf)
+ raise AssertionError("".join(msgbuf))
import string
from allmydata.util.assertutil import precondition
+from allmydata.util.sixutil import map
z_base_32_alphabet = "ybndrfg8ejkmcpqxot1uwisza345h769" # Zooko's choice, rationale in "DESIGN" doc
rfc3548_alphabet = "abcdefghijklmnopqrstuvwxyz234567" # RFC3548 standard used by Gnutella, Content-Addressable Web, THEX, Bitzi, Web-Calculus...
chars = rfc3548_alphabet
-vals = ''.join(map(chr, range(32)))
+vals = ''.join(map(chr, list(range(32))))
c2vtranstable = string.maketrans(chars, vals)
v2ctranstable = string.maketrans(vals, chars)
identitytranstable = string.maketrans('', '')
s.extend(_get_trailing_chars_without_lsbs(N+1, d=d))
i = 0
while i < len(chars):
- if not d.has_key(i):
+ if i not in d:
d[i] = None
s.append(chars[i])
i = i + 2**N
@return the contents of os in base-32 encoded form
"""
precondition(isinstance(lengthinbits, (int, long,)), "lengthinbits is required to be an integer.", lengthinbits=lengthinbits)
- precondition((lengthinbits+7)/8 == len(os), "lengthinbits is required to specify a number of bits storable in exactly len(os) octets.", lengthinbits=lengthinbits, lenos=len(os))
+ precondition((lengthinbits+7)//8 == len(os), "lengthinbits is required to specify a number of bits storable in exactly len(os) octets.", lengthinbits=lengthinbits, lenos=len(os))
os = map(ord, os)
- numquintets = (lengthinbits+4)/5
- numoctetsofdata = (lengthinbits+7)/8
+ numquintets = (lengthinbits+4)//5
+ numoctetsofdata = (lengthinbits+7)//8
# print "numoctetsofdata: %s, len(os): %s, lengthinbits: %s, numquintets: %s" % (numoctetsofdata, len(os), lengthinbits, numquintets,)
# strip trailing octets that won't be used
del os[numoctetsofdata:]
os[-1] = os[-1] >> (8-(lengthinbits % 8))
os[-1] = os[-1] << (8-(lengthinbits % 8))
# append zero octets for padding if needed
- numoctetsneeded = (numquintets*5+7)/8 + 1
+ numoctetsneeded = (numquintets*5+7)//8 + 1
os.extend([0]*(numoctetsneeded-len(os)))
quintets = []
cutoff = 256
continue
cutoff = cutoff * 8
- quintet = num / cutoff
+ quintet = num // cutoff
quintets.append(quintet)
num = num - (quintet * cutoff)
- cutoff = cutoff / 32
- quintet = num / cutoff
+ cutoff = cutoff // 32
+ quintet = num // cutoff
quintets.append(quintet)
num = num - (quintet * cutoff)
precondition(isinstance(s, str), s)
if s == '':
return True
- assert lengthinbits%5 < len(s5), lengthinbits
- assert ord(s[-1]) < s5[lengthinbits%5]
- return (((lengthinbits+4)/5) == len(s)) and s5[lengthinbits%5][ord(s[-1])] and not string.translate(s, identitytranstable, chars)
+ assert int(lengthinbits%5) < len(s5), lengthinbits
+ #FIXME assert ord(s[-1]) < s5[lengthinbits%5]
+ return (((lengthinbits+4)//5) == len(s)) and s5[lengthinbits%5][ord(s[-1])] and not string.translate(s, identitytranstable, chars)
def num_octets_that_encode_to_this_many_quintets(numqs):
# Here is a computation that conveniently expresses this:
- return (numqs*5+3)/8
+ return (numqs*5+3)//8
def a2b(cs):
"""
qs = map(ord, string.translate(cs, c2vtranstable))
- numoctets = (lengthinbits+7)/8
- numquintetsofdata = (lengthinbits+4)/5
+ numoctets = (lengthinbits+7)//8
+ numquintetsofdata = (lengthinbits+4)//5
# strip trailing quintets that won't be used
del qs[numquintetsofdata:]
# zero out any unused bits in the final quintet
qs[-1] = qs[-1] >> (5-(lengthinbits % 5))
qs[-1] = qs[-1] << (5-(lengthinbits % 5))
# append zero quintets for padding if needed
- numquintetsneeded = (numoctets*8+4)/5
+ numquintetsneeded = (numoctets*8+4)//5
qs.extend([0]*(numquintetsneeded-len(qs)))
octets = []
i = 1
while len(octets) < numoctets:
while pos > 256:
- pos = pos / 32
+ pos = pos // 32
num = num + (qs[i] * pos)
i = i + 1
- octet = num / 256
+ octet = num // 256
octets.append(octet)
num = num - (octet * 256)
num = num * 256
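# On Python 3 the "/" operator between ints yields a float, so the size
# computations above switch to "//" to keep producing integer octet and quintet
# counts. A small sanity sketch of the relationship (values are illustrative):
for lengthinbits in (1, 8, 10, 25, 40):
    numoctets = (lengthinbits + 7) // 8      # octets needed to hold that many bits
    numquintets = (lengthinbits + 4) // 5    # base-32 characters they encode to
    assert isinstance(numoctets, int) and isinstance(numquintets, int)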
from bisect import bisect_left, insort_left
from allmydata.util.assertutil import _assert, precondition
+from functools import reduce
+import six
+from allmydata.util.sixutil import map
+from six.moves import zip
def move(k, d1, d2, strict=False):
"""
Move item with key k from d1 to d2.
"""
- if strict and not d1.has_key(k):
- raise KeyError, k
+ if strict and k not in d1:
+ raise KeyError(k)
d2[k] = d1[k]
del d1[k]
"""
if len(d1) > len(d2):
for k in d2.keys():
- if d1.has_key(k):
+ if k in d1:
del d1[k]
else:
for k in d1.keys():
- if d2.has_key(k):
+ if k in d2:
del d1[k]
return d1
self[key] = set([value])
def update(self, otherdictofsets):
- for key, values in otherdictofsets.iteritems():
+ for key, values in six.iteritems(otherdictofsets):
if key in self:
self[key].update(values)
else:
self.update(initialdata)
def del_if_present(self, key):
- if self.has_key(key):
+ if key in self:
del self[key]
def items_sorted_by_value(self):
"""
@return a sequence of (key, value,) pairs sorted according to value
"""
- l = [(x[1], x[0],) for x in self.d.iteritems()]
+ l = [(x[1], x[0],) for x in six.iteritems(self.d)]
l.sort()
return [(x[1], x[0],) for x in l]
def __cmp__(self, other):
try:
return self.d.__cmp__(other)
- except TypeError, le:
+ except TypeError as le:
# maybe we should look for a .d member in other. I know this is insanely kludgey, but the Right Way To Do It is for dict.__cmp__ to use structural typing ("duck typing")
try:
return self.d.__cmp__(other.d)
"""
@return a sequence of (key, value,) pairs sorted according to value
"""
- l = [(x[1], x[0],) for x in self.d.iteritems()]
+ l = [(x[1], x[0],) for x in six.iteritems(self.d)]
l.sort()
return [(x[1], x[0],) for x in l]
def item_with_largest_value(self):
- it = self.d.iteritems()
- (winner, winnerval,) = it.next()
+ it = six.iteritems(self.d)
+ (winner, winnerval,) = six.advance_iterator(it)
try:
while True:
- n, nv = it.next()
+ n, nv = six.advance_iterator(it)
if nv > winnerval:
winner = n
winnerval = nv
def __cmp__(self, other):
try:
return self.d.__cmp__(other)
- except TypeError, le:
+ except TypeError as le:
# maybe we should look for a .d member in other. I know this is insanely kludgey, but the Right Way To Do It is for dict.__cmp__ to use structural typing ("duck typing")
try:
return self.d.__cmp__(other.d)
return self.d.values(*args, **kwargs)
def del_if_present(d, k):
- if d.has_key(k):
+ if k in d:
del d[k]
class ValueOrderedDict:
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
- precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
+ precondition((self.i == len(self.c.l)) or self.c.l[self.i][1] in self.c.d, "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
- precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
+ precondition((self.i == len(self.c.l)) or self.c.l[self.i][1] in self.c.d, "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
- precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
+ precondition((self.i == len(self.c.l)) or self.c.l[self.i][1] in self.c.d, "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
def __repr_n__(self, n=None):
s = ["{",]
try:
- iter = self.iteritems()
- x = iter.next()
+ iter = six.iteritems(self)
+ x = six.advance_iterator(iter)
s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
i = 1
while (n is None) or (i < n):
- x = iter.next()
+ x = six.advance_iterator(iter)
s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
except StopIteration:
pass
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),)
def __eq__(self, other):
- for (k, v,) in other.iteritems():
- if not self.d.has_key(k) or self.d[k] != v:
+ for (k, v,) in six.iteritems(other):
+ if k not in self.d or self.d[k] != v:
return False
return True
def _assert_invariants(self):
iter = self.l.__iter__()
try:
- oldx = iter.next()
+ oldx = six.advance_iterator(iter)
while True:
- x = iter.next()
+ x = six.advance_iterator(iter)
# self.l is required to be sorted
_assert(x >= oldx, x, oldx)
# every element of self.l is required to appear in self.d
- _assert(self.d.has_key(x[1]), x)
+ _assert(x[1] in self.d, x)
oldx =x
except StopIteration:
pass
- for (k, v,) in self.d.iteritems():
+ for (k, v,) in six.iteritems(self.d):
i = bisect_left(self.l, (v, k,))
while (self.l[i][0] is not v) or (self.l[i][1] is not k):
i += 1
def setdefault(self, key, default=None):
assert self._assert_invariants()
- if not self.has_key(key):
+ if key not in self:
self[key] = default
assert self._assert_invariants()
return self[key]
def __setitem__(self, key, val=None):
assert self._assert_invariants()
- if self.d.has_key(key):
+ if key in self.d:
oldval = self.d[key]
if oldval != val:
# re-sort
return result
def __getitem__(self, key, default=None, strictkey=True):
- if not self.d.has_key(key):
+ if key not in self.d:
if strictkey:
- raise KeyError, key
+ raise KeyError(key)
else:
return default
return self.d[key]
that key and strictkey is False
"""
assert self._assert_invariants()
- if self.d.has_key(key):
+ if key in self.d:
val = self.d.pop(key)
i = bisect_left(self.l, (val, key,))
while (self.l[i][0] is not val) or (self.l[i][1] is not key):
return val
elif strictkey:
assert self._assert_invariants()
- raise KeyError, key
+ raise KeyError(key)
else:
assert self._assert_invariants()
return default
@return: self
"""
assert self._assert_invariants()
- for (k, v,) in otherdict.iteritems():
+ for (k, v,) in six.iteritems(otherdict):
self.insert(k, v)
assert self._assert_invariants()
return self
def has_key(self, key):
assert self._assert_invariants()
- return self.d.has_key(key)
+ return key in self.d
def popitem(self):
if not self.l:
- raise KeyError, 'popitem(): dictionary is empty'
+ raise KeyError('popitem(): dictionary is empty')
le = self.l.pop(0)
del self.d[le[1]]
return (le[1], le[0],)
def pop(self, k, default=None, strictkey=False):
- if not self.d.has_key(k):
+ if k not in self.d:
if strictkey:
- raise KeyError, k
+ raise KeyError(k)
else:
return default
v = self.d.pop(k)
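# six.iteritems(d) stands in for the Python-2-only d.iteritems() used throughout
# this module: on Python 2 it calls d.iteritems(), on Python 3 it wraps d.items(),
# returning an iterator of (key, value) pairs either way. A small usage sketch
# (the dict here is illustrative only):
import six
_sample = {"a": 1, "b": 2}
assert sorted(six.iteritems(_sample)) == [("a", 1), ("b", 2)]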
import locale
from allmydata.util import log
from allmydata.util.fileutil import abspath_expanduser_unicode
+from allmydata.util.sixutil import map
def canonical_encoding(encoding):
# from /usr/src/linux/include/linux/inotify.h
-IN_ACCESS = 0x00000001L # File was accessed
-IN_MODIFY = 0x00000002L # File was modified
-IN_ATTRIB = 0x00000004L # Metadata changed
-IN_CLOSE_WRITE = 0x00000008L # Writeable file was closed
-IN_CLOSE_NOWRITE = 0x00000010L # Unwriteable file closed
-IN_OPEN = 0x00000020L # File was opened
-IN_MOVED_FROM = 0x00000040L # File was moved from X
-IN_MOVED_TO = 0x00000080L # File was moved to Y
-IN_CREATE = 0x00000100L # Subfile was created
-IN_DELETE = 0x00000200L # Subfile was delete
-IN_DELETE_SELF = 0x00000400L # Self was deleted
-IN_MOVE_SELF = 0x00000800L # Self was moved
-IN_UNMOUNT = 0x00002000L # Backing fs was unmounted
-IN_Q_OVERFLOW = 0x00004000L # Event queued overflowed
-IN_IGNORED = 0x00008000L # File was ignored
+IN_ACCESS = 0x00000001 # File was accessed
+IN_MODIFY = 0x00000002 # File was modified
+IN_ATTRIB = 0x00000004 # Metadata changed
+IN_CLOSE_WRITE = 0x00000008 # Writeable file was closed
+IN_CLOSE_NOWRITE = 0x00000010 # Unwriteable file closed
+IN_OPEN = 0x00000020 # File was opened
+IN_MOVED_FROM = 0x00000040 # File was moved from X
+IN_MOVED_TO = 0x00000080 # File was moved to Y
+IN_CREATE = 0x00000100 # Subfile was created
+IN_DELETE = 0x00000200 # Subfile was deleted
+IN_DELETE_SELF = 0x00000400 # Self was deleted
+IN_MOVE_SELF = 0x00000800 # Self was moved
+IN_UNMOUNT = 0x00002000 # Backing fs was unmounted
+IN_Q_OVERFLOW = 0x00004000 # Event queued overflowed
+IN_IGNORED = 0x00008000 # File was ignored
IN_ONLYDIR = 0x01000000 # only watch the path if it is a directory
IN_DONT_FOLLOW = 0x02000000 # don't follow a sym link
for i in range(tries-1):
try:
return os.rename(src, dst)
- except EnvironmentError, le:
+ except EnvironmentError as le:
# XXX Tighten this to check if this is a permission denied error (possibly due to another Windows process having the file open and execute the superkludge only in this case.
log.msg("XXX KLUDGE Attempting to move file %s => %s; got %s; sleeping %s seconds" % (src, dst, le, basedelay,))
time.sleep(basedelay)
for i in range(tries-1):
try:
return os.remove(f)
- except EnvironmentError, le:
+ except EnvironmentError as le:
# XXX Tighten this to check if this is a permission denied error (possibly due to another Windows process having the file open and execute the superkludge only in this case.
if not os.path.exists(f):
return
self.file.truncate(newsize)
-def make_dirs(dirname, mode=0777):
+def make_dirs(dirname, mode=0o777):
"""
An idempotent version of os.makedirs(). If the dir already exists, do
nothing and return without raising an exception. If this call creates the
tx = None
try:
os.makedirs(dirname, mode)
- except OSError, x:
+ except OSError as x:
tx = x
if not os.path.isdir(dirname):
if tx:
raise tx
- raise exceptions.IOError, "unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirname # careful not to construct an IOError with a 2-tuple, as that has a special meaning...
+ raise exceptions.IOError("unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirname) # careful not to construct an IOError with a 2-tuple, as that has a special meaning...
def rm_dir(dirname):
"""
else:
remove(fullname)
os.rmdir(dirname)
- except Exception, le:
+ except Exception as le:
# Ignore "No such file or directory"
if (not isinstance(le, OSError)) or le.args[0] != 2:
excs.append(le)
if len(excs) == 1:
raise excs[0]
if len(excs) == 0:
- raise OSError, "Failed to remove dir for unknown reason."
- raise OSError, excs
+ raise OSError("Failed to remove dir for unknown reason.")
+ raise OSError(excs)
def remove_if_possible(f):
"""
from copy import deepcopy
+import six
+from allmydata.util.sixutil import map
def failure_message(peer_count, k, happy, effective_happy):
# If peer_count < needed_shares, this error message makes more
dictionary of sets of shares, indexed by peerids.
"""
ret = {}
- for shareid, peers in servermap.iteritems():
+ for shareid, peers in six.iteritems(servermap):
assert isinstance(peers, set)
for peerid in peers:
ret.setdefault(peerid, set()).add(shareid)
# The implementation here is an adapation of an algorithm described in
# "Introduction to Algorithms", Cormen et al, 2nd ed., pp 658-662.
dim = len(graph)
- flow_function = [[0 for sh in xrange(dim)] for s in xrange(dim)]
+ flow_function = [[0 for sh in range(dim)] for s in range(dim)]
residual_graph, residual_function = residual_network(graph, flow_function)
while augmenting_path_for(residual_graph):
path = augmenting_path_for(residual_graph)
# is the amount of unused capacity on that edge. Taking the
# minimum of a list of those values for each edge in the
# augmenting path gives us our delta.
- delta = min(map(lambda (u, v), rf=residual_function: rf[u][v],
+ delta = min(map(lambda u_and_v, rf=residual_function: rf[u_and_v[0]][u_and_v[1]],
path))
for (u, v) in path:
flow_function[u][v] += delta
# our graph, so we can stop after summing flow across those. The
# value of a flow computed in this way is the size of a maximum
# matching on the bipartite graph described above.
- return sum([flow_function[0][v] for v in xrange(1, num_servers+1)])
+ return sum([flow_function[0][v] for v in range(1, num_servers+1)])
def flow_network_for(sharemap):
"""
graph.append(sharemap[k])
# For each share, add an entry that has an edge to the sink.
sink_num = num_servers + num_shares + 1
- for i in xrange(num_shares):
+ for i in range(num_shares):
graph.append([sink_num])
# Add an empty entry for the sink, which has no outbound edges.
graph.append([])
# Number the shares
for k in ret:
for shnum in ret[k]:
- if not shares.has_key(shnum):
+ if shnum not in shares:
shares[shnum] = num
num += 1
ret[k] = map(lambda x: shares[x], ret[k])
flow network represented by my graph and f arguments. graph is a
flow network in adjacency-list form, and f is a flow in graph.
"""
- new_graph = [[] for i in xrange(len(graph))]
- cf = [[0 for s in xrange(len(graph))] for sh in xrange(len(graph))]
- for i in xrange(len(graph)):
+ new_graph = [[] for i in range(len(graph))]
+ cf = [[0 for s in range(len(graph))] for sh in range(len(graph))]
+ for i in range(len(graph)):
for v in graph[i]:
if f[i][v] == 1:
# We add an edge (v, i) with cf[v,i] = 1. This means
GRAY = 1
# BLACK vertices are those we have seen and explored
BLACK = 2
- color = [WHITE for i in xrange(len(graph))]
- predecessor = [None for i in xrange(len(graph))]
- distance = [-1 for i in xrange(len(graph))]
+ color = [WHITE for i in range(len(graph))]
+ predecessor = [None for i in range(len(graph))]
+ distance = [-1 for i in range(len(graph))]
queue = [s] # vertices that we haven't explored yet.
color[s] = GRAY
distance[s] = 0
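# The happiness computation above models share placement as a max-flow problem on
# a bipartite graph: vertex 0 is the source, vertices 1..num_servers are servers,
# the next num_shares vertices are shares, and the final vertex is the sink, with
# every edge carrying unit capacity, so the maximum flow equals the size of a
# maximum server/share matching. A hand-built sketch for 2 servers each holding
# one of 2 shares (illustrative adjacency lists, not output of flow_network_for):
example_graph = [
    [1, 2],   # 0: source -> server 1, server 2
    [3],      # 1: server 1 -> share vertex 3
    [4],      # 2: server 2 -> share vertex 4
    [5],      # 3: share vertex 3 -> sink
    [5],      # 4: share vertex 4 -> sink
    [],       # 5: sink has no outgoing edges
]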
def repr_function(self, obj, level):
if hasattr(obj, 'func_code'):
- return '<' + obj.func_name + '() at ' + os.path.basename(obj.func_code.co_filename) + ':' + str(obj.func_code.co_firstlineno) + '>'
+ return '<' + obj.__name__ + '() at ' + os.path.basename(obj.__code__.co_filename) + ':' + str(obj.__code__.co_firstlineno) + '>'
else:
- return '<' + obj.func_name + '() at (builtin)'
+ return '<' + obj.__name__ + '() at (builtin)'
def repr_instance_method(self, obj, level):
if hasattr(obj, 'func_code'):
- return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at ' + os.path.basename(obj.im_func.func_code.co_filename) + ':' + str(obj.im_func.func_code.co_firstlineno) + '>'
+ return '<' + obj.__self__.__class__.__name__ + '.' + obj.__func__.__name__ + '() at ' + os.path.basename(obj.__func__.__code__.co_filename) + ':' + str(obj.__func__.__code__.co_firstlineno) + '>'
else:
- return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at (builtin)'
+ return '<' + obj.__self__.__class__.__name__ + '.' + obj.__func__.__name__ + '() at (builtin)'
def repr_long(self, obj, level):
- s = `obj` # XXX Hope this isn't too slow...
+ s = repr(obj) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
- i = max(0, (self.maxlong-3)/2)
+ i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
def _query(path, args, regex):
env = {'LANG': 'en_US.UTF-8'}
TRIES = 5
- for trial in xrange(TRIES):
+ for trial in range(TRIES):
try:
p = subprocess.Popen([path] + list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
(output, err) = p.communicate()
break
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EINTR and trial < TRIES-1:
continue
raise
"""
The smallest integer k such that k*d >= n.
"""
- return (n/d) + (n%d != 0)
+ return (n//d) + (n%d != 0)
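# div_ceil rounds up using only integer arithmetic: n//d gives the floor, and the
# boolean (n%d != 0) contributes exactly 1 when d does not divide n evenly. A
# quick sanity sketch (values are illustrative):
assert (7 // 3) + (7 % 3 != 0) == 3    # ceil(7/3) == 3
assert (6 // 3) + (6 % 3 != 0) == 2    # exact division adds nothing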
def next_multiple(n, k):
"""
return k**x
def ave(l):
- return sum(l) / len(l)
+ return sum(l) // len(l)
def log_ceil(n, b):
"""
+from __future__ import print_function
import time
from twisted.internet import task
if not e.check(*self._poll_should_ignore_these_errors):
errs.append(e)
if errs:
- print errs
+ print(errs)
self.fail("Errors snooped, terminating early")
--- /dev/null
+
+def map(f, xs, ys=None):
+ if ys is None:
+ return [f(x) for x in xs]
+ else:
+ if len(xs) != len(ys):
+ raise AssertionError("iterators must be the same length")
+ return [f(x, y) for (x, y) in zip(xs, ys)]
\ No newline at end of file
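# The new allmydata.util.sixutil.map above preserves the Python-2 map() behaviour
# this codebase relies on: it always returns a list (Python 3's builtin map is a
# lazy iterator) and, for the two-sequence form, it requires equal lengths rather
# than padding or truncating. A usage sketch, assuming the map() defined above is
# the one in scope (values are illustrative):
assert map(ord, "ab") == [97, 98]
assert map(lambda x, y: x + y, [1, 2], [10, 20]) == [11, 22]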
+from __future__ import print_function
class Spans:
"""I represent a compressed list of booleans, one per index (an integer).
assert start > prev_end
prev_end = start+length
except AssertionError:
- print "BAD:", self.dump()
+ print("BAD:", self.dump())
raise
def add(self, start, length):
not_other = bounds - other
return self - not_other
- def __contains__(self, (start,length)):
+ def __contains__(self, span):
+ (start, length) = span
for span_start,span_length in self._spans:
o = overlap(start, length, span_start, span_length)
if o:
for start, data in self.spans[1:]:
if not start > prev_end:
# adjacent or overlapping: bad
- print "ASSERTION FAILED", self.spans
+ print("ASSERTION FAILED", self.spans)
raise AssertionError
def get(self, start, length):
# Transitive Grace Period Public License, version 1 or later.
from __future__ import division
+from __future__ import print_function
from allmydata.util.mathutil import round_sigfigs
import math
import sys
+from functools import reduce
def pr_file_loss(p_list, k):
"""
significant digits.
"""
for k, p in enumerate(pmf):
- print >>out, "i=" + str(k) + ":", round_sigfigs(p, n)
+ print("i=" + str(k) + ":", round_sigfigs(p, n), file=out)
def pr_backup_file_loss(p_list, backup_p, k):
"""
"""
m = _conversion_re.match(isotime)
if not m:
- raise ValueError, (isotime, "not a complete ISO8601 timestamp")
+ raise ValueError(isotime, "not a complete ISO8601 timestamp")
year, month, day = int(m.group('year')), int(m.group('month')), int(m.group('day'))
hour, minute, second = int(m.group('hour')), int(m.group('minute')), int(m.group('second'))
subsecstr = m.group('subsecond')
self._cannot_compare(other)
return self.parts == other.parts
+ def __hash__(self):
+ return hash(self.parts)
+
def __lt__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
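# Python 3 sets __hash__ to None on any class that defines __eq__ without also
# defining __hash__, which would make NormalizedVersion unusable as a dict key or
# set member; hashing the same .parts tuple that __eq__ compares keeps equal
# versions hashing equally. A minimal sketch of that rule (the class below is
# illustrative, not from this codebase):
class _EqOnly(object):
    def __eq__(self, other):
        return True
# On Python 3, _EqOnly.__hash__ is None, so putting _EqOnly() in a set raises TypeError.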
from allmydata.mutable.common import UnrecoverableFileError
from allmydata.util import abbreviate
from allmydata.util.encodingutil import to_str, quote_output
+import six
TIME_FORMAT = "%H:%M:%S %d-%b-%Y"
children = {}
if children_json:
data = simplejson.loads(children_json)
- for (namex, (ctype, propdict)) in data.iteritems():
+ for (namex, (ctype, propdict)) in six.iteritems(data):
namex = unicode(namex)
writecap = to_str(propdict.get("rw_uri"))
readcap = to_str(propdict.get("ro_uri"))
+from __future__ import print_function
import simplejson
import urllib
from allmydata.web.operations import ReloadMixin
from allmydata.web.check_results import json_check_results, \
json_check_and_repair_results
+import six
class BlockingFileError(Exception):
# TODO: catch and transform
def got_child(self, node_or_failure, ctx, name):
DEBUG = False
- if DEBUG: print "GOT_CHILD", name, node_or_failure
+ if DEBUG: print("GOT_CHILD", name, node_or_failure)
req = IRequest(ctx)
method = req.method
nonterminal = len(req.postpath) > 1
f = node_or_failure
f.trap(NoSuchChildError)
# No child by this name. What should we do about it?
- if DEBUG: print "no child", name
- if DEBUG: print "postpath", req.postpath
+ if DEBUG: print("no child", name)
+ if DEBUG: print("postpath", req.postpath)
if nonterminal:
- if DEBUG: print " intermediate"
+ if DEBUG: print(" intermediate")
if should_create_intermediate_directories(req):
# create intermediate directories
- if DEBUG: print " making intermediate directory"
+ if DEBUG: print(" making intermediate directory")
d = self.node.create_subdirectory(name)
d.addCallback(make_handler_for,
self.client, self.node, name)
return d
else:
- if DEBUG: print " terminal"
+ if DEBUG: print(" terminal")
# terminal node
if (method,t) in [ ("POST","mkdir"), ("PUT","mkdir"),
("POST", "mkdir-with-children"),
("POST", "mkdir-immutable") ]:
- if DEBUG: print " making final directory"
+ if DEBUG: print(" making final directory")
# final directory
kids = {}
if t in ("mkdir-with-children", "mkdir-immutable"):
self.client, self.node, name)
return d
if (method,t) in ( ("PUT",""), ("PUT","uri"), ):
- if DEBUG: print " PUT, making leaf placeholder"
+ if DEBUG: print(" PUT, making leaf placeholder")
# we were trying to find the leaf filenode (to put a new
# file in its place), and it didn't exist. That's ok,
# since that's the leaf node that we're about to create.
# We make a dummy one, which will respond to the PUT
# request by replacing itself.
return PlaceHolderNodeHandler(self.client, self.node, name)
- if DEBUG: print " 404"
+ if DEBUG: print(" 404")
# otherwise, we just return a no-such-child error
return f
if not IDirectoryNode.providedBy(node):
# we would have put a new directory here, but there was a
# file in the way.
- if DEBUG: print "blocking"
+ if DEBUG: print("blocking")
raise WebError("Unable to create directory '%s': "
"a file was in the way" % name,
http.CONFLICT)
- if DEBUG: print "good child"
+ if DEBUG: print("good child")
return make_handler_for(node, self.client, self.node, name)
def render_DELETE(self, ctx):
body = req.content.read()
try:
children = simplejson.loads(body)
- except ValueError, le:
+ except ValueError as le:
le.args = tuple(le.args + (body,))
# TODO test handling of bad JSON
raise
cs = {}
- for name, (file_or_dir, mddict) in children.iteritems():
+ for name, (file_or_dir, mddict) in six.iteritems(children):
name = unicode(name) # simplejson-2.0.1 returns str *or* unicode
writecap = mddict.get('rw_uri')
if writecap is not None:
d = dirnode.list()
def _got(children):
kids = {}
- for name, (childnode, metadata) in children.iteritems():
+ for name, (childnode, metadata) in six.iteritems(children):
assert IFilesystemNode.providedBy(childnode), childnode
rw_uri = childnode.get_write_uri()
ro_uri = childnode.get_readonly_uri()
def data_items(self, ctx, data):
return self.monitor.get_status()["manifest"]
- def render_row(self, ctx, (path, cap)):
+ def render_row(self, ctx, item):
+ (path, cap) = item
ctx.fillSlots("path", self.slashify_path(path))
root = get_root(ctx)
# TODO: we need a clean consistent way to get the type of a cap string
from allmydata import get_package_versions_string
from allmydata.util import idlib
from allmydata.web.common import getxmlfile, get_arg, TIME_FORMAT
+import six
class IntroducerRoot(rend.Page):
res["announcement_summary"] = announcement_summary
distinct_hosts = dict([(name, len(hosts))
for (name, hosts)
- in service_hosts.iteritems()])
+ in six.iteritems(service_hosts)])
res["announcement_distinct_hosts"] = distinct_hosts
return simplejson.dumps(res, indent=1) + "\n"
HOUR = 60*MINUTE
DAY = 24*HOUR
-(MONITOR, RENDERER, WHEN_ADDED) = range(3)
+(MONITOR, RENDERER, WHEN_ADDED) = list(range(3))
class OphandleTable(rend.Page, service.Service):
implements(IOpHandleTable)
rows = []
for ev in events:
ev = ev.copy()
- if ev.has_key('server'):
+ if 'server' in ev:
ev["serverid"] = ev["server"].get_longname()
del ev["server"]
# find an empty slot in the rows
+from __future__ import print_function
done = False
# So be paranoid about catching errors and reporting them to original_stderr,
# so that we can at least see them.
def _complain(message):
- print >>original_stderr, isinstance(message, str) and message or repr(message)
+ print(isinstance(message, str) and message or repr(message), file=original_stderr)
log.msg(message, level=log.WEIRD)
# Work around <http://bugs.python.org/issue6058>.
if self._hConsole is None:
try:
self._stream.flush()
- except Exception, e:
+ except Exception as e:
_complain("%s.flush: %r from %r" % (self.name, e, self._stream))
raise
remaining -= n.value
if remaining == 0: break
text = text[n.value:]
- except Exception, e:
+ except Exception as e:
_complain("%s.write: %r" % (self.name, e))
raise
try:
for line in lines:
self.write(line)
- except Exception, e:
+ except Exception as e:
_complain("%s.writelines: %r" % (self.name, e))
raise
sys.stderr = UnicodeOutput(hStderr, None, STDERR_FILENO, '<Unicode console stderr>')
else:
sys.stderr = UnicodeOutput(None, sys.stderr, old_stderr_fileno, '<Unicode redirected stderr>')
- except Exception, e:
+ except Exception as e:
_complain("exception %r while fixing up sys.stdout and sys.stderr" % (e,))
# This works around <http://bugs.python.org/issue2128>.
return re.sub(ur'\x7F[0-9a-fA-F]*\;', lambda m: unichr(int(m.group(0)[1:-1], 16)), s)
try:
- argv = [unmangle(argv_unicode[i]).encode('utf-8') for i in xrange(0, argc.value)]
- except Exception, e:
+ argv = [unmangle(argv_unicode[i]).encode('utf-8') for i in range(0, argc.value)]
+ except Exception as e:
_complain("%s: could not unmangle Unicode arguments.\n%r"
- % (sys.argv[0], [argv_unicode[i] for i in xrange(0, argc.value)]))
+ % (sys.argv[0], [argv_unicode[i] for i in range(0, argc.value)]))
raise
# Take only the suffix with the same number of arguments as sys.argv.
_BDIR_KEY = 'Base Dir Path'
if sys.platform not in ('win32'):
- raise ImportError, "registry cannot be used on non-windows systems"
+ raise ImportError("registry cannot be used on non-windows systems")
class WindowsError(Exception): # stupid voodoo to appease pyflakes
pass
regkey = _winreg.OpenKey(topkey, key)
sublen, vallen, timestamp = _winreg.QueryInfoKey(regkey)
- for validx in xrange(vallen):
+ for validx in range(vallen):
keyname, value, keytype = _winreg.EnumValue(regkey, validx)
if keyname == name and keytype == _winreg.REG_SZ:
return value
except WindowsError:
continue
# We didn't find the key:
- raise KeyError, (key, name, "registry setting not found")
+ raise KeyError(key, name, "registry setting not found")
def set_registry_setting(key, name, data, reg_type=_winreg.REG_SZ,
_topkey=_winreg.HKEY_LOCAL_MACHINE, create_key_if_missing=True):
if create_key_if_missing:
regkey = _winreg.CreateKey(_topkey, key)
else:
- raise KeyError, (key, "registry key not found")
+ raise KeyError(key, "registry key not found")
try:
_winreg.DeleteValue(regkey, name)