from allmydata.interfaces import RIBucketWriter, RIBucketReader
from allmydata.util import base32, fileutil, log
from allmydata.util.assertutil import precondition
-from allmydata.util.hashutil import constant_time_compare
+from allmydata.util.hashutil import timing_safe_compare
from allmydata.storage.lease import LeaseInfo
from allmydata.storage.common import UnknownImmutableContainerVersionError, \
DataTooLargeError
def renew_lease(self, renew_secret, new_expire_time):
for i,lease in enumerate(self.get_leases()):
- if constant_time_compare(lease.renew_secret, renew_secret):
+ if timing_safe_compare(lease.renew_secret, renew_secret):
# yup. See if we need to update the owner time.
if new_expire_time > lease.expiration_time:
# yes
leases = list(self.get_leases())
num_leases_removed = 0
for i,lease in enumerate(leases):
- if constant_time_compare(lease.cancel_secret, cancel_secret):
+ if timing_safe_compare(lease.cancel_secret, cancel_secret):
leases[i] = None
num_leases_removed += 1
if not num_leases_removed:
from allmydata.interfaces import BadWriteEnablerError
from allmydata.util import idlib, log
from allmydata.util.assertutil import precondition
-from allmydata.util.hashutil import constant_time_compare
+from allmydata.util.hashutil import timing_safe_compare
from allmydata.storage.lease import LeaseInfo
from allmydata.storage.common import UnknownMutableContainerVersionError, \
DataTooLargeError
accepting_nodeids = set()
f = open(self.home, 'rb+')
for (leasenum,lease) in self._enumerate_leases(f):
- if constant_time_compare(lease.renew_secret, renew_secret):
+ if timing_safe_compare(lease.renew_secret, renew_secret):
# yup. See if we need to update the owner time.
if new_expire_time > lease.expiration_time:
# yes
f = open(self.home, 'rb+')
for (leasenum,lease) in self._enumerate_leases(f):
accepting_nodeids.add(lease.nodeid)
- if constant_time_compare(lease.cancel_secret, cancel_secret):
+ if timing_safe_compare(lease.cancel_secret, cancel_secret):
self._write_lease_record(f, leasenum, blank_lease)
modified += 1
else:
f.close()
# avoid a timing attack
#if write_enabler != real_write_enabler:
- if not constant_time_compare(write_enabler, real_write_enabler):
+ if not timing_safe_compare(write_enabler, real_write_enabler):
# accomodate share migration by reporting the nodeid used for the
# old write enabler.
self.log(format="bad write enabler on SI %(si)s,"
     h2.update("foo")
     self.failUnlessEqual(h1, h2.digest())
-    def test_constant_time_compare(self):
-        self.failUnless(hashutil.constant_time_compare("a", "a"))
-        self.failUnless(hashutil.constant_time_compare("ab", "ab"))
-        self.failIf(hashutil.constant_time_compare("a", "b"))
-        self.failIf(hashutil.constant_time_compare("a", "aa"))
+    def test_timing_safe_compare(self):
+        # Renamed from test_constant_time_compare; the semantics under
+        # test are unchanged: equal strings compare True, while strings
+        # differing in content ("a" vs "b") or in length ("a" vs "aa")
+        # compare False.
+        self.failUnless(hashutil.timing_safe_compare("a", "a"))
+        self.failUnless(hashutil.timing_safe_compare("ab", "ab"))
+        self.failIf(hashutil.timing_safe_compare("a", "b"))
+        self.failIf(hashutil.timing_safe_compare("a", "aa"))
     def _testknown(self, hashf, expected_a, *args):
         got = hashf(*args)
 def ssk_storage_index_hash(readkey):
     return tagged_hash(MUTABLE_STORAGEINDEX_TAG, readkey, KEYLEN)
-def constant_time_compare(a, b):
+def timing_safe_compare(a, b):
+    """
+    Return True iff a == b.
+
+    Both inputs are hashed under a fresh random 8-byte tag (os.urandom)
+    and the resulting digests are compared, so the comparison operates on
+    unpredictable fixed-length values rather than on the secrets
+    themselves.  Renamed from constant_time_compare; behavior unchanged.
+    """
     n = os.urandom(8)
     return bool(tagged_hash(n, a) == tagged_hash(n, b))