storage: remove the storage server's "remote_cancel_lease" function
index 2f71c71ec74112263985139128f90785c700cc86..8350e813c11c709af7dd6d97aab355c76e6d23c9 100644
@@ -5,7 +5,7 @@ from twisted.application import service
 
 from zope.interface import implements
 from allmydata.interfaces import RIStorageServer, IStatsProducer
-from allmydata.util import fileutil, log, time_format
+from allmydata.util import fileutil, idlib, log, time_format
 import allmydata # for __full_version__
 
 from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
@@ -36,16 +36,6 @@ class StorageServer(service.MultiService, Referenceable):
     implements(RIStorageServer, IStatsProducer)
     name = 'storage'
     LeaseCheckerClass = LeaseCheckingCrawler
-    windows = False
-
-    try:
-        import win32api, win32con
-        windows = True
-        # <http://msdn.microsoft.com/en-us/library/ms680621%28VS.85%29.aspx>
-        win32api.SetErrorMode(win32con.SEM_FAILCRITICALERRORS |
-                              win32con.SEM_NOOPENFILEERRORBOX)
-    except ImportError:
-        pass
 
     def __init__(self, storedir, nodeid, reserved_space=0,
                  discard_storage=False, readonly_storage=False,
@@ -76,7 +66,7 @@ class StorageServer(service.MultiService, Referenceable):
         self._clean_incomplete()
         fileutil.make_dirs(self.incomingdir)
         self._active_writers = weakref.WeakKeyDictionary()
-        lp = log.msg("StorageServer created", facility="tahoe.storage")
+        log.msg("StorageServer created", facility="tahoe.storage")
 
         if reserved_space:
             if self.get_available_space() is None:
@@ -106,6 +96,9 @@ class StorageServer(service.MultiService, Referenceable):
                                    expiration_sharetypes)
         self.lease_checker.setServiceParent(self)
 
+    def __repr__(self):
+        return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
+
     def add_bucket_counter(self):
         statefile = os.path.join(self.storedir, "bucket_counter.state")
         self.bucket_counter = BucketCountingCrawler(self, statefile)
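
The new __repr__ ties log output to a human-readable server identity. A
minimal sketch of the expected output, assuming idlib.shortnodeid_b2a
returns a short lowercase base32 prefix of the 20-byte node id (its
implementation is not shown in this diff):

    import base64

    def shortnodeid_b2a(nodeid):
        # assumed stand-in for allmydata.util.idlib.shortnodeid_b2a:
        # a short lowercase base32 prefix of the node id
        return base64.b32encode(nodeid).lower()[:8]

    print "<StorageServer %s>" % shortnodeid_b2a("\x12\x34" * 10)
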
@@ -123,12 +116,15 @@ class StorageServer(service.MultiService, Referenceable):
 
     def get_latencies(self):
         """Return a dict, indexed by category, that contains a dict of
-        latency numbers for each category. Each dict will contain the
+        latency numbers for each category. If there are sufficient samples
+        for unambiguous interpretation, each dict will contain the
         following keys: mean, 01_0_percentile, 10_0_percentile,
         50_0_percentile (median), 90_0_percentile, 95_0_percentile,
-        99_0_percentile, 99_9_percentile. If no samples have been collected
-        for the given category, then that category name will not be present
-        in the return value."""
+        99_0_percentile, 99_9_percentile, plus a "samplesize" count. If
+        there are insufficient samples for a given percentile to be
+        interpreted unambiguously, that percentile will be reported as
+        None. If no samples have been collected for the given category,
+        then that category name will not be present in the return value."""
         # note that Amazon's Dynamo paper says they use 99.9% percentile.
         output = {}
         for category in self.latencies:
@@ -136,16 +132,25 @@ class StorageServer(service.MultiService, Referenceable):
                 continue
             stats = {}
             samples = self.latencies[category][:]
-            samples.sort()
             count = len(samples)
-            stats["mean"] = sum(samples) / count
-            stats["01_0_percentile"] = samples[int(0.01 * count)]
-            stats["10_0_percentile"] = samples[int(0.1 * count)]
-            stats["50_0_percentile"] = samples[int(0.5 * count)]
-            stats["90_0_percentile"] = samples[int(0.9 * count)]
-            stats["95_0_percentile"] = samples[int(0.95 * count)]
-            stats["99_0_percentile"] = samples[int(0.99 * count)]
-            stats["99_9_percentile"] = samples[int(0.999 * count)]
+            stats["samplesize"] = count
+            samples.sort()
+            if count > 1:
+                stats["mean"] = sum(samples) / count
+            else:
+                stats["mean"] = None
+
+            orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),
+                             (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),
+                             (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),
+                             (0.999, "99_9_percentile", 1000)]
+
+            for percentile, percentilestring, minnumtoobserve in orderstatlist:
+                if count >= minnumtoobserve:
+                    stats[percentilestring] = samples[int(percentile*count)]
+                else:
+                    stats[percentilestring] = None
+
             output[category] = stats
         return output
 
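
The rewritten loop reports a percentile only once enough samples exist
for that order statistic to be meaningful, returning None below the
per-percentile threshold rather than a misleading extreme value. A
standalone sketch of the same gating rule (hypothetical helper, not
part of this patch):

    def gated_percentiles(samples):
        # same thresholds as orderstatlist above: each percentile is
        # reported only once `minnumtoobserve` samples are available
        samples = sorted(samples)
        count = len(samples)
        stats = {"samplesize": count}
        stats["mean"] = sum(samples) / count if count > 1 else None
        for fraction, name, minnumtoobserve in [
                (0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),
                (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),
                (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),
                (0.999, "99_9_percentile", 1000)]:
            if count >= minnumtoobserve:
                stats[name] = samples[int(fraction * count)]
            else:
                stats[name] = None
        return stats

With ten samples the median is reported but the tails are not:
gated_percentiles(range(10)) gives 50_0_percentile = 5 and
99_0_percentile = None.
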
@@ -157,57 +162,6 @@ class StorageServer(service.MultiService, Referenceable):
     def _clean_incomplete(self):
         fileutil.rm_dir(self.incomingdir)
 
-    def get_disk_stats(self):
-        """Return disk statistics for the storage disk, in the form of a dict
-        with the following fields.
-          total:            total bytes on disk
-          free_for_root:    bytes actually free on disk
-          free_for_nonroot: bytes free for "a non-privileged user" [Unix] or
-                              the current user [Windows]; might take into
-                              account quotas depending on platform
-          used:             bytes used on disk
-          avail:            bytes available excluding reserved space
-        An AttributeError can occur if the OS has no API to get disk information.
-        An EnvironmentError can occur if the OS call fails."""
-
-        if self.windows:
-            # For Windows systems, where os.statvfs is not available, use GetDiskFreeSpaceEx.
-            # <http://docs.activestate.com/activepython/2.5/pywin32/win32api__GetDiskFreeSpaceEx_meth.html>
-            #
-            # Although the docs say that the argument should be the root directory
-            # of a disk, GetDiskFreeSpaceEx actually accepts any path on that disk
-            # (like its Win32 equivalent).
-
-            (free_for_nonroot, total, free_for_root) = self.win32api.GetDiskFreeSpaceEx(self.storedir)
-        else:
-            # For Unix-like systems.
-            # <http://docs.python.org/library/os.html#os.statvfs>
-            # <http://opengroup.org/onlinepubs/7990989799/xsh/fstatvfs.html>
-            # <http://opengroup.org/onlinepubs/7990989799/xsh/sysstatvfs.h.html>
-            s = os.statvfs(self.storedir)
-
-            # on my mac laptop:
-            #  statvfs(2) is a wrapper around statfs(2).
-            #    statvfs.f_frsize = statfs.f_bsize :
-            #     "minimum unit of allocation" (statvfs)
-            #     "fundamental file system block size" (statfs)
-            #    statvfs.f_bsize = statfs.f_iosize = stat.st_blocks : preferred IO size
-            # on an encrypted home directory ("FileVault"), it gets f_blocks
-            # wrong, and s.f_blocks*s.f_frsize is twice the size of my disk,
-            # but s.f_bavail*s.f_frsize is correct
-
-            total = s.f_frsize * s.f_blocks
-            free_for_root = s.f_frsize * s.f_bfree
-            free_for_nonroot = s.f_frsize * s.f_bavail
-
-        # valid for all platforms:
-        used = total - free_for_root
-        avail = max(free_for_nonroot - self.reserved_space, 0)
-
-        return { 'total': total, 'free_for_root': free_for_root,
-                 'free_for_nonroot': free_for_nonroot,
-                 'used': used, 'avail': avail, }
-
     def get_stats(self):
         # remember: RIStatsProvider requires that our return dict
         # contains numeric values.
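
The per-platform probing deleted here (GetDiskFreeSpaceEx on Windows,
os.statvfs elsewhere) is replaced in the next hunk by a call to
fileutil.get_disk_stats, whose source is not part of this diff. A
Unix-only sketch of the equivalent computation, retained from the
deleted body and assumed to match the fileutil version:

    import os

    def get_disk_stats_sketch(whichdir, reserved_space=0):
        # Unix-only sketch, assumed to match fileutil.get_disk_stats;
        # f_frsize is the fundamental block size, f_bavail the blocks
        # available to a non-privileged user
        s = os.statvfs(whichdir)
        total = s.f_frsize * s.f_blocks
        free_for_root = s.f_frsize * s.f_bfree
        free_for_nonroot = s.f_frsize * s.f_bavail
        used = total - free_for_root
        avail = max(free_for_nonroot - reserved_space, 0)
        return {"total": total, "free_for_root": free_for_root,
                "free_for_nonroot": free_for_nonroot,
                "used": used, "avail": avail}
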
@@ -218,7 +172,7 @@ class StorageServer(service.MultiService, Referenceable):
                 stats['storage_server.latencies.%s.%s' % (category, name)] = v
 
         try:
-            disk = self.get_disk_stats()
+            disk = fileutil.get_disk_stats(self.sharedir, self.reserved_space)
             writeable = disk['avail'] > 0
 
             # spacetime predictors should use disk_avail / (d(disk_used)/dt)
@@ -250,13 +204,7 @@ class StorageServer(service.MultiService, Referenceable):
 
         if self.readonly_storage:
             return 0
-        try:
-            return self.get_disk_stats()['avail']
-        except AttributeError:
-            return None
-        except EnvironmentError:
-            log.msg("OS call to get disk statistics failed", level=log.UNUSUAL)
-            return 0
+        return fileutil.get_available_space(self.sharedir, self.reserved_space)
 
     def allocated_size(self):
         space = 0
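
get_available_space now delegates to fileutil as well, passing the share
directory and the configured reservation. Judging from the try/except
block removed above, its expected behaviour is (an assumption, since the
fileutil source is not in this diff):

    import os

    def get_available_space_sketch(whichdir, reserved_space):
        # assumed behaviour of fileutil.get_available_space, mirroring
        # the removed body: bytes available to a non-privileged user net
        # of reserved space; None when the platform lacks a disk-stats
        # API; 0 when the OS call fails
        try:
            s = os.statvfs(whichdir)  # absent on some platforms
            free_for_nonroot = s.f_frsize * s.f_bavail
            return max(free_for_nonroot - reserved_space, 0)
        except AttributeError:
            return None
        except EnvironmentError:
            return 0
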
@@ -397,34 +345,6 @@ class StorageServer(service.MultiService, Referenceable):
         if not found_buckets:
             raise IndexError("no such lease to renew")
 
-    def remote_cancel_lease(self, storage_index, cancel_secret):
-        start = time.time()
-        self.count("cancel")
-
-        total_space_freed = 0
-        found_buckets = False
-        for sf in self._iter_share_files(storage_index):
-            # note: if we can't find a lease on one share, we won't bother
-            # looking in the others. Unless something broke internally
-            # (perhaps we ran out of disk space while adding a lease), the
-            # leases on all shares will be identical.
-            found_buckets = True
-            # this raises IndexError if the lease wasn't present XXXX
-            total_space_freed += sf.cancel_lease(cancel_secret)
-
-        if found_buckets:
-            storagedir = os.path.join(self.sharedir,
-                                      storage_index_to_dir(storage_index))
-            if not os.listdir(storagedir):
-                os.rmdir(storagedir)
-
-        if self.stats_provider:
-            self.stats_provider.count('storage_server.bytes_freed',
-                                      total_space_freed)
-        self.add_latency("cancel", time.time() - start)
-        if not found_buckets:
-            raise IndexError("no such storage index")
-
     def bucket_writer_closed(self, bw, consumed_size):
         if self.stats_provider:
             self.stats_provider.count('storage_server.bytes_added', consumed_size)
@@ -479,7 +399,7 @@ class StorageServer(service.MultiService, Referenceable):
         start = time.time()
         self.count("writev")
         si_s = si_b2a(storage_index)
-        lp = log.msg("storage: slot_writev %s" % si_s)
+        log.msg("storage: slot_writev %s" % si_s)
         si_dir = storage_index_to_dir(storage_index)
         (write_enabler, renew_secret, cancel_secret) = secrets
         # shares exist if there is a file for them
@@ -615,4 +535,3 @@ class StorageServer(service.MultiService, Referenceable):
                 share_type=share_type, si=si_s, shnum=shnum, reason=reason,
                 level=log.SCARY, umid="SGx2fA")
         return None
-