def test_exception_messages_during_peer_selection(self):
    """
    Drive peer selection into failure and check that the resulting
    UploadHappinessError message explains *why* placement failed,
    distinguishing servers that refused because they were full from
    servers that refused because of an error.
    """
    # Scenario 1:
    # server 1: readonly, no shares
    # server 2: readonly, no shares
    # server 3: readonly, no shares
    # server 4: readonly, no shares
    # server 5: readonly, no shares
    # The upload will fail; we only care that the log message is
    # informative about the reason.
    self.basedir = self.mktemp()
    d = self._setup_and_upload()
    for number in (1, 2, 3, 4, 5):
        # number=number binds the loop value now, sidestepping Python's
        # late-binding closure behavior.
        d.addCallback(lambda ign, number=number:
                      self._add_server_with_share(server_number=number,
                                                  readonly=True))
    d.addCallback(lambda ign:
                  self.g.remove_server(self.g.servers_by_number[0].my_nodeid))

    def _lower_happiness(ign):
        # Demand placement on 4 servers so the attempt must fail.
        client = self.g.clients[0]
        client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
        return client
    d.addCallback(_lower_happiness)
    d.addCallback(lambda client:
        self.shouldFail(UploadHappinessError, "test_selection_exceptions",
                        "peer selection failed for <Tahoe2PeerSelector "
                        "for upload dglev>: placed 0 shares out of 10 "
                        "total (10 homeless), want to place on 4 servers,"
                        " sent 5 queries to 5 peers, 0 queries placed "
                        "some shares, 5 placed none "
                        "(of which 5 placed none due to the server being "
                        "full and 0 placed none due to an error)",
                        client.upload,
                        upload.Data("data" * 10000, convergence="")))

    # Scenario 2:
    # server 1: readonly, no shares
    # server 2: broken, no shares
    # server 3: readonly, no shares
    # server 4: readonly, no shares
    # server 5: readonly, no shares
    def _fresh_basedir(ign):
        self.basedir = self.mktemp()
    d.addCallback(_fresh_basedir)
    d.addCallback(lambda ign:
                  self._setup_and_upload())
    d.addCallback(lambda ign:
                  self._add_server_with_share(server_number=1,
                                              readonly=True))
    d.addCallback(lambda ign:
                  self._add_server_with_share(server_number=2))

    def _break_server_2(ign):
        # We have to break the server in servers_by_id, because the one
        # in servers_by_number isn't wrapped and never consults its
        # 'broken' attribute.
        nodeid = self.g.servers_by_number[2].my_nodeid
        self.g.servers_by_id[nodeid].broken = True
    d.addCallback(_break_server_2)
    for number in (3, 4, 5):
        d.addCallback(lambda ign, number=number:
                      self._add_server_with_share(server_number=number,
                                                  readonly=True))
    d.addCallback(lambda ign:
                  self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
    # Same happiness tweak as scenario 1 (the original defined an
    # identical helper twice; one definition suffices).
    d.addCallback(_lower_happiness)
    d.addCallback(lambda client:
        self.shouldFail(UploadHappinessError, "test_selection_exceptions",
                        "peer selection failed for <Tahoe2PeerSelector "
                        "for upload dglev>: placed 0 shares out of 10 "
                        "total (10 homeless), want to place on 4 servers,"
                        " sent 5 queries to 5 peers, 0 queries placed "
                        "some shares, 5 placed none "
                        "(of which 4 placed none due to the server being "
                        "full and 1 placed none due to an error)",
                        client.upload,
                        upload.Data("data" * 10000, convergence="")))
    return d
+
+