class APIUpdateTestCase(unittest.TestCase):
    """Exercise Client.update() against canned HTTP responses."""

    def _response(self, status):
        # Build a bare Response carrying only a status code.
        resp = Response()
        resp.status_code = status
        return resp

    def setUp(self):
        # The endpoint is never actually contacted; test adapters answer.
        self.client = Client(TEST_ENDPOINT)
        self.accepted = self._response(201)
        self.refused = self._response(400)
        self.not_found = self._response(404)

    def test_update_accepted(self):
        # One 201 answer: update() should issue exactly one PATCH request.
        adapter = PredictableTestAdapter([self.accepted])
        self.client._session.mount(TEST_ENDPOINT, adapter)
        self.client.update({'provider': '/api/v1/provider/2/', 'id': 2,
                            'resource_uri': '/api/v1/location/2/',
                            'name': 'us-east-1'})
        self.assertEqual(1, len(adapter.requests))
        update, = adapter.requests
        self.assertEqual("PATCH", update.method)

    def test_update_refused(self):
        # Both a 400 and a 404 reply must be surfaced as APIError.
        adapter = PredictableTestAdapter([self.refused, self.not_found])
        self.client._session.mount(TEST_ENDPOINT, adapter)
        obj = {'provider': '/api/v1/provider/2/', 'id': 2,
               'resource_uri': '/api/v1/location/2/', 'name': 'us-east-1'}
        self.assertRaises(APIError, self.client.update, obj)
        self.assertRaises(APIError, self.client.update, obj)
        self.assertEqual(2, len(adapter.requests))
def setUp(self):
    """Prepare a client plus canned 201/400/404 responses."""
    self.client = Client(TEST_ENDPOINT)  # never actually contacted
    for name, code in (("accepted", 201), ("refused", 400),
                       ("not_found", 404)):
        canned = Response()
        canned.status_code = code
        setattr(self, name, canned)
def setUp(self):
    """Mount an adapter answering a 201-with-Location, then the object."""
    self.client = Client(TEST_ENDPOINT)  # never actually contacted
    creation = Response()
    creation.status_code = 201
    creation.headers["location"] = path_join(TEST_ENDPOINT,
                                             "/created/loc/1")
    scripted = [creation, make_json_response(OBJ_RESPONSE)]
    self.adapter = PredictableTestAdapter(scripted)
    self.client._session.mount(TEST_ENDPOINT, self.adapter)
class APIUpdateTestCase(unittest.TestCase):
    """Verify that Client.update() issues PATCH and reports failures."""

    def setUp(self):
        # The endpoint is never actually reached; adapters answer instead.
        self.client = Client(TEST_ENDPOINT)
        for attr, status in [("accepted", 201), ("refused", 400),
                             ("not_found", 404)]:
            canned = Response()
            canned.status_code = status
            setattr(self, attr, canned)

    def test_update_accepted(self):
        # A single 201 response: update() must issue exactly one PATCH.
        adapter = PredictableTestAdapter([self.accepted])
        self.client._session.mount(TEST_ENDPOINT, adapter)
        payload = {
            'provider': '/api/v1/provider/2/',
            'id': 2,
            'resource_uri': '/api/v1/location/2/',
            'name': 'us-east-1',
        }
        self.client.update(payload)
        self.assertEqual(1, len(adapter.requests))
        sole_request, = adapter.requests
        self.assertEqual("PATCH", sole_request.method)

    def test_update_refused(self):
        # Both a 400 and a 404 reply must surface as APIError.
        adapter = PredictableTestAdapter([self.refused, self.not_found])
        self.client._session.mount(TEST_ENDPOINT, adapter)
        payload = {
            'provider': '/api/v1/provider/2/',
            'id': 2,
            'resource_uri': '/api/v1/location/2/',
            'name': 'us-east-1',
        }
        self.assertRaises(APIError, self.client.update, payload)
        self.assertRaises(APIError, self.client.update, payload)
        self.assertEqual(2, len(adapter.requests))
def test_negative_retry(self):
    """Jitter larger than the base wait must never yield negative sleeps."""
    client = Client(TEST_ENDPOINT, retry_max=100, retry_wait=0,
                    retry_range=10)
    failure = Response()
    failure.status_code = 500
    failure.reason = "INTERNAL SERVER ERROR"
    client._session.mount(TEST_ENDPOINT, RepeatingTestAdapter(failure))
    mock_sleep = MockTimeSleep()
    with mock_sleep:
        self.assertRaises(APIError, client.measurements.list)
    # wait=0 with range=10 could produce negatives; they must be clamped.
    for slept in mock_sleep.calls:
        self.assertGreaterEqual(slept, 0)
def test_error_wrapping(self):
    """
    Test that HTTP errors are wrapped into API Errors
    """
    client = Client(TEST_ENDPOINT)
    failure = Response()
    failure.status_code = 500
    failure.reason = "INTERNAL SERVER ERROR"
    client._session.mount(TEST_ENDPOINT, RepeatingTestAdapter(failure))
    # assertRaises alone wouldn't let us inspect the wrapped response,
    # so catch the error by hand and examine it.
    try:
        client.measurements.get()
    except APIError as err:
        wrapped = err.response
        self.assertIsNotNone(wrapped)
        self.assertEqual(500, wrapped.status_code)
        self.assertEqual("INTERNAL SERVER ERROR", wrapped.reason)
    else:
        self.fail("No APIError was raised")
def test_retry(self):
    """retry_max transient 500s recover; one more exhausts the retries."""
    retry_max = 2
    retry_wait = 7
    client = Client(TEST_ENDPOINT, retry_max=retry_max,
                    retry_wait=retry_wait)
    failure = Response()
    failure.status_code = 500
    failure.reason = "INTERNAL SERVER ERROR"
    payload = dict(BASE_RESPONSE)
    payload["objects"] = [OBJ_1_PARTIAL]
    payload["total_count"] = 1
    success = make_json_response(payload)

    # Exactly retry_max failures before success: the call should recover.
    adapter = PredictableTestAdapter([failure, failure, success])
    client._session.mount(TEST_ENDPOINT, adapter)
    mock_sleep = MockTimeSleep()
    with mock_sleep:
        try:
            listed = client.measurements.list()
        except APIError as e:
            self.fail("API call wasn't retried: %s" % e)
    self.assertEqual([OBJ_1_PARTIAL], listed)
    self.assertEqual([retry_wait, retry_wait], mock_sleep.calls)

    # One failure too many: retries run out and the error propagates.
    adapter = PredictableTestAdapter([failure, failure, failure, success])
    client._session.mount(TEST_ENDPOINT, adapter)
    mock_sleep = MockTimeSleep()
    with mock_sleep:
        self.assertRaises(APIError, client.measurements.list)
    self.assertEqual([retry_wait, retry_wait], mock_sleep.calls)
def test_random_retry(self):
    """Jittered waits stay in [wait-range, wait+range] and actually vary."""
    retry_max = 50  # We want a large sample
    retry_wait = 100
    retry_range = 5
    client = Client(TEST_ENDPOINT, retry_max=retry_max,
                    retry_wait=retry_wait, retry_range=retry_range)
    failure = Response()
    failure.status_code = 500
    failure.reason = "INTERNAL SERVER ERROR"
    client._session.mount(TEST_ENDPOINT, RepeatingTestAdapter(failure))
    mock_sleep = MockTimeSleep()
    with mock_sleep:
        self.assertRaises(APIError, client.measurements.list)
    lower = retry_wait - retry_range
    upper = retry_wait + retry_range
    for slept in mock_sleep.calls:
        self.assertGreaterEqual(slept, lower)
        self.assertLessEqual(slept, upper)
    # With jitter enabled, at least one wait should differ from the base.
    self.assertFalse(all(slept == retry_wait for slept in mock_sleep.calls))
def test_race_condition(self):
    """get_or_create survives losing a create race (400 already-exists)."""
    self.client = Client(TEST_ENDPOINT)  # never actually contacted
    # First list comes back empty, so the client tries to create.
    empty_listing = make_json_response(dict(BASE_RESPONSE))
    # ...but someone else created the object in the meantime.
    already_there = Response()
    already_there.status_code = 400
    already_there.reason = "BAD REQUEST"
    already_there._content = six.b('{"configuration": {"__all__": ["Configuration with this I/O Mode, Block Size and I/O Depth already exists."]}}')
    # A second list now finds the object.
    found_payload = dict(BASE_RESPONSE)
    found_payload["objects"] = [OBJ_1_PARTIAL]
    found_payload["total_count"] = 1
    found = make_json_response(found_payload)
    self.adapter = PredictableTestAdapter(
        [empty_listing, already_there, found,
         make_json_response(OBJ_RESPONSE)])
    self.client._session.mount(TEST_ENDPOINT, self.adapter)
    ret = self.client.configurations.get_or_create()
    self.assertDictEqual(OBJ_RESPONSE, ret)
    self.assertEqual(4, len(self.adapter.requests))
def setUp(self):
    """Create a client; the endpoint is never actually contacted."""
    self.client = Client(TEST_ENDPOINT)
def main():
    """Cloudbench entry point: parse config, daemonize, run the benchmark.

    Reads the INI configuration named by ``-c/--config``, sets up logging,
    then daemonizes and drives the fio benchmark suite, reporting results
    to the configured HTTP endpoint.  Exits with status 1 on any failure
    inside the daemon.
    """
    parser = argparse.ArgumentParser(
        description="Benchmark your Cloud performance")
    parser.add_argument(
        "-c", "--config",
        help="Path to the cloudbench configuration file (defaults to {0})".
        format(DEFAULT_CONFIG_FILE),
        default=DEFAULT_CONFIG_FILE)
    args = parser.parse_args()

    # Defaults passed to ConfigParser apply to every section, so any
    # section lookup below falls back to these values.
    config = configparser.ConfigParser({
        "fio": DEFAULT_FIO_PATH,
        "pidfile": DEFAULT_PID_FILE,
        "logfile": DEFAULT_LOG_FILE,
        "nobench": "",
        "size": DEFAULT_FILE_SIZE,
        "ramp": DEFAULT_RAMP_TIME,
        "duration": DEFAULT_DURATION,
        "retry_max": DEFAULT_RETRY_MAX,
        "retry_wait": DEFAULT_RETRY_WAIT,
        "retry_range": DEFAULT_RETRY_RANGE,
        "extra_assets": "",
    })
    # Ensure these sections exist even if absent from the file, so the
    # get() calls below succeed via the defaults.
    config.add_section("environment")
    config.add_section("general")
    config.read(args.config)

    fio_bin = config.get("environment", "fio")
    pid_file = config.get("environment", "pidfile")
    log_file = config.get("environment", "logfile")
    no_bench = _cnf_get_list(config, "environment", "nobench")
    extra_assets = _cnf_get_list(config, "environment", "extra_assets")

    # Keep the log file descriptors open across daemonization.
    files_preserve = setup_logging(log_file)

    # NOTE: "benchmarks" and "reporting" sections are not pre-created, so
    # missing sections/options here raise — intentionally, before we fork.
    block_sizes = _cnf_get_list(config, "benchmarks", "blocksizes")
    depths = _cnf_get_list(config, "benchmarks", "depths")
    modes = _cnf_get_list(config, "benchmarks", "modes")

    size = config.get("general", "size")
    ramp = config.get("general", "ramp")
    duration = config.get("general", "duration")

    reporting_endpoint = config.get("reporting", "endpoint")
    reporting_username = config.get("reporting", "username")
    reporting_key = config.get("reporting", "apikey")

    # Those two may fail, but that's fine: we haven't daemonized yet.
    reporting_retry_max = int(config.get("reporting", "retry_max"))
    reporting_retry_wait = int(config.get("reporting", "retry_wait"))
    reporting_retry_range = int(config.get("reporting", "retry_range"))

    logger.info("Cloudbench v{0}: starting".format(__version__))

    with DaemonContext(files_preserve=files_preserve,
                       pidfile=lockfile.pidlockfile.PIDLockFile(pid_file)):
        try:
            cloud = Cloud()
            benchmark_volumes = identify_benchmark_volumes(cloud, no_bench)
            api = Client(reporting_endpoint,
                         APIKeyAuth(reporting_username, reporting_key),
                         reporting_retry_max, reporting_retry_wait,
                         reporting_retry_range)

            logger.info("Provider: %s", cloud.provider)
            logger.info("Location: %s", cloud.location)
            logger.info("Instance type: %s", cloud.instance_type)
            logger.info("Number of volumes: %s", len(benchmark_volumes))
            for benchmark_volume in benchmark_volumes:
                logger.info(
                    "%s: %s, %s", benchmark_volume.device,
                    benchmark_volume.provider,
                    "Persistent" if benchmark_volume.persistent
                    else "Ephemeral")

            start_benchmark(cloud, api, benchmark_volumes, extra_assets,
                            fio_bin, block_sizes, depths, modes, size, ramp,
                            duration)
        except Exception as e:
            # Catch-all boundary: we're a daemon, so log everything we can
            # (including the HTTP response, if the error carried one) and
            # exit non-zero.
            logger.critical("An error occurred: %s", e)
            response = getattr(e, "response", None)
            logger.exception("Fatal Exception")
            if response is not None:
                logger.critical("HTTP Error")
                logger.critical("URL: %s", response.request.url)
                logger.critical("Status: %s %s", response.status_code,
                                response.reason)
                logger.critical("Response: %s", response.text)
            logger.warning("Cloudbench v{0}: exiting".format(__version__))
            sys.exit(1)
def setUp(self):
    """Fresh client plus a private copy of the canned list response."""
    self.client = Client(TEST_ENDPOINT)
    # Copy so individual tests can mutate the payload safely.
    self.list_response = dict(BASE_RESPONSE)