def __init__(self, application_id, parameters):
    self.logger = ScalingLog(
        "diff.controller.log", "controller.log", application_id)

    scaling_parameters = parameters["control_parameters"]

    self.application_id = application_id
    parameters.update({"app_id": application_id})
    # read scaling parameters
    self.check_interval = scaling_parameters["check_interval"]
    self.trigger_down = scaling_parameters["trigger_down"]
    self.trigger_up = scaling_parameters["trigger_up"]
    self.min_cap = scaling_parameters["min_rep"]
    self.max_cap = scaling_parameters["max_rep"]
    self.actuation_size = scaling_parameters["actuation_size"]
    # The actuator plugin name
    self.actuator_type = scaling_parameters["actuator"]
    # The metric source plugin name
    self.metric_source_type = scaling_parameters["metric_source"]

    # We use a lock here to prevent race conditions when stopping the
    # controller
    self.running = True
    self.running_lock = threading.RLock()

    # Gets a new metric source plugin using the given name
    metric_source = MetricSourceBuilder().get_metric_source(
        self.metric_source_type, parameters)

    # Gets a new actuator plugin using the given name
    actuator = ActuatorBuilder().get_actuator(self.actuator_type,
                                              parameters=parameters)

    # The alarm here is responsible for deciding whether to scale up or
    # down, or even do nothing
    self.alarm = KubeJobs(actuator, metric_source, self.trigger_down,
                          self.trigger_up, self.min_cap, self.max_cap,
                          self.actuation_size, application_id)
class KubejobsController(Controller):

    def __init__(self, application_id, parameters):
        self.validate(parameters["control_parameters"])
        self.logger = ScalingLog(
            "diff.controller.log", "controller.log", application_id)

        self.application_id = application_id
        parameters.update({"app_id": application_id})
        # read scaling parameters
        self.check_interval = \
            parameters["control_parameters"]["check_interval"]

        # We use a lock here to prevent race conditions when stopping the
        # controller
        self.running = True
        self.running_lock = threading.RLock()

        # The alarm here is responsible for deciding whether to scale up or
        # down, or even do nothing
        self.alarm = KubeJobs(parameters)

    def start_application_scaling(self):
        run = True
        self.logger.log("Start to control resources")

        while run:
            self.logger.log("Monitoring application: %s" %
                            self.application_id)

            # Call the alarm to check the application
            self.alarm.check_application_state()

            # Wait some time
            time.sleep(float(self.check_interval))

            with self.running_lock:
                run = self.running

    def stop_application_scaling(self):
        with self.running_lock:
            self.running = False

    def status(self):
        return self.alarm.status()

    def validate(self, data):
        data_model = {
            "actuator": six.string_types,
            "check_interval": int,
            "metric_source": six.string_types,
            "schedule_strategy": six.string_types
        }

        for key in data_model:
            if (key not in data):
                raise ex.BadRequestException(
                    "Variable \"{}\" is missing".format(key))
            if (not isinstance(data[key], data_model[key])):
                raise ex.BadRequestException(
                    "\"{}\" has unexpected variable type: {}. Was expecting {}"
                    .format(key, type(data[key]), data_model[key]))
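# Usage sketch (not part of the source): one way the refactored
# KubejobsController above could be driven end to end. The import path and
# the Redis address are assumptions made for illustration; the parameters
# dict mirrors the structure used by the tests further down.
import threading

# hypothetical import path -- adjust to wherever KubejobsController lives
from controller.plugins.controller.kubejobs.plugin import KubejobsController

parameters = {
    "control_parameters": {
        "metric_source": "redis",       # metric source plugin name
        "schedule_strategy": "default",
        "actuator": "nop",              # actuator plugin name
        "trigger_down": 1,
        "trigger_up": 1,
        "min_rep": 2,
        "max_rep": 10,
        "check_interval": 5,
        "actuation_size": 3
    },
    "redis_ip": "10.0.0.1",             # assumed host used by the metric source
    "redis_port": "6379"
}

controller = KubejobsController("app-0001", parameters)

# start_application_scaling blocks in its polling loop, so run it on a
# background thread and end it cleanly with stop_application_scaling.
worker = threading.Thread(target=controller.start_application_scaling)
worker.start()
# ... once the job is done ...
controller.stop_application_scaling()
worker.join()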
class KubejobsController(Controller):

    def __init__(self, application_id, parameters):
        self.logger = ScalingLog(
            "diff.controller.log", "controller.log", application_id)

        scaling_parameters = parameters["control_parameters"]

        self.application_id = application_id
        parameters.update({"app_id": application_id})
        # read scaling parameters
        self.check_interval = scaling_parameters["check_interval"]
        self.trigger_down = scaling_parameters["trigger_down"]
        self.trigger_up = scaling_parameters["trigger_up"]
        self.min_cap = scaling_parameters["min_rep"]
        self.max_cap = scaling_parameters["max_rep"]
        self.actuation_size = scaling_parameters["actuation_size"]
        # The actuator plugin name
        self.actuator_type = scaling_parameters["actuator"]
        # The metric source plugin name
        self.metric_source_type = scaling_parameters["metric_source"]

        # We use a lock here to prevent race conditions when stopping the
        # controller
        self.running = True
        self.running_lock = threading.RLock()

        # Gets a new metric source plugin using the given name
        metric_source = MetricSourceBuilder().get_metric_source(
            self.metric_source_type, parameters)

        # Gets a new actuator plugin using the given name
        actuator = ActuatorBuilder().get_actuator(self.actuator_type,
                                                  parameters=parameters)

        # The alarm here is responsible for deciding whether to scale up or
        # down, or even do nothing
        self.alarm = KubeJobs(actuator, metric_source, self.trigger_down,
                              self.trigger_up, self.min_cap, self.max_cap,
                              self.actuation_size, application_id)

    def start_application_scaling(self):
        run = True
        print("Start to control resources")

        while run:
            self.logger.log("Monitoring application: %s" %
                            self.application_id)

            # Call the alarm to check the application
            self.alarm.check_application_state()

            # Wait some time
            time.sleep(float(self.check_interval))

            with self.running_lock:
                run = self.running

    def stop_application_scaling(self):
        with self.running_lock:
            self.running = False

    def status(self):
        return self.alarm.status()
def setUp(self):
    application_id = "000001"
    self.parameters = {
        "control_parameters": {
            "metric_source": "redis",
            "schedule_strategy": "default",
            "actuator": 'nop',
            "trigger_down": 1,
            "trigger_up": 1,
            "min_rep": 2,
            "max_rep": 10,
            "check_interval": 1,
            "actuation_size": 3},
        "redis_ip": "192.168.0.0",
        "redis_port": "5000",
        "application_id": application_id
    }

    self.kubejobs1 = KubeJobs(self.parameters)
    self.kubejobs1.metric_source = \
        MetricSourceMock("2018-11-26T15:00:00.000Z", -2)
    self.kubejobs1.actuator = ActuatorMock()

    self.controller = KubejobsController(application_id, self.parameters)
    self.controller.alarm = self.kubejobs1
def setUp(self):
    parameters = {
        "control_parameters": {
            "check_interval": 2,
            "trigger_down": 1,
            "trigger_up": 1,
            "min_rep": 2,
            "max_rep": 10,
            "actuation_size": 3,
            "actuator": "nop",
            "metric_source": "redis",
            "redis_ip": "192.168.0.0",
            "redis_port": "5000"
        },
        "redis_ip": "192.168.0.0",
        "redis_port": "5000"
    }
    application_id = "000001"
    metric_source_1 = MetricSourceMock("2018-11-26T15:00:00.000Z", -2)
    actuator = ActuatorMock()
    trigger_down = 1
    trigger_up = 1
    min_cap = 2
    max_cap = 10
    actuation_size = 3

    alarm = self.kubejobs1 = KubeJobs(actuator, metric_source_1,
                                      trigger_down, trigger_up, min_cap,
                                      max_cap, actuation_size,
                                      application_id)

    self.controller = KubejobsController(application_id, parameters)
    self.controller.alarm = alarm
def __init__(self, application_id, parameters):
    self.validate(parameters["control_parameters"])
    self.logger = ScalingLog(
        "diff.controller.log", "controller.log", application_id)

    self.application_id = application_id
    parameters.update({"app_id": application_id})
    # read scaling parameters
    self.check_interval = \
        parameters["control_parameters"]["check_interval"]

    # We use a lock here to prevent race conditions when stopping the
    # controller
    self.running = True
    self.running_lock = threading.RLock()

    # The alarm here is responsible for deciding whether to scale up or
    # down, or even do nothing
    self.alarm = KubeJobs(parameters)
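# Sketch (not part of the source) of what the validate() call at the top of
# this constructor buys: a malformed control_parameters dict is rejected with
# ex.BadRequestException before any plugin or alarm is built. The names used
# here (KubejobsController, ex) are assumed to be the same ones imported by
# the controller module itself.
bad_parameters = {
    "control_parameters": {
        "actuator": "nop",
        "check_interval": "5",          # wrong type: str instead of int
        "metric_source": "redis",
        "schedule_strategy": "default"
    }
}

try:
    KubejobsController("app-0001", bad_parameters)
except ex.BadRequestException as error:
    # Message format comes from validate() above, e.g.
    # '"check_interval" has unexpected variable type: ...'
    print(error)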
def setUp(self):
    metric_source_1 = MetricSourceMock("2018-11-26T15:00:00.000Z", -2)
    actuator = ActuatorMock()
    trigger_down = 1
    trigger_up = 1
    min_cap = 2
    max_cap = 10
    actuation_size = 3
    application_id_1 = "00001"
    application_id_2 = "00002"
    metric_source_2 = MetricSourceMock("2017-08-06T07:00:00.000Z", 0.0002)

    self.kubejobs1 = KubeJobs(actuator, metric_source_1, trigger_down,
                              trigger_up, min_cap, max_cap, actuation_size,
                              application_id_1)

    self.kubejobs2 = KubeJobs(actuator, metric_source_2, trigger_down,
                              trigger_up, min_cap, max_cap, actuation_size,
                              application_id_2)
def setUp(self):
    data1 = {"metric_source": "redis",
             "schedule_strategy": "default",
             "actuator": 'nop',
             "trigger_down": 0,
             "trigger_up": 0,
             "min_rep": 1,
             "max_rep": 10,
             "actuation_size": 1}

    data2 = {"metric_source": 'redis',
             "schedule_strategy": "default",
             "actuator": "nop",
             "trigger_down": 0,
             "trigger_up": 0,
             "min_rep": 1,
             "max_rep": 10,
             "actuation_size": 1}

    self.kubejobs1 = KubeJobs({"redis_ip": "0.0.0.0",
                               "redis_port": "2352",
                               "application_id": "00001",
                               "control_parameters": data1})

    self.kubejobs2 = KubeJobs({"redis_ip": "0.0.0.0",
                               "redis_port": "2352",
                               "application_id": "00002",
                               "control_parameters": data2})

    self.kubejobs1.metric_source = \
        MetricSourceMock("2017-08-06T07:00:00.000Z", -2)
    self.kubejobs1.actuator = ActuatorMock()
    # self.kubejobs1.scheduler = SchedulerMock()

    self.kubejobs2.metric_source = \
        MetricSourceMock("2018-11-26T15:00:00.000Z", 0.0002)
    self.kubejobs2.actuator = ActuatorMock()
class TestKubeJobs(unittest.TestCase):

    """
    Set up KubeJobs alarm objects
    """
    def setUp(self):
        data1 = {"metric_source": "redis",
                 "schedule_strategy": "default",
                 "actuator": 'nop',
                 "trigger_down": 0,
                 "trigger_up": 0,
                 "min_rep": 1,
                 "max_rep": 10,
                 "actuation_size": 1}

        data2 = {"metric_source": 'redis',
                 "schedule_strategy": "default",
                 "actuator": "nop",
                 "trigger_down": 0,
                 "trigger_up": 0,
                 "min_rep": 1,
                 "max_rep": 10,
                 "actuation_size": 1}

        self.kubejobs1 = KubeJobs({"redis_ip": "0.0.0.0",
                                   "redis_port": "2352",
                                   "application_id": "00001",
                                   "control_parameters": data1})

        self.kubejobs2 = KubeJobs({"redis_ip": "0.0.0.0",
                                   "redis_port": "2352",
                                   "application_id": "00002",
                                   "control_parameters": data2})

        self.kubejobs1.metric_source = \
            MetricSourceMock("2017-08-06T07:00:00.000Z", -2)
        self.kubejobs1.actuator = ActuatorMock()
        # self.kubejobs1.scheduler = SchedulerMock()

        self.kubejobs2.metric_source = \
            MetricSourceMock("2018-11-26T15:00:00.000Z", 0.0002)
        self.kubejobs2.actuator = ActuatorMock()
        # self.kubejobs2.scheduler = SchedulerMock()

    """
    """
    def tearDown(self):
        pass

    """
    Test that, if necessary, the number of replicas is increased, or not.
    """
    def test_check_application_state(self):
        initial_replicas1 = self.kubejobs1.actuator.get_number_of_replicas()
        self.kubejobs1.check_application_state()
        final_replicas1 = self.kubejobs1.actuator.get_number_of_replicas()
        self.assertTrue(final_replicas1 > initial_replicas1)

        initial_replicas2 = self.kubejobs2.actuator.get_number_of_replicas()
        self.kubejobs2.check_application_state()
        final_replicas2 = self.kubejobs2.actuator.get_number_of_replicas()
        self.assertTrue(final_replicas2 == initial_replicas2)

    def test_scale(self):
        self.kubejobs1.actuator.adjust_resources(4)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 4)

        self.kubejobs1.actuator.\
            adjust_resources(self.kubejobs1.actuator.
                             get_number_of_replicas() - 2)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 2)

        self.kubejobs1.actuator.\
            adjust_resources(self.kubejobs1.actuator.
                             get_number_of_replicas() + 8)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 10)

    """
    Test that the function _get_progress_error returns the progress
    error correctly.
    """
    def test_get_progress_error(self):
        self.assertEqual(self.kubejobs2._get_progress_error("00001"),
                         (datetime.strptime("2018-11-26T15:00:00.000Z",
                                            '%Y-%m-%dT%H:%M:%S.%fZ'),
                          0.0002))
        self.assertEqual(self.kubejobs1._get_progress_error("00002"),
                         (datetime.strptime("2017-08-06T07:00:00.000Z",
                                            '%Y-%m-%dT%H:%M:%S.%fZ'),
                          -2))

    """
    Test that the function _check_measurements_are_new works correctly.
    """
    def test_check_measurements_are_new(self):
        self.assertFalse(self.kubejobs1._check_measurements_are_new(
            datetime.strptime("0001-01-01T00:00:00.0Z",
                              '%Y-%m-%dT%H:%M:%S.%fZ')))
        self.assertTrue(self.kubejobs1._check_measurements_are_new(
            datetime.strptime("2017-08-06T07:00:00.000Z",
                              '%Y-%m-%dT%H:%M:%S.%fZ')))

    """
    Test that the status returned is correct.
    """
    def test_status(self):
        self.kubejobs1.metric_source = MetricSourceMock(
            "2018-11-26T15:00:00.000Z", 0.0001)
        initial_state1 = ""
        self.assertEqual(self.kubejobs1.status(), initial_state1)

        final_state1 = "Progress error-[%s]-%f" % \
            self.kubejobs1._get_progress_error("00001")
        self.kubejobs1.check_application_state()
        self.assertEqual(self.kubejobs1.status(), final_state1)

        self.kubejobs2.metric_source = MetricSourceMock(
            "0001-01-01T00:00:00.0Z", 0.0002)
        initial_state2 = ""
        self.assertEqual(self.kubejobs2.status(), initial_state2)

        final_state2 = "Progress error-[%s]-%f" % \
            self.kubejobs2._get_progress_error("00002") \
            + " Could not acquire more recent metrics"
        self.kubejobs2.check_application_state()
        self.assertEqual(self.kubejobs2.status(), final_state2)
class TestKubeJobs(unittest.TestCase):

    """
    Set up KubeJobs alarm objects
    """
    def setUp(self):
        metric_source_1 = MetricSourceMock("2018-11-26T15:00:00.000Z", -2)
        actuator = ActuatorMock()
        trigger_down = 1
        trigger_up = 1
        min_cap = 2
        max_cap = 10
        actuation_size = 3
        application_id_1 = "00001"
        application_id_2 = "00002"
        metric_source_2 = MetricSourceMock("2017-08-06T07:00:00.000Z",
                                           0.0002)

        self.kubejobs1 = KubeJobs(actuator, metric_source_1, trigger_down,
                                  trigger_up, min_cap, max_cap,
                                  actuation_size, application_id_1)

        self.kubejobs2 = KubeJobs(actuator, metric_source_2, trigger_down,
                                  trigger_up, min_cap, max_cap,
                                  actuation_size, application_id_2)

    """
    """
    def tearDown(self):
        pass

    """
    Test that, if necessary, the number of replicas is increased, or not.
    """
    def test_check_application_state(self):
        initial_replicas1 = self.kubejobs1.actuator.get_number_of_replicas()
        self.kubejobs1.check_application_state()
        final_replicas1 = self.kubejobs1.actuator.get_number_of_replicas()
        self.assertTrue(final_replicas1 > initial_replicas1)

        initial_replicas2 = self.kubejobs2.actuator.get_number_of_replicas()
        self.kubejobs2.check_application_state()
        final_replicas2 = self.kubejobs2.actuator.get_number_of_replicas()
        self.assertTrue(final_replicas2 == initial_replicas2)

    """
    Test that the scale down works, decreasing the number of replicas.
    """
    def test_scale_down(self):
        self.kubejobs1.actuator.replicas = 10
        self.kubejobs1._scale_down(2)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 7)
        self.kubejobs1._scale_down(2)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 4)
        self.kubejobs1._scale_down(2)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 2)

    """
    Test that the scale up works, increasing the number of replicas.
    """
    def test_scale_up(self):
        self.kubejobs1._scale_up(-2)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 4)
        self.kubejobs1._scale_up(-2)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 7)
        self.kubejobs1._scale_up(-2)
        self.assertEqual(self.kubejobs1.actuator.get_number_of_replicas(), 10)

    """
    Test that the function _get_progress_error returns the progress
    error correctly.
    """
    def test_get_progress_error(self):
        self.assertEqual(self.kubejobs1._get_progress_error("00001"),
                         (datetime.strptime("2018-11-26T15:00:00.000Z",
                                            '%Y-%m-%dT%H:%M:%S.%fZ'),
                          -2))
        self.assertEqual(self.kubejobs2._get_progress_error("00002"),
                         (datetime.strptime("2017-08-06T07:00:00.000Z",
                                            '%Y-%m-%dT%H:%M:%S.%fZ'),
                          0.0002))

    """
    Test that the function _check_measurements_are_new works correctly.
    """
    def test_check_measurements_are_new(self):
        self.assertFalse(
            self.kubejobs1._check_measurements_are_new(
                datetime.strptime("0001-01-01T00:00:00.0Z",
                                  '%Y-%m-%dT%H:%M:%S.%fZ')))
        self.assertTrue(
            self.kubejobs1._check_measurements_are_new(
                datetime.strptime("2017-08-06T07:00:00.000Z",
                                  '%Y-%m-%dT%H:%M:%S.%fZ')))

    """
    Test that the status returned is correct.
    """
    def test_status(self):
        self.kubejobs1.metric_source = MetricSourceMock(
            "2018-11-26T15:00:00.000Z", 0.0001)
        initial_state1 = ""
        final_state1 = "Progress error-[%s]-%f" % \
            self.kubejobs1._get_progress_error("00001")

        self.assertEqual(self.kubejobs1.status(), initial_state1)
        self.kubejobs1.check_application_state()
        self.assertEqual(self.kubejobs1.status(), final_state1)

        self.kubejobs2.metric_source = MetricSourceMock(
            "0001-01-01T00:00:00.0Z", 0.0002)
        initial_state2 = ""
        final_state2 = "Progress error-[%s]-%f" % \
            self.kubejobs2._get_progress_error("00002") \
            + " Could not acquire more recent metrics"

        self.assertEqual(self.kubejobs2.status(), initial_state2)
        self.kubejobs2.check_application_state()
        self.assertEqual(self.kubejobs2.status(), final_state2)