def __init__(self, actuator, metric_source, trigger_down, trigger_up,
             min_cap, max_cap, actuation_size, metric_rounding,
             application_id, instances):
    """Set up the generic alarm state.

    Args:
        actuator: plugin used to apply scaling actions.
        metric_source: plugin used to read application metrics.
        trigger_down: threshold below which a scale-down is considered.
        trigger_up: threshold above which a scale-up is considered.
        min_cap: lower bound for the cap value.
        max_cap: upper bound for the cap value.
        actuation_size: step size used when adjusting the cap.
        metric_rounding: number of decimal places kept for metrics.
        application_id: id of the monitored application.
        instances: instances the alarm acts upon.
    """
    self.metric_source = metric_source
    self.actuator = actuator
    self.trigger_down = trigger_down
    self.trigger_up = trigger_up
    # FIX: these three parameters were accepted but silently discarded;
    # sibling alarm constructors store their cap parameters, so keep them.
    self.min_cap = min_cap
    self.max_cap = max_cap
    self.actuation_size = actuation_size
    self.metric_rounding = metric_rounding
    self.application_id = application_id
    self.instances = instances
    self.load_balancer_url = api.load_balancer_url
    self.logger = ScalingLog("%s.generic.alarm.log" % (application_id),
                             "controller.log", application_id)
    self.cap_logger = ScalingLog("%s.cap.log" % (application_id),
                                 "cap.log", application_id)
    self.last_error = ""
    # Sentinel "never happened" timestamp: the earliest representable moment.
    self.last_error_timestamp = datetime.datetime.strptime(
        "0001-01-01T00:00:00.0Z", '%Y-%m-%dT%H:%M:%S.%fZ')
    self.last_action = ""
    # -1 means "cap not yet decided".
    self.cap = -1
def __init__(self, actuator, metric_source, trigger_down, trigger_up,
             min_cap, max_cap, metric_rounding, heuristic_options,
             application_id, instances):
    """Initialize the proportional alarm with its plugins and thresholds.

    Stores the actuator/metric-source plugins, the scaling thresholds and
    bounds, and prepares the loggers and bookkeeping fields used while
    deciding scaling actions.
    """
    # TODO: Check parameters
    # Collaborating plugins.
    self.metric_source = metric_source
    self.actuator = actuator
    # Scaling thresholds and cap bounds.
    self.trigger_down = trigger_down
    self.trigger_up = trigger_up
    self.min_cap = min_cap
    self.max_cap = max_cap
    self.metric_rounding = metric_rounding
    self.heuristic_options = heuristic_options
    # Target application.
    self.application_id = application_id
    self.instances = instances
    # Dedicated loggers for alarm decisions and cap changes.
    self.logger = ScalingLog(
        "%s.proportional.alarm.log" % (application_id),
        "controller.log", application_id)
    self.cap_logger = ScalingLog("%s.cap.log" % (application_id),
                                 "cap.log", application_id)
    # "Never happened" sentinel timestamp (earliest representable moment).
    self.last_progress_error_timestamp = datetime.datetime.strptime(
        "0001-01-01T00:00:00.0Z", '%Y-%m-%dT%H:%M:%S.%fZ')
    self.last_action = ""
    self.cap = -1
def __init__(self, actuator, metric_source, actuator_metric, trigger_down,
             trigger_up, min_quota, max_quota, application_id):
    """Initialize the vertical alarm.

    Keeps references to the actuator and metric-source plugins, the
    actuated metric name, the quota bounds and thresholds, and sets up
    logging plus decision bookkeeping.
    """
    # TODO: Check parameters
    self.metric_source = metric_source
    self.actuator = actuator
    self.actuator_metric = actuator_metric
    # Thresholds and quota bounds for vertical scaling.
    self.trigger_down = trigger_down
    self.trigger_up = trigger_up
    self.min_quota = min_quota
    self.max_quota = max_quota
    self.application_id = application_id
    # Alarm-decision log and cap-change log.
    self.logger = ScalingLog(
        "%s.vertical.alarm.log" % (application_id),
        "controller.log", application_id)
    self.cap_logger = ScalingLog("%s.cap.log" % (
        application_id), "cap.log", application_id)
    # "Never happened" sentinel timestamp (earliest representable moment).
    self.last_progress_error_timestamp = datetime.datetime.strptime(
        "0001-01-01T00:00:00.0Z", '%Y-%m-%dT%H:%M:%S.%fZ')
    self.last_action = ""
    self.cap = -1
def __init__(self, application_id, plugin_info):
    """Build the diff controller from its plugin configuration.

    Reads scaling parameters from ``plugin_info``, instantiates the
    metric-source and actuator plugins by name, and wires them into a
    GenericAlarm that makes the scale up/down decisions.
    """
    self.logger = ScalingLog("diff.controller.log", "controller.log",
                             application_id)
    self.application_id = application_id
    # Scaling configuration read straight from the plugin description.
    self.instances = plugin_info["instances"]
    self.check_interval = plugin_info["check_interval"]
    self.trigger_down = plugin_info["trigger_down"]
    self.trigger_up = plugin_info["trigger_up"]
    self.min_cap = plugin_info["min_cap"]
    self.max_cap = plugin_info["max_cap"]
    self.actuation_size = plugin_info["actuation_size"]
    self.metric_rounding = plugin_info["metric_rounding"]
    self.actuator_type = plugin_info["actuator"]
    self.metric_source_type = plugin_info["metric_source"]
    # A lock prevents race conditions when stopping the controller.
    self.running = True
    self.running_lock = threading.RLock()
    # Resolve the metric source and actuator plugins by their names.
    metric_source = MetricSourceBuilder().get_metric_source(
        self.metric_source_type, plugin_info)
    actuator = ActuatorBuilder().get_actuator(self.actuator_type)
    # The alarm decides whether to scale up, scale down, or do nothing.
    self.alarm = GenericAlarm(actuator, metric_source, self.trigger_down,
                              self.trigger_up, self.min_cap, self.max_cap,
                              self.actuation_size, self.metric_rounding,
                              application_id, self.instances)
def __init__(self, application_id, plugin_info):
    """Build the proportional-derivative controller.

    Unwraps the nested ``plugin_info["plugin_info"]`` payload, reads the
    scaling parameters, instantiates the metric-source and actuator
    plugins, and wires them into a ProportionalDerivativeAlarm.
    """
    self.logger = ScalingLog("proportional_derivative.controller.log",
                             "controller.log", application_id)
    # The actual configuration is nested one level down.
    plugin_info = plugin_info["plugin_info"]
    self.application_id = application_id
    # Scaling configuration.
    self.instances = plugin_info["instances"]
    self.check_interval = plugin_info["check_interval"]
    self.trigger_down = plugin_info["trigger_down"]
    self.trigger_up = plugin_info["trigger_up"]
    self.min_cap = plugin_info["min_cap"]
    self.max_cap = plugin_info["max_cap"]
    self.metric_rounding = plugin_info["metric_rounding"]
    self.actuator_type = plugin_info["actuator"]
    self.metric_source_type = plugin_info["metric_source"]
    self.heuristic_options = plugin_info["heuristic_options"]
    # Lock guards against races when the controller is being stopped.
    self.running = True
    self.running_lock = threading.RLock()
    # Resolve plugins by name.
    metric_source = MetricSourceBuilder().get_metric_source(
        self.metric_source_type, plugin_info)
    actuator = ActuatorBuilder().get_actuator(self.actuator_type,
                                              plugin_info)
    # The alarm decides whether to scale up, scale down, or do nothing.
    self.alarm = ProportionalDerivativeAlarm(
        actuator, metric_source, self.trigger_down, self.trigger_up,
        self.min_cap, self.max_cap, self.metric_rounding,
        self.heuristic_options, application_id, self.instances)
def __init__(self, application_id, parameters):
    """Build the diff controller for Kubernetes jobs.

    Extracts the control parameters, tags ``parameters`` with the
    application id, instantiates the metric-source and actuator plugins,
    and wires them into a KubeJobs alarm.
    """
    self.logger = ScalingLog(
        "diff.controller.log", "controller.log", application_id)
    scaling_parameters = parameters["control_parameters"]
    self.application_id = application_id
    # Make the application id available to the plugin builders below.
    parameters.update({"app_id": application_id})
    # Read the scaling parameters.
    self.check_interval = scaling_parameters["check_interval"]
    self.trigger_down = scaling_parameters["trigger_down"]
    self.trigger_up = scaling_parameters["trigger_up"]
    self.min_cap = scaling_parameters["min_rep"]
    self.max_cap = scaling_parameters["max_rep"]
    self.actuation_size = scaling_parameters["actuation_size"]
    # Plugin names for the actuator and metric source.
    self.actuator_type = scaling_parameters["actuator"]
    self.metric_source_type = scaling_parameters["metric_source"]
    # Lock guards against races when the controller is being stopped.
    self.running = True
    self.running_lock = threading.RLock()
    # Resolve plugins by name.
    metric_source = MetricSourceBuilder().get_metric_source(
        self.metric_source_type, parameters)
    actuator = ActuatorBuilder().get_actuator(self.actuator_type,
                                              parameters=parameters)
    # The alarm decides whether to scale up, scale down, or do nothing.
    self.alarm = KubeJobs(actuator, metric_source, self.trigger_down,
                          self.trigger_up, self.min_cap, self.max_cap,
                          self.actuation_size, application_id)
def __init__(self, app_id, plugin_info):
    """Build the PID controller.

    Reads the scaling configuration from ``plugin_info``, instantiates
    the metric-source and actuator plugins, and wires them into a
    PIDAlarm.
    """
    self.logger = ScalingLog("pid.controller.log", "controller.log",
                             app_id)
    self.app_id = app_id
    # Scaling configuration.
    self.instances = plugin_info["instances"]
    self.check_interval = plugin_info["check_interval"]
    self.trigger_down = plugin_info["trigger_down"]
    self.trigger_up = plugin_info["trigger_up"]
    self.min_cap = plugin_info["min_cap"]
    self.max_cap = plugin_info["max_cap"]
    self.metric_rounding = plugin_info["metric_rounding"]
    self.actuator_type = plugin_info["actuator"]
    self.metric_source_type = plugin_info["metric_source"]
    self.heuristic_options = plugin_info["heuristic_options"]
    # Lock guards against races when the controller is being stopped.
    self.running = True
    self.running_lock = threading.RLock()
    # Resolve plugins by name.
    metric_source = MetricSourceBuilder().get_metric_source(
        self.metric_source_type, plugin_info)
    actuator = ActuatorBuilder().get_actuator(self.actuator_type)
    # The alarm decides whether to scale up, scale down, or do nothing.
    self.alarm = PIDAlarm(actuator, metric_source, self.trigger_down,
                          self.trigger_up, self.min_cap, self.max_cap,
                          self.metric_rounding, self.heuristic_options,
                          self.app_id, self.instances)
def __init__(self, data):
    """Initialize the default scheduler from its configuration mapping.

    Validates ``data`` first, then stores the scaling thresholds,
    replica bounds, and actuation step size.
    """
    # Fail fast on malformed configuration.
    self.validate(data)
    self.logger = ScalingLog("default_scheduler.log", "scheduler.log")
    # Scaling thresholds and bounds (note: caps come from *_rep keys).
    self.trigger_down = data.get("trigger_down")
    self.trigger_up = data.get("trigger_up")
    self.max_cap = data.get("max_rep")
    self.min_cap = data.get("min_rep")
    self.actuation_size = data.get("actuation_size")
def __init__(self, data):
    """Initialize the KubeJobs alarm from its configuration mapping.

    Builds the metric source from the control parameters, tags those
    parameters with the application id, then sets up the scheduler,
    actuator, loggers, and decision bookkeeping.
    """
    # TODO: Check parameters
    scaling_parameters = data["control_parameters"]
    self.metric_source = MetricSourceBuilder().\
        get_metric_source(scaling_parameters.get('metric_source'), data)
    self.app_id = data.get('app_id')
    # Propagate the app id so the scheduler/actuator builders can see it.
    scaling_parameters.update({'app_id': self.app_id})
    self.scheduler = self.setup_scheduler(scaling_parameters)
    self.actuator = self.setup_actuator(scaling_parameters)
    # Alarm-decision log and cap-change log.
    self.logger = ScalingLog("%s.generic.alarm.log" % (self.app_id),
                             "controller.log", self.app_id)
    self.cap_logger = ScalingLog("%s.cap.log" % (self.app_id),
                                 "cap.log", self.app_id)
    # "Never happened" sentinel timestamp (earliest representable moment).
    self.last_progress_error_timestamp = datetime.datetime.strptime(
        "0001-01-01T00:00:00.0Z", '%Y-%m-%dT%H:%M:%S.%fZ')
    self.last_action = ""
    self.cap = -1
def __init__(self, data):
    """Initialize the PID scheduler from its configuration mapping.

    Validates ``data``, reads the replica bounds and the PID gains from
    ``heuristic_options``, and resets the error-tracking state.
    """
    # Fail fast on malformed configuration.
    self.validate(data)
    self.logger = ScalingLog("pid_scheduler.log", "scheduler.log")
    heuristic_options = data.get('heuristic_options')
    # Replica bounds.
    self.max_rep = data.get('max_rep')
    self.min_rep = data.get('min_rep')
    # PID controller gains.
    self.proportional_gain = heuristic_options["proportional_gain"]
    self.derivative_gain = heuristic_options["derivative_gain"]
    self.integral_gain = heuristic_options["integral_gain"]
    # Error history: None means "no measurement yet".
    self.last_error = None
    self.integrated_error = 0
def __init__(self, application_id, parameters):
    """Build the thin diff controller for Kubernetes jobs.

    Validates the control parameters, tags ``parameters`` with the
    application id, and delegates all scaling decisions to a KubeJobs
    alarm constructed from the full parameter mapping.
    """
    # Fail fast on malformed configuration.
    self.validate(parameters["control_parameters"])
    self.logger = ScalingLog(
        "diff.controller.log", "controller.log", application_id)
    self.application_id = application_id
    # Make the application id available to the alarm below.
    parameters.update({"app_id": application_id})
    # Read the scaling parameters.
    self.check_interval = \
        parameters["control_parameters"]["check_interval"]
    # A lock prevents race conditions when stopping the controller.
    self.running = True
    self.running_lock = threading.RLock()
    # The alarm decides whether to scale up, scale down, or do nothing.
    self.alarm = KubeJobs(parameters)