class Deployer(ABCDeployer):
    def __init__(self):
        self.heatclient = HeatClient()
        conf = SysUtil().get_sys_conf()
        LOG.debug("Get runtime agent: " + conf['runtime_agent'])
        self.runtime_agent = FactoryAgent().get_agent(conf['runtime_agent'])
        # self.register_agent = FactoryAgent().get_agent(conf['register_agent'])
        self.template_manager = FactoryAgent().get_agent(conf['template_manager'])
        self.db = FactoryAgent().get_agent(conf['database_manager'])
        self.checker = FactoryAgent().get_agent(conf['checker'])

    def deploy(self, topology):
        # Deploy only when ext_id is None; otherwise the topology is already deployed.
        if topology.ext_id is None:
            LOG.debug("Start deploying topology %s" % topology.name)
            _name = topology.ext_name
            _template = self.template_manager.get_template(topology)
            LOG.debug("Stack name: %s" % _name)
            LOG.debug("Template: %s" % _template)
            try:
                stack_details = self.heatclient.deploy(name=_name, template=_template)
                LOG.debug("Stack details after deploy: %s" % stack_details)
                if stack_details:
                    topology.ext_id = stack_details['stack'].get('id')
                    LOG.debug("Stack id: %s" % topology.ext_id)
                else:
                    raise Exception('Error during deployment on the testbed.')
            except Exception as exc:
                LOG.exception(exc)
                topology.state = 'ERROR'
                topology.ext_id = None
                raise
        else:
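The deployer resolves its collaborators (runtime agent, template manager, database manager, checker) from the system configuration and only talks to Heat when topology.ext_id is still unset. The short driver below is a usage sketch only; the import path and the idea of persisting the topology right after deployment are assumptions for illustration, not part of the original code.

# Hypothetical usage sketch: the import path below is an assumption.
from deployer.heat_deployer import Deployer


def deploy_topology(topology):
    """Deploy a topology once and persist the resulting Heat stack id."""
    deployer = Deployer()
    try:
        # deploy() is a no-op if topology.ext_id is already set, and it
        # re-raises after marking the topology as ERROR on failure.
        deployer.deploy(topology)
    finally:
        # Persist either the new ext_id or the ERROR state.
        deployer.db.update(topology)
    return topology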
class PolicyThread(threading.Thread):
    def __init__(self, topology, runtime_agent, policy, service_instance, lock):
        super(PolicyThread, self).__init__()
        self.policy = policy
        self.service_instance = service_instance
        self.topology = topology
        self.runtime_agent = runtime_agent
        self.monitor = runtime_agent.monitoring_service
        self.lock = lock
        # hack for avoiding autoscaling at startup
        self.counter = 0
        self.is_stopped = False
        conf = SysUtil().get_sys_conf()
        self.template_manager = FactoryAgent().get_agent(conf['template_manager'])
        self.db = FactoryAgent().get_agent(conf['database_manager'])
        self.heat_client = HeatClient()

    def run(self):
        LOG.info("Initialise policy thread for policy %s" % self.policy.name)
        self.wait_until_final_state()
        LOG.info("Starting policy thread for policy %s" % self.policy.name)
        if self.is_stopped:
            LOG.info("Cannot start policy threads. PolicyThreads are stopped.")
        elif self.topology.state in ['DEPLOYED', 'UPDATED']:
            self.start_policy_checker_si()
            LOG.info("Started policy thread for policy %s" % self.policy.name)
        else:
            LOG.error("Something went wrong. Topology state -> %s. Not starting the PolicyThread."
                      % self.topology.state)

    def wait_until_final_state(self, final_states=None):
        if not final_states:
            final_states = ['DEPLOYED', 'UPDATED', 'ERROR', 'DELETED']
        units_count = 0
        for service_instance in self.topology.service_instances:
            units_count += len(service_instance.units)
        i = 0
        while not self.is_stopped and self.topology.state not in final_states and i <= units_count * 100:
            LOG.debug('PolicyThread for %s -> Waiting 5 seconds' % self.policy.name)
            time.sleep(5)
            i += 1

    def active_policy_unit(self):
        LOG.debug("Start active_policy check")
        while not self.is_stopped:
            LOG.debug("Locking policy checking by %s" % self.policy.name)
            self.lock.acquire()
            for unit in self.service_instance.units:
                action = self.policy.action
                if action.scaling_adjustment > 0:
                    if (len(self.service_instance.units) + action.scaling_adjustment) > self.service_instance.size.get('max'):
                        LOG.warning('Check upscaling - Maximum number of units exceeded for service instance: %s'
                                    % self.service_instance.name)
                        break
                if action.scaling_adjustment < 0:
                    if (len(self.service_instance.units) + action.scaling_adjustment) < self.service_instance.size.get('min'):
                        LOG.warning('Check downscaling - Minimum number of units exceeded for service instance: %s'
                                    % self.service_instance.name)
                        break
                if self.service_instance.state != 'UPDATING' and self.check_alarm_unit(unit, self.monitor):
                    LOG.debug('Execute action: %s' % repr(self.policy.action))
                    if action.adjustment_type == 'ChangeInCapacity':
                        self.service_instance.state = 'UPDATING'
                        self.topology.state = 'UPDATING'
                        if action.scaling_adjustment > 0:
                            if (len(self.service_instance.units) + action.scaling_adjustment) <= self.service_instance.size.get('max'):
                                for i in range(action.scaling_adjustment):
                                    _hostname = '%s-%s' % (self.service_instance.name,
                                                           str(len(self.service_instance.units) + 1))
                                    _state = 'Initialised'
                                    new_unit = Unit(hostname=_hostname, state=_state)
                                    self.service_instance.units.append(new_unit)
                            else:
                                LOG.warning('Maximum number of units exceeded for service instance: %s'
                                            % self.service_instance.name)
                        else:
                            if (len(self.service_instance.units) + action.scaling_adjustment) >= self.service_instance.size.get('min'):
                                for i in range(-action.scaling_adjustment):
                                    self.remove_unit(self.topology, self.service_instance)
                            else:
                                LOG.warning('Minimum number of units exceeded for service instance: %s'
                                            % self.service_instance.name)
                        try:
                            self.db.update(self.topology)
                        except Exception as msg:
                            LOG.error(msg)
                            self.topology.state = 'ERROR'
                            self.topology.ext_id = None
                        template = self.template_manager.get_template(self.topology)
                        # LOG.debug("Send update to heat template with: \n%s" % template)
                        self.heat_client.update(stack_id=self.topology.ext_id, template=template)
                        LOG.info('Sleeping (cooldown) for %s seconds' % self.policy.action.cooldown)
                        time.sleep(self.policy.action.cooldown)
            LOG.debug("Release Policy lock by %s" % self.policy.name)
            self.lock.release()
            LOG.info('Sleeping (evaluation period) for %s seconds' % self.policy.period)
            time.sleep(self.policy.period)
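Each policy of a service instance is watched by its own PolicyThread, and all threads share a single lock so that only one scaling decision is evaluated at a time. The wiring sketch below uses the five-argument constructor above (the DNSaaS-aware variant that follows adds a sixth argument); the import path and the policies attribute on the service instance are assumptions for illustration, not taken from the original code.

import threading

# Hypothetical wiring sketch: the import path and service_instance.policies
# are assumptions, not taken from the original code.
from core.policy_thread import PolicyThread


def start_policy_threads(topology, runtime_agent):
    """Spawn one PolicyThread per (service instance, policy) with a shared lock."""
    lock = threading.Lock()  # serialises scaling decisions across all policies
    threads = []
    for service_instance in topology.service_instances:
        for policy in service_instance.policies:  # assumed attribute
            t = PolicyThread(topology, runtime_agent, policy, service_instance, lock)
            t.daemon = True  # policy checking should not keep the process alive
            t.start()
            threads.append(t)
    return threads


def stop_policy_threads(threads):
    """Signal the threads to stop; each exits after its current evaluation period."""
    for t in threads:
        t.is_stopped = True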
class PolicyThread(threading.Thread):
    def __init__(self, topology, runtime_agent, policy, service_instance, lock, dnsaas):
        super(PolicyThread, self).__init__()
        self.policy = policy
        self.service_instance = service_instance
        self.topology = topology
        self.runtime_agent = runtime_agent
        self.monitor = runtime_agent.monitoring_service
        self.lock = lock
        if dnsaas is not None:
            self.is_dnsaas = True
            self.dns_configurator = ImsDnsClient(dnsaas)
        else:
            self.is_dnsaas = False
        self.counter = 0
        self.is_stopped = False
        conf = SysUtil().get_sys_conf()
        self.template_manager = FactoryAgent().get_agent(conf["template_manager"])
        self.db = FactoryAgent().get_agent(conf["database_manager"])
        self.heat_client = HeatClient()

    def run(self):
        logger.info("Initialise policy thread for policy %s" % self.policy.name)
        self.wait_until_final_state()
        logger.info("Starting policy thread for policy %s" % self.policy.name)
        if self.is_stopped:
            logger.info("Cannot start policy threads. PolicyThreads are stopped.")
        elif self.topology.state in ["DEPLOYED", "UPDATED"]:
            self.start_policy_checker_si()
            logger.info("Started policy thread for policy %s" % self.policy.name)
        else:
            logger.error("Something went wrong. Topology state -> %s. Not starting the PolicyThread."
                         % self.topology.state)

    def wait_until_final_state(self, final_states=None):
        if not final_states:
            final_states = ["DEPLOYED", "UPDATED", "ERROR", "DELETED"]
        units_count = 0
        for service_instance in self.topology.service_instances:
            units_count += len(service_instance.units)
        i = 0
        while not self.is_stopped and self.topology.state not in final_states and i <= units_count * 100:
            logger.debug("PolicyThread for %s -> Waiting 5 seconds" % self.policy.name)
            time.sleep(5)
            i += 1

    def check_alarm_unit(self, unit, monitoring_service):
        logger.debug("checking for alarms")
        alarm = self.policy.alarm
        logger.debug("request item value: %s" % unit.hostname)
        item_value = monitoring_service.get_item(res_id=unit.ext_id,
                                                 item_name=alarm.meter_name,
                                                 kwargs={"period": alarm.evaluation_periods})
        # item_value = 50
        logger.debug("received item value: %s" % item_value)
        if alarm.comparison_operator == ">" or alarm.comparison_operator == "gt":
            logger.debug("Check upscaling: check that item value is bigger than threshold")
            if item_value > alarm.threshold:
                # hack for demo
                self.counter += 1
                if self.counter > 1:
                    logger.info("Counter %s - Trigger the action: %s"
                                % (self.counter, repr(self.policy.action)))
                    return True
                else:
                    logger.info("Not triggering action %s since the counter is not yet above 1 (%s)"
                                % (repr(self.policy.action), self.counter))
                    return False
            else:
                logger.debug("Check upscaling: item value is lower than threshold")
        elif alarm.comparison_operator == "<" or alarm.comparison_operator == "lt":
            logger.debug("Check downscaling: check that item value is lower than threshold")
            if item_value < alarm.threshold:
                logger.info("Trigger the action: %s" % repr(self.policy.action))
                return True
            else:
                logger.debug("Check downscaling: item value is bigger than threshold")
        logger.debug("Check item values finished")
        return False

    def start_policy_checker_si(self):
        logger.debug("Start active_policy check for policy %s on service instance %s"
                     % (self.policy.name, self.service_instance.name))
        while not self.is_stopped:
            logger.debug("Locking policy checking from %s" % self.policy.name)
            self.lock.acquire()
            logger.debug("Locked policy checking from %s" % self.policy.name)
            action = self.policy.action
            if action.scaling_adjustment > 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) > self.service_instance.size.get("max"):
                    logger.warning("Check upscaling - Maximum number of units exceeded for service instance: %s"
                                   % self.service_instance.name)
                    logger.debug("Release Policy lock by %s" % self.policy.name)
                    self.lock.release()
                    time.sleep(self.policy.period)
                    continue
            if action.scaling_adjustment < 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) < self.service_instance.size.get("min"):
                    logger.warning("Check downscaling - Minimum number of units exceeded for service instance: %s"
                                   % self.service_instance.name)
                    logger.debug("Release Policy lock by %s" % self.policy.name)
                    self.lock.release()
                    time.sleep(self.policy.period)
                    continue
            if self.service_instance.state != "UPDATING" and self.check_alarm_si():
                logger.debug("Execute action: %s" % repr(self.policy.action))
                if action.adjustment_type == "ChangeInCapacity":
                    self.service_instance.state = "UPDATING"
                    self.topology.state = "UPDATING"
                    if action.scaling_adjustment > 0:
                        logger.info("executing scaling out action")
                        info = {
                            "so_id": "idnotusefulhere",
                            "sm_name": "imsaas",
                            "so_phase": "update",
                            "phase_event": "start",
                            "response_time": 0,
                            "tenant": "mcntub",
                        }
                        self.start_time = time.time()
                        info_json = json.dumps(info)
                        glogger.debug(info_json)
                        if (len(self.service_instance.units) + action.scaling_adjustment) <= self.service_instance.size.get("max"):
                            for i in range(action.scaling_adjustment):
                                _hostname = "%s-%s" % (self.service_instance.name,
                                                       str(len(self.service_instance.units) + 1))
                                _state = "DEFINED"
                                new_unit = Unit(hostname=_hostname, state=_state)
                                new_unit.service_instance_id = self.service_instance.id
                                self.service_instance.units.append(new_unit)
                                self.db.persist(new_unit)
                        else:
                            logger.warning("Maximum number of units exceeded for service instance: %s"
                                           % self.service_instance.name)
                    else:
                        logger.info("executing scaling in action")
                        if (len(self.service_instance.units) + action.scaling_adjustment) >= self.service_instance.size.get("min"):
                            for i in range(-action.scaling_adjustment):
                                removed_unit = self.remove_unit(self.topology, self.service_instance)
                                self.db.remove(removed_unit)
                        else:
                            logger.warning("Minimum number of units exceeded for service instance: %s"
                                           % self.service_instance.name)
                    topology = self.db.update(self.topology)
                    template = self.template_manager.get_template(self.topology)
                    # logger.debug("Send update to heat template with: \n%s" % template)
                    if action.scaling_adjustment <= 0:
                        logger.info("provisioning the unit after scaling in operation")
                        self.configure_after_scaling(removed_unit)
                    try:
                        logger.info("updating the heat template including new units")
                        self.heat_client.update(stack_id=self.topology.ext_id, template=template)
                        self.wait_until_final_state()
                        logger.info("wait until final state function executed")
                        if self.topology.state not in ["DEPLOYED", "UPDATED"]:
                            logger.error("Something went wrong. Topology state -> %s" % self.topology.state)
                            self.lock.release()
                            return
                    except Exception:
                        self.is_stopped = True
                        self.lock.release()
                    logger.info("entering provisioning phase")
                    if action.scaling_adjustment > 0:
                        logger.info("provisioning new unit after scaling out operation")
                        # adding relations between the newly added unit and existing units from dependent services
                        self.configure_new_unit(new_unit)
                    logger.info("Sleeping (cooldown) for %s seconds" % self.policy.action.cooldown)
                    time.sleep(self.policy.action.cooldown)
            logger.debug("Release Policy lock from %s" % self.policy.name)
            self.lock.release()
            logger.info("Sleeping (evaluation period) for %s seconds" % self.policy.period)
            time.sleep(self.policy.period)

    def configure_new_unit(self, unit):
        logger.info("configuring new unit with hostname %s" % unit.hostname)
        config = {}
        config["hostname"] = unit.hostname
        config["ips"] = unit.ips
        config["zabbix_ip"] = os.environ["ZABBIX_IP"]
        config["floating_ips"] = unit.floating_ips
        try:
            logger.info("sending requests to the adapter with config %s" % config)
            self.service_instance.adapter_instance.preinit(config)
            self.service_instance.adapter_instance.install(config)
        except Exception as e:
            logger.error("error while configuring vnf %s" % e)
        self.add_relations_after_scaling(config, unit)
        try:
            self.service_instance.adapter_instance.pre_start(config)
            self.service_instance.adapter_instance.start(config)
        except Exception as e:
            logger.error("error while configuring vnf %s" % e)
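check_alarm_unit only requires the monitoring service to expose get_item(res_id=..., item_name=..., kwargs={...}) and the alarm to carry meter_name, comparison_operator, threshold and evaluation_periods. The stand-in below is a test double for exercising that check without a live monitoring backend; none of these stub classes exist in the original code base, and the metric name is illustrative.

# Test doubles only: these stubs are not part of the original code base.
class StubMonitor(object):
    """Mimics the monitoring-service interface used by check_alarm_unit()."""

    def __init__(self, value):
        self.value = value

    def get_item(self, res_id, item_name, kwargs):
        # A real backend would query the metric for the given resource;
        # the stub just returns a canned value.
        return self.value


class StubAlarm(object):
    """Carries the alarm fields read by check_alarm_unit()."""
    meter_name = "cpu_util"  # illustrative metric name
    comparison_operator = ">"
    threshold = 80
    evaluation_periods = 1


# With self.policy.alarm set to StubAlarm() and a monitor returning 95 (above the
# threshold), check_alarm_unit(unit, StubMonitor(95)) returns False on the first
# breach and True on the second, because of the demo counter in the thread.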