def update(self, new_topology, old_topology):
    conf = sys_util().get_sys_conf()
    db = FactoryAgent().get_agent(conf['database_manager'])
    updated_topology = old_topology
    updated_topology.name = new_topology.name

    # Check for additional service instances and add them to the list of new instances
    appended_service_instances = []
    for new_service_instance in new_topology.service_instances:
        is_found = False
        for updated_service_instance in updated_topology.service_instances:
            if new_service_instance.name == updated_service_instance.name:
                is_found = True
                break
        if not is_found:
            appended_service_instances.append(new_service_instance)

    # Check for removed service instances and add them to the list of removed instances
    removed_service_instances = []
    for updated_service_instance in updated_topology.service_instances:
        is_found = False
        for new_service_instance in new_topology.service_instances:
            if new_service_instance.name == updated_service_instance.name:
                is_found = True
                break
        if not is_found:
            removed_service_instances.append(updated_service_instance)

    # Remove the service instances that are no longer part of the new topology
    for removed_service_instance in removed_service_instances:
        updated_topology.service_instances.remove(removed_service_instance)
        LOG.debug('Removed ServiceInstance "%s" from Topology "%s".'
                  % (removed_service_instance.name, updated_topology.name))

    # Append the additional service instances and persist them together with their policies
    for appended_service_instance in appended_service_instances:
        appended_service_instance.topology_id = updated_topology.id
        updated_topology.service_instances.append(appended_service_instance)
        if appended_service_instance.policies is not None:
            for policy in appended_service_instance.policies:
                db.persist(policy)
        db.persist(appended_service_instance)
        LOG.debug('Appended ServiceInstance "%s" to Topology "%s".'
                  % (appended_service_instance.name, updated_topology.name))

    # Update all values for each service instance
    for updated_service_instance in updated_topology.service_instances:
        for new_service_instance in new_topology.service_instances:
            if updated_service_instance.name == new_service_instance.name:
                updated_service_instance.size = new_service_instance.size
                updated_service_instance.configuration = new_service_instance.configuration
                updated_service_instance.policies = new_service_instance.policies
                # The remaining attributes cannot be updated in place yet;
                # log a warning whenever the new topology tries to change them.
                if new_service_instance.service_type and \
                        updated_service_instance.service_type != new_service_instance.service_type:
                    LOG.warning("Cannot update service_type for %s->%s. Not Implemented."
                                % (updated_topology.name, updated_service_instance.name))
                if new_service_instance.adapter and \
                        updated_service_instance.adapter != new_service_instance.adapter:
                    LOG.warning("Cannot update adapter for %s->%s. Not Implemented."
                                % (updated_topology.name, updated_service_instance.name))
                if new_service_instance.flavor and \
                        updated_service_instance.flavor.name != new_service_instance.flavor.name:
                    LOG.warning("Cannot update flavor for %s->%s. Not Implemented."
                                % (updated_topology.name, updated_service_instance.name))
                if new_service_instance.image and \
                        updated_service_instance.image.name != new_service_instance.image.name:
                    LOG.warning("Cannot update image for %s->%s. Not Implemented."
                                % (updated_topology.name, updated_service_instance.name))
                if new_service_instance.networks is not None:
                    LOG.warning("Cannot update networks for %s->%s. Not Implemented."
                                % (updated_topology.name, updated_service_instance.name))
                if new_service_instance.requirements is not None:
                    LOG.warning("Cannot update requirements for %s->%s. Not Implemented."
                                % (updated_topology.name, updated_service_instance.name))
                if new_service_instance.user_data is not None:
                    LOG.warning("Cannot update user_data for %s->%s. Not Implemented."
                                % (updated_topology.name, updated_service_instance.name))
                if new_service_instance.key and \
                        updated_service_instance.key.name != new_service_instance.key.name:
                    LOG.warning("Cannot update key for %s->%s without replacement."
                                % (updated_topology.name, updated_service_instance.name))

    # Add or remove units according to the minimal or maximal size
    for updated_service_instance in updated_topology.service_instances:
        if updated_service_instance not in appended_service_instances:
            if len(updated_service_instance.units) < updated_service_instance.size.get('min'):
                for i in range(updated_service_instance.size.get('min')
                               - len(updated_service_instance.units)):
                    _hostname = '%s-%s' % (updated_service_instance.name,
                                           str(len(updated_service_instance.units) + 1))
                    _state = 'DEFINED'
                    new_unit = Unit(hostname=_hostname, state=_state)
                    new_unit.service_instance_id = updated_service_instance.id
                    updated_service_instance.units.append(new_unit)
                    db.persist(new_unit)
            if len(updated_service_instance.units) > updated_service_instance.size.get('max'):
                for i in range(len(updated_service_instance.units)
                               - updated_service_instance.size.get('max')):
                    removed_unit = updated_service_instance.units.pop(
                        len(updated_service_instance.units) - 1)
                    db.remove(removed_unit)

    return updated_topology
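The core of update() is a name-based diff between the old and the new service-instance lists: instances present only in the new topology are appended, instances present only in the old topology are removed, and matching instances are updated in place. The following standalone sketch isolates that diff pattern; the helper name diff_by_name and the sample data are hypothetical and serve only to illustrate the logic, they are not part of the IMSaaS code.

# Minimal, self-contained sketch of the name-based diff used in update().
# diff_by_name and the sample instance names are illustrative assumptions.
def diff_by_name(old_instances, new_instances):
    """Return (appended, removed) instances, matched by their 'name' field."""
    old_names = set(si['name'] for si in old_instances)
    new_names = set(si['name'] for si in new_instances)
    appended = [si for si in new_instances if si['name'] not in old_names]
    removed = [si for si in old_instances if si['name'] not in new_names]
    return appended, removed

if __name__ == '__main__':
    old = [{'name': 'scscf'}, {'name': 'pcscf'}]
    new = [{'name': 'scscf'}, {'name': 'icscf'}]
    appended, removed = diff_by_name(old, new)
    print(appended)  # [{'name': 'icscf'}]
    print(removed)   # [{'name': 'pcscf'}]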
class PolicyThread(threading.Thread):

    def __init__(self, topology, runtime_agent, policy, service_instance, lock, dnsaas):
        super(PolicyThread, self).__init__()
        self.policy = policy
        self.service_instance = service_instance
        self.topology = topology
        self.runtime_agent = runtime_agent
        self.monitor = runtime_agent.monitoring_service
        self.lock = lock
        if dnsaas is not None:
            self.is_dnsaas = True
            self.dns_configurator = ImsDnsClient(dnsaas)
        else:
            self.is_dnsaas = False
        self.counter = 0
        self.is_stopped = False
        conf = SysUtil().get_sys_conf()
        self.template_manager = FactoryAgent().get_agent(conf["template_manager"])
        self.db = FactoryAgent().get_agent(conf["database_manager"])
        self.heat_client = HeatClient()

    def run(self):
        logger.info("Initialise policy thread for policy %s" % self.policy.name)
        self.wait_until_final_state()
        logger.info("Starting policy thread for policy %s" % self.policy.name)
        if self.is_stopped:
            logger.info("Cannot start policy threads. PolicyThreads are stopped.")
        elif self.topology.state in ["DEPLOYED", "UPDATED"]:
            self.start_policy_checker_si()
            logger.info("Started policy thread for policy %s" % self.policy.name)
        else:
            logger.error("ERROR: Unexpected topology state -> %s. Not starting the PolicyThread."
                         % self.topology.state)

    def wait_until_final_state(self, final_states=[]):
        if len(final_states) == 0:
            final_states = ["DEPLOYED", "UPDATED", "ERROR", "DELETED"]
        units_count = 0
        for service_instance in self.topology.service_instances:
            units_count += len(service_instance.units)
        i = 0
        # Poll the topology state until it reaches a final state, the thread is
        # stopped, or the retry budget (100 polls per unit) is exhausted.
        while not self.is_stopped and self.topology.state not in final_states \
                and i <= units_count * 100:
            logger.debug("PolicyThread for %s -> Waiting 5 seconds" % self.policy.name)
            time.sleep(5)
            i += 1

    def check_alarm_unit(self, unit, monitoring_service):
        logger.debug("checking for alarms")
        alarm = self.policy.alarm
        logger.debug("request item value: %s" % unit.hostname)
        item_value = monitoring_service.get_item(res_id=unit.ext_id,
                                                 item_name=alarm.meter_name,
                                                 kwargs={"period": alarm.evaluation_periods})
        # item_value = 50
        logger.debug("received item value: %s" % item_value)
        if alarm.comparison_operator == ">" or alarm.comparison_operator == "gt":
            logger.debug("Check upscaling: check that item value is bigger than threshold")
            if item_value > alarm.threshold:
                # hack for demo: only trigger after more than one consecutive breach
                self.counter += 1
                if self.counter > 1:
                    logger.info("Counter %s Trigger the action: %s"
                                % (self.counter, repr(self.policy.action)))
                    return True
                else:
                    logger.info("Not triggering action %s since the counter is not yet above 1 (%s)"
                                % (repr(self.policy.action), self.counter))
                    return False
            else:
                logger.debug("Check upscaling: item value is lower than threshold")
        elif alarm.comparison_operator == "<" or alarm.comparison_operator == "lt":
            logger.debug("Check downscaling: check that item value is lower than threshold")
            if item_value < alarm.threshold:
                logger.info("Trigger the action: %s" % repr(self.policy.action))
                return True
            else:
                logger.debug("Check downscaling: item value is bigger than threshold")
        logger.debug("Check item values finished")
        return False

    def start_policy_checker_si(self):
        logger.debug("Start active_policy check for policy %s on service instance %s"
                     % (self.policy.name, self.service_instance.name))
        while not self.is_stopped:
            logger.debug("Locking policy checking from %s" % self.policy.name)
            self.lock.acquire()
            logger.debug("Locked policy checking from %s" % self.policy.name)
            action = self.policy.action
            if action.scaling_adjustment > 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) > \
                        self.service_instance.size.get("max"):
                    logger.warning("Check upscaling - Maximum number of units exceeded for service instance: %s"
                                   % self.service_instance.name)
                    logger.debug("Release Policy lock by %s" % self.policy.name)
                    self.lock.release()
                    time.sleep(self.policy.period)
                    continue
            if action.scaling_adjustment < 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) < \
                        self.service_instance.size.get("min"):
                    logger.warning("Check downscaling - Minimum number of units exceeded for service instance: %s"
                                   % self.service_instance.name)
                    logger.debug("Release Policy lock by %s" % self.policy.name)
                    self.lock.release()
                    time.sleep(self.policy.period)
                    continue
            if self.service_instance.state != "UPDATING" and self.check_alarm_si():
                logger.debug("Execute action: %s" % repr(self.policy.action))
                if action.adjustment_type == "ChangeInCapacity":
                    self.service_instance.state = "UPDATING"
                    self.topology.state = "UPDATING"
                    if action.scaling_adjustment > 0:
                        logger.info("executing scaling out action")
                        info = {
                            "so_id": "idnotusefulhere",
                            "sm_name": "imsaas",
                            "so_phase": "update",
                            "phase_event": "start",
                            "response_time": 0,
                            "tenant": "mcntub",
                        }
                        self.start_time = time.time()
                        info_json = json.dumps(info)
                        glogger.debug(info_json)
                        if (len(self.service_instance.units) + action.scaling_adjustment) <= \
                                self.service_instance.size.get("max"):
                            for i in range(action.scaling_adjustment):
                                _hostname = "%s-%s" % (self.service_instance.name,
                                                       str(len(self.service_instance.units) + 1))
                                _state = "DEFINED"
                                new_unit = Unit(hostname=_hostname, state=_state)
                                new_unit.service_instance_id = self.service_instance.id
                                self.service_instance.units.append(new_unit)
                                self.db.persist(new_unit)
                        else:
                            logger.warning("Maximum number of units exceeded for service instance: %s"
                                           % self.service_instance.name)
                    else:
                        logger.info("executing scaling in action")
                        if (len(self.service_instance.units) + action.scaling_adjustment) >= \
                                self.service_instance.size.get("min"):
                            for i in range(-action.scaling_adjustment):
                                removed_unit = self.remove_unit(self.topology, self.service_instance)
                                self.db.remove(removed_unit)
                        else:
                            logger.warning("Minimum number of units exceeded for service instance: %s"
                                           % self.service_instance.name)
                    topology = self.db.update(self.topology)
                    template = self.template_manager.get_template(self.topology)
                    # logger.debug("Send update to heat template with: \n%s" % template)
                    if action.scaling_adjustment <= 0:
                        logger.info("provisioning the unit after scaling in operation")
                        self.configure_after_scaling(removed_unit)
                    try:
                        logger.info("updating the heat template including new units")
                        self.heat_client.update(stack_id=self.topology.ext_id, template=template)
                        self.wait_until_final_state()
                        logger.info("wait until final state function executed")
                        if self.topology.state not in ["DEPLOYED", "UPDATED"]:
                            logger.error("ERROR: Unexpected topology state -> %s" % self.topology.state)
                            self.lock.release()
                            return
                    except:
                        self.is_stopped = True
                        self.lock.release()
                    logger.info("entering provisioning phase")
                    if action.scaling_adjustment > 0:
                        logger.info("provisioning new unit after scaling out operation")
                        # adding relations between newly added unit and existing units from dependent services
                        self.configure_new_unit(new_unit)
                logger.info("Sleeping (cooldown) for %s seconds" % self.policy.action.cooldown)
                time.sleep(self.policy.action.cooldown)
            logger.debug("Release Policy lock from %s" % self.policy.name)
            self.lock.release()
            logger.info("Sleeping (evaluation period) for %s seconds" % self.policy.period)
            time.sleep(self.policy.period)

    def configure_new_unit(self, unit):
        logging.info("configuring new unit with hostname %s" % unit.hostname)
        config = {}
        config["hostname"] = unit.hostname
        config["ips"] = unit.ips
        config["zabbix_ip"] = os.environ["ZABBIX_IP"]
        config["floating_ips"] = unit.floating_ips
        try:
            logging.info("sending requests to the adapter %s with config" % config)
            self.service_instance.adapter_instance.preinit(config)
            self.service_instance.adapter_instance.install(config)
        except Exception as e:
            logging.error("error while configuring vnf %s" % e)
        self.add_relations_after_scaling(config, unit)
        try:
            self.service_instance.adapter_instance.pre_start(config)
            self.service_instance.adapter_instance.start(config)
        except Exception as e:
            logging.error("error while configuring vnf %s" % e)
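The scaling decision in start_policy_checker_si() reduces to two small guards: the adjusted unit count must stay within the min/max size of the service instance, and, as in check_alarm_unit(), an upscaling alarm only fires after more than one consecutive threshold breach. The sketch below reproduces those two guards in isolation (the min and max checks are folded into a single bounds test for brevity); the function names and sample values are hypothetical and only illustrate the logic.

# Standalone sketch of the two guards used by the policy checker.
# within_bounds, should_trigger and the sample values are illustrative assumptions.
def within_bounds(current_units, scaling_adjustment, size):
    """True if applying the adjustment keeps the unit count inside [min, max]."""
    target = current_units + scaling_adjustment
    return size.get('min') <= target <= size.get('max')

def should_trigger(item_value, threshold, comparison_operator, consecutive_breaches):
    """Mirror of check_alarm_unit(): '>' triggers only after more than one breach."""
    if comparison_operator in ('>', 'gt'):
        return item_value > threshold and consecutive_breaches > 1
    if comparison_operator in ('<', 'lt'):
        return item_value < threshold
    return False

if __name__ == '__main__':
    size = {'min': 1, 'max': 5}
    print(within_bounds(current_units=4, scaling_adjustment=1, size=size))  # True
    print(within_bounds(current_units=5, scaling_adjustment=1, size=size))  # False
    print(should_trigger(80, 70, '>', consecutive_breaches=2))              # True
    print(should_trigger(80, 70, '>', consecutive_breaches=1))              # False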
def delete(cls, topology):
    conf = sys_util().get_sys_conf()
    db = FactoryAgent().get_agent(conf['database_manager'])
    db.remove(topology)
    return topology