def check_alarm_si(self):
    """Evaluate the policy alarm for the whole service instance.

    Averages the monitored meter value over all units and compares the
    average against the alarm threshold using the policy's comparison
    operator ('>'/'gt' for upscaling, '<'/'lt' for downscaling).

    Returns:
        True if the scaling action should be triggered, False otherwise.
    """
    LOG.debug("Checking for alarms on service instance %s" % self.service_instance.name)
    alarm = self.policy.alarm
    LOG.debug("Monitoring service: %s" % self.monitor)
    _sum = 0
    _units_count = 0
    si_avg = None
    LOG.debug("Requesting meter values for service instance: %s" % self.service_instance.name)
    for unit in self.service_instance.units:
        LOG.debug("Requesting meter value for unit with hostname %s, item_name %s, and period: %s"
                  % (unit.hostname, alarm.meter_name, alarm.evaluation_periods))
        item_value = self.monitor.get_item(res_id=unit.hostname, item_name=alarm.meter_name,
                                           kwargs={'period': alarm.evaluation_periods})
        LOG.debug("Got item value for %s -> %s" % (unit.hostname, item_value))
        if item_value:
            _sum += item_value
            _units_count += 1
        else:
            # A single missing meter value invalidates the whole round.
            _sum = -1
            _units_count = -1
            return False
    if _sum >= 0 and _units_count > 0:
        # NOTE(review): on Python 2 this is floor division for int meter
        # values -- confirm meter values are floats.
        si_avg = _sum / _units_count
        LOG.debug("Average item value for the whole service instance group: %s -> %s" % (
            self.service_instance.name, si_avg))
    if not si_avg or si_avg < 0:
        LOG.warning(
            "Average item value for the whole service instance group %s was not calculated. Any Problems?" % (
                self.service_instance.name))
        return False
    if alarm.comparison_operator == '>' or alarm.comparison_operator == 'gt':
        LOG.debug(
            "Check upscaling: is the avg meter value bigger than threshold for service instance %s?"
            % self.service_instance.name)
        if si_avg > alarm.threshold:
            LOG.debug(
                "Check upscaling: avg item value is bigger than threshold for service instance %s."
                % self.service_instance.name)
            # Require more than 4 consecutive hits before upscaling.
            self.counter += 1
            if self.counter > 4:
                LOG.info('Trigger the action: %s' % repr(self.policy.action))
                return True
            else:
                # Fixed: message used to say "under 3" although the trigger
                # condition above is counter > 4.
                LOG.info('Not triggering action %s since the counter is still under 5'
                         % repr(self.policy.action))
                return False
        else:
            LOG.debug(
                "Check upscaling: avg item value is lower than threshold for service instance %s."
                % self.service_instance.name)
    elif alarm.comparison_operator == '<' or alarm.comparison_operator == 'lt':
        LOG.debug(
            "Check downscaling: is the avg meter value lower than threshold for service instance %s."
            % self.service_instance.name)
        if si_avg < alarm.threshold:
            LOG.debug(
                "Check downscaling: item value is lower than threshold for service instance %s."
                % self.service_instance.name)
            LOG.info('Trigger the action: %s' % repr(self.policy.action))
            return True
        else:
            LOG.debug(
                "Check downscaling: item value is bigger than threshold for service instance %s."
                % self.service_instance.name)
    LOG.debug(
        "Checking meter values are finished for service instance %s. Alarm was not triggered."
        % self.service_instance.name)
    return False
def list_subnet(self):
    """Return all neutron subnets wrapped as Subnet objects.

    Only the first allocation pool of each subnet is exposed
    (start/end addresses).

    Raises:
        Re-raises any error from the neutron client after logging it.
    """
    res = []
    try:
        LOG.debug('Requesting list of subnetworks...')
        lst = self.neutron.list_subnets()
        for net in lst.get('subnets'):
            res.append(
                Subnet(net.get('name'), net.get('id'), net.get('cidr'),
                       net.get('allocation_pools')[0].get('start'),
                       net.get('allocation_pools')[0].get('end')))
        return res
    except Exception as e:
        # str(e) instead of e.message: BaseException.message does not
        # exist on Python 3.
        LOG.warning("There was an error trying to collect subnets. The message is: " + str(e))
        # bare raise preserves the original traceback
        raise
def get_networks(self):
    """Return neutron networks as Network objects with their Subnets attached.

    Best-effort: on any client error, the (possibly empty) result
    collected so far is returned after logging a warning.
    """
    res = []
    try:
        subnets = self.list_subnet()
        lst = self.neutron.list_networks()
        for net in lst.get('networks'):
            n = Network(net.get('name'), net.get('id'), net.get('router:external'))
            # attach the subnets that belong to this network
            for subnet in subnets:
                if subnet.ext_id in net.get('subnets'):
                    n.subnets.append(subnet)
            res.append(n)
    except Exception as e:
        # str(e) instead of e.message: BaseException.message does not
        # exist on Python 3.
        LOG.warning("There was an error while trying to connect to \"neutron\". The message is: " + str(e))
    return res
def active_policy_unit(self):
    """Periodically evaluate the scaling policy per unit.

    Runs until self.is_stopped is set. Each round takes the policy lock,
    checks every unit's alarm, and -- when an alarm fires with
    adjustment_type 'ChangeInCapacity' -- adds or removes units within
    the instance's min/max size and pushes the updated heat template.
    """
    LOG.debug("Start active_policy check")
    while not self.is_stopped:
        LOG.debug("Locking policy checking by %s" % self.policy.name)
        self.lock.acquire()
        for unit in self.service_instance.units:
            action = self.policy.action
            if action.scaling_adjustment > 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) > self.service_instance.size.get('max'):
                    LOG.warning('Check upscaling - Maximum number of unit exceeded for service instance: %s'
                                % self.service_instance.name)
                    break
            if action.scaling_adjustment < 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) < self.service_instance.size.get('min'):
                    LOG.warning('Check downscaling - Minimum number of unit exceeded for service instance: %s'
                                % self.service_instance.name)
                    break
            if self.service_instance.state != 'UPDATING' and self.check_alarm_unit(unit, self.monitor):
                LOG.debug('Execute action: %s' % repr(self.policy.action))
                if action.adjustment_type == 'ChangeInCapacity':
                    self.service_instance.state = 'UPDATING'
                    self.topology.state = 'UPDATING'
                    if action.scaling_adjustment > 0:
                        # scale out: add new units up to the allowed maximum
                        if (len(self.service_instance.units) + action.scaling_adjustment) <= self.service_instance.size.get('max'):
                            for i in range(action.scaling_adjustment):
                                _hostname = '%s-%s' % (
                                    self.service_instance.name,
                                    str(len(self.service_instance.units) + 1))
                                _state = 'Initialised'
                                new_unit = Unit(hostname=_hostname, state=_state)
                                self.service_instance.units.append(new_unit)
                        else:
                            LOG.warning('Maximum number of unit exceeded for service instance: %s'
                                        % self.service_instance.name)
                    else:
                        # scale in: remove units down to the allowed minimum
                        if (len(self.service_instance.units) + action.scaling_adjustment) >= self.service_instance.size.get('min'):
                            for i in range(-action.scaling_adjustment):
                                self.remove_unit(self.topology, self.service_instance)
                        else:
                            LOG.warning('Minimum number of unit exceeded for service instance: %s'
                                        % self.service_instance.name)
                    try:
                        self.db.update(self.topology)
                    except Exception as msg:
                        LOG.error(msg)
                        self.topology.state = 'ERROR'
                        self.topology.ext_id = None
                    # NOTE(review): when the DB update failed above,
                    # ext_id is None here and the heat update is still
                    # attempted -- confirm this is the intended behavior.
                    template = self.template_manager.get_template(self.topology)
                    self.heat_client.update(stack_id=self.topology.ext_id, template=template)
                    LOG.info('Sleeping (cooldown) for %s seconds' % self.policy.action.cooldown)
                    time.sleep(self.policy.action.cooldown)
        LOG.debug("Release Policy lock by %s" % self.policy.name)
        self.lock.release()
        LOG.info('Sleeping (evaluation period) for %s seconds' % self.policy.period)
        time.sleep(self.policy.period)
# NOTE(review): the next three lines are the tail of a method whose
# definition lies outside this chunk; content kept verbatim.
# Update only when the state is changed
if old_state != service_instance.state:
    self.db.update(service_instance)

def update_unit_state(self, unit, resource_details=None):
    """Synchronize a unit's state with its heat stack resource.

    Args:
        unit: the unit whose state should be refreshed.
        resource_details: optional pre-fetched heat resource list; when
            None, it is fetched from heat via the topology's ext_id.

    Raises:
        Exception: when the matching server resource has no
            resource_status.
    """
    if not resource_details:
        try:
            resource_details = self.heatclient.list_resources(self.topology.ext_id)
            LOG.debug('Resource details of %s: %s' % (self.topology.ext_name, resource_details))
        except HTTPNotFound as exc:
            # Stack is gone on the heat side.
            self.topology.state = 'DELETED'
            return
        except Exception as exc:
            LOG.exception(exc)
            self.topology.state = 'ERROR'
            # Fixed: resource_details is still None here; without this
            # return the loop below would raise TypeError.
            return
    for vm in resource_details:
        if vm.get('resource_type') == "OS::Nova::Server":
            if vm.get('resource_name') == unit.hostname:
                unit.ext_id = vm['physical_resource_id']
                heat_state = vm.get('resource_status')
                if heat_state:
                    _new_state = translate(heat_state, HEAT_TO_EMM_STATE)
                    LOG.debug("State of unit %s: translate from %s to %s"
                              % (unit.hostname, heat_state, _new_state))
                    # persist only actual state transitions
                    if _new_state != unit.state:
                        unit.state = _new_state
                        self.db.update(unit)
                else:
                    LOG.warning("State of unit %s: %s" % (unit.hostname, vm.get('resource_status')))
                    raise Exception
def check_alarm_si(self):
    """Evaluate the policy alarm for the whole service instance.

    Averages the monitored meter value over all units and compares the
    average against the alarm threshold using the policy's comparison
    operator ('>'/'gt' for upscaling, '<'/'lt' for downscaling).

    Returns:
        True if the scaling action should be triggered, False otherwise.
    """
    LOG.debug("Checking for alarms on service instance %s" % self.service_instance.name)
    alarm = self.policy.alarm
    LOG.debug("Monitoring service: %s" % self.monitor)
    _sum = 0
    _units_count = 0
    si_avg = None
    LOG.debug("Requesting meter values for service instance: %s" % self.service_instance.name)
    for unit in self.service_instance.units:
        LOG.debug("Requesting meter value for unit with hostname %s, item_name %s, and period: %s"
                  % (unit.hostname, alarm.meter_name, alarm.evaluation_periods))
        item_value = self.monitor.get_item(res_id=unit.hostname, item_name=alarm.meter_name,
                                           kwargs={'period': alarm.evaluation_periods})
        LOG.debug("Got item value for %s -> %s" % (unit.hostname, item_value))
        if item_value:
            _sum += item_value
            _units_count += 1
        else:
            # A single missing meter value invalidates the whole round.
            _sum = -1
            _units_count = -1
            return False
    if _sum >= 0 and _units_count > 0:
        # NOTE(review): on Python 2 this is floor division for int meter
        # values -- confirm meter values are floats.
        si_avg = _sum / _units_count
        LOG.debug("Average item value for the whole service instance group: %s -> %s" % (
            self.service_instance.name, si_avg))
    if not si_avg or si_avg < 0:
        LOG.warning(
            "Average item value for the whole service instance group %s was not calculated. Any Problems?" % (
                self.service_instance.name))
        return False
    if alarm.comparison_operator == '>' or alarm.comparison_operator == 'gt':
        LOG.debug(
            "Check upscaling: is the avg meter value bigger than threshold for service instance %s?"
            % self.service_instance.name)
        if si_avg > alarm.threshold:
            LOG.debug(
                "Check upscaling: avg item value is bigger than threshold for service instance %s."
                % self.service_instance.name)
            # Require more than 4 consecutive hits before upscaling.
            self.counter += 1
            if self.counter > 4:
                LOG.info('Trigger the action: %s' % repr(self.policy.action))
                return True
            else:
                # Fixed: message used to say "under 3" although the trigger
                # condition above is counter > 4.
                LOG.info('Not triggering action %s since the counter is still under 5'
                         % repr(self.policy.action))
                return False
        else:
            LOG.debug(
                "Check upscaling: avg item value is lower than threshold for service instance %s."
                % self.service_instance.name)
    elif alarm.comparison_operator == '<' or alarm.comparison_operator == 'lt':
        LOG.debug(
            "Check downscaling: is the avg meter value lower than threshold for service instance %s."
            % self.service_instance.name)
        if si_avg < alarm.threshold:
            LOG.debug(
                "Check downscaling: item value is lower than threshold for service instance %s."
                % self.service_instance.name)
            LOG.info('Trigger the action: %s' % repr(self.policy.action))
            return True
        else:
            LOG.debug(
                "Check downscaling: item value is bigger than threshold for service instance %s."
                % self.service_instance.name)
    LOG.debug(
        "Checking meter values are finished for service instance %s. Alarm was not triggered."
        % self.service_instance.name)
    return False
def start_policy_checker_si(self):
    """Periodically evaluate the scaling policy for the service instance.

    Runs until self.is_stopped is set. Each round takes the policy lock,
    validates that the configured scaling adjustment stays within the
    instance's min/max size, evaluates the alarm and -- if it fires with
    adjustment_type 'ChangeInCapacity' -- scales the service instance,
    persists the change and pushes the updated heat template.
    """
    LOG.debug("Start active_policy check for policy %s on service instance %s" % (
        self.policy.name, self.service_instance.name))
    while not self.is_stopped:
        LOG.debug("Locking policy checking from %s" % self.policy.name)
        self.lock.acquire()
        LOG.debug("Locked policy checking from %s" % self.policy.name)
        action = self.policy.action
        if action.scaling_adjustment > 0:
            if (len(self.service_instance.units) + action.scaling_adjustment) > self.service_instance.size.get('max'):
                LOG.warning('Check upscaling - Maximum number of unit exceeded for service instance: %s'
                            % self.service_instance.name)
                LOG.debug("Release Policy lock by %s" % self.policy.name)
                self.lock.release()
                time.sleep(self.policy.period)
                continue
        if action.scaling_adjustment < 0:
            if (len(self.service_instance.units) + action.scaling_adjustment) < self.service_instance.size.get('min'):
                LOG.warning('Check downscaling - Minimum number of unit exceeded for service instance: %s'
                            % self.service_instance.name)
                LOG.debug("Release Policy lock by %s" % self.policy.name)
                self.lock.release()
                time.sleep(self.policy.period)
                continue
        if self.service_instance.state != 'UPDATING' and self.check_alarm_si():
            LOG.debug('Execute action: %s' % repr(self.policy.action))
            if action.adjustment_type == 'ChangeInCapacity':
                self.service_instance.state = 'UPDATING'
                self.topology.state = 'UPDATING'
                if action.scaling_adjustment > 0:
                    # scale out: add and persist new units up to the maximum
                    if (len(self.service_instance.units) + action.scaling_adjustment) <= self.service_instance.size.get('max'):
                        for i in range(action.scaling_adjustment):
                            _hostname = '%s-%s' % (
                                self.service_instance.name,
                                str(len(self.service_instance.units) + 1))
                            _state = 'DEFINED'
                            new_unit = Unit(hostname=_hostname, state=_state)
                            new_unit.service_instance_id = self.service_instance.id
                            self.service_instance.units.append(new_unit)
                            self.db.persist(new_unit)
                    else:
                        LOG.warning('Maximum number of unit exceeded for service instance: %s'
                                    % self.service_instance.name)
                else:
                    # scale in: remove and delete units down to the minimum
                    if (len(self.service_instance.units) + action.scaling_adjustment) >= self.service_instance.size.get('min'):
                        for i in range(-action.scaling_adjustment):
                            removed_unit = self.remove_unit(self.topology, self.service_instance)
                            self.db.remove(removed_unit)
                    else:
                        LOG.warning('Minimum number of unit exceeded for service instance: %s'
                                    % self.service_instance.name)
                topology = self.db.update(self.topology)
                template = self.template_manager.get_template(self.topology)
                try:
                    self.heat_client.update(stack_id=self.topology.ext_id, template=template)
                    self.wait_until_final_state()
                    if not self.topology.state == 'DEPLOYED':
                        LOG.error("ERROR: Something went wrong. Seems to be an error. Topology state -> %s"
                                  % self.topology.state)
                        self.lock.release()
                        return
                except Exception:
                    # Was a bare "except:"; narrowed so SystemExit and
                    # KeyboardInterrupt still propagate.
                    # NOTE(review): this path releases the lock and then
                    # falls through to the final release below, which would
                    # raise on an already-unlocked lock -- confirm flow.
                    self.is_stopped = True
                    self.lock.release()
            LOG.info('Sleeping (cooldown) for %s seconds' % self.policy.action.cooldown)
            time.sleep(self.policy.action.cooldown)
        LOG.debug("Release Policy lock from %s" % self.policy.name)
        self.lock.release()
        LOG.info('Sleeping (evaluation period) for %s seconds' % self.policy.period)
        time.sleep(self.policy.period)
def active_policy_unit(self):
    """Periodically evaluate the scaling policy per unit.

    Runs until self.is_stopped is set. Each round takes the policy lock,
    checks every unit's alarm, and -- when an alarm fires with
    adjustment_type 'ChangeInCapacity' -- adds or removes units within
    the instance's min/max size and pushes the updated heat template.
    """
    LOG.debug("Start active_policy check")
    while not self.is_stopped:
        LOG.debug("Locking policy checking by %s" % self.policy.name)
        self.lock.acquire()
        for unit in self.service_instance.units:
            action = self.policy.action
            if action.scaling_adjustment > 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) > self.service_instance.size.get('max'):
                    LOG.warning('Check upscaling - Maximum number of unit exceeded for service instance: %s'
                                % self.service_instance.name)
                    break
            if action.scaling_adjustment < 0:
                if (len(self.service_instance.units) + action.scaling_adjustment) < self.service_instance.size.get('min'):
                    LOG.warning('Check downscaling - Minimum number of unit exceeded for service instance: %s'
                                % self.service_instance.name)
                    break
            if self.service_instance.state != 'UPDATING' and self.check_alarm_unit(unit, self.monitor):
                LOG.debug('Execute action: %s' % repr(self.policy.action))
                if action.adjustment_type == 'ChangeInCapacity':
                    self.service_instance.state = 'UPDATING'
                    self.topology.state = 'UPDATING'
                    if action.scaling_adjustment > 0:
                        # scale out: add new units up to the allowed maximum
                        if (len(self.service_instance.units) + action.scaling_adjustment) <= self.service_instance.size.get('max'):
                            for i in range(action.scaling_adjustment):
                                _hostname = '%s-%s' % (
                                    self.service_instance.name,
                                    str(len(self.service_instance.units) + 1))
                                _state = 'Initialised'
                                new_unit = Unit(hostname=_hostname, state=_state)
                                self.service_instance.units.append(new_unit)
                        else:
                            LOG.warning('Maximum number of unit exceeded for service instance: %s'
                                        % self.service_instance.name)
                    else:
                        # scale in: remove units down to the allowed minimum
                        if (len(self.service_instance.units) + action.scaling_adjustment) >= self.service_instance.size.get('min'):
                            for i in range(-action.scaling_adjustment):
                                self.remove_unit(self.topology, self.service_instance)
                        else:
                            LOG.warning('Minimum number of unit exceeded for service instance: %s'
                                        % self.service_instance.name)
                    try:
                        self.db.update(self.topology)
                    except Exception as msg:
                        LOG.error(msg)
                        self.topology.state = 'ERROR'
                        self.topology.ext_id = None
                    # NOTE(review): when the DB update failed above,
                    # ext_id is None here and the heat update is still
                    # attempted -- confirm this is the intended behavior.
                    template = self.template_manager.get_template(self.topology)
                    self.heat_client.update(stack_id=self.topology.ext_id, template=template)
                    LOG.info('Sleeping (cooldown) for %s seconds' % self.policy.action.cooldown)
                    time.sleep(self.policy.action.cooldown)
        LOG.debug("Release Policy lock by %s" % self.policy.name)
        self.lock.release()
        LOG.info('Sleeping (evaluation period) for %s seconds' % self.policy.period)
        time.sleep(self.policy.period)
def update_unit_state(self, unit, resource_details=None):
    """Synchronize a unit's state with its heat stack resource.

    Args:
        unit: the unit whose state should be refreshed.
        resource_details: optional pre-fetched heat resource list; when
            None, it is fetched from heat via the topology's ext_id.

    Raises:
        Exception: when the matching server resource has no
            resource_status.
    """
    if not resource_details:
        try:
            resource_details = self.heatclient.list_resources(self.topology.ext_id)
            LOG.debug('Resource details of %s: %s' % (self.topology.ext_name, resource_details))
        except HTTPNotFound as exc:
            # Stack is gone on the heat side.
            self.topology.state = 'DELETED'
            return
        except Exception as exc:
            LOG.exception(exc)
            self.topology.state = 'ERROR'
            # Fixed: resource_details is still None here; without this
            # return the loop below would raise TypeError.
            return
    for vm in resource_details:
        if vm.get('resource_type') == "OS::Nova::Server":
            if vm.get('resource_name') == unit.hostname:
                unit.ext_id = vm['physical_resource_id']
                heat_state = vm.get('resource_status')
                if heat_state:
                    _new_state = translate(heat_state, HEAT_TO_EMM_STATE)
                    LOG.debug("State of unit %s: translate from %s to %s"
                              % (unit.hostname, heat_state, _new_state))
                    # persist only actual state transitions
                    if _new_state != unit.state:
                        unit.state = _new_state
                        self.db.update(unit)
                else:
                    LOG.warning("State of unit %s: %s" % (unit.hostname, vm.get('resource_status')))
                    raise Exception
def init_sys(self): LOG.info("Starting the System") LOG.debug('Creating and removing the tables') LOG.debug('getting the DbManager') LOG.debug('Retrieving the System Configurations') sys_config.props = {} sys_config.name = 'SystemConfiguration' self._read_properties(sys_config.props) db = FactoryAgent.FactoryAgent().get_agent(agent=sys_config.props['database_manager']) if sys_config.props['create_tables'] == 'True': db.create_tables() old_cfg = db.get_by_name(Configuration, sys_config.name) if old_cfg: old_cfg[0].props = sys_config.props db.update(old_cfg[0]) else: db.persist(sys_config) try: #Persist and update networks on database nets = get_networks() #remove old networks available_network_names = [net.name for net in nets] persisted_nets = db.get_all(Network) for persisted_net in persisted_nets: if persisted_net.name not in available_network_names: db.remove(persisted_net) #update existing networks for net in nets: nets = db.get_by_name(Network, net.name) # if len(nets) >= 1: # existing_net = nets[0] # net.id = existing_net.id # for subnet in net.subnets: # for existing_subnet in existing_net.subnets: # if subnet.name == existing_subnet.name: # subnet.id = existing_subnet.id # existing_subnet = subnet # db.update(existing_subnet) # existing_net = net # db.update(existing_net) # else: try: db.persist(net) except IntegrityError, exc: LOG.warning('Network \"%s\" is already persisted on the Database.' 
% net.name) #Persist and update keys on database keys = get_keys() #remove old keys available_key_names = [key.name for key in keys] persisted_keys = db.get_all(Key) for persisted_key in persisted_keys: if persisted_key.name not in available_key_names: db.remove(persisted_key) #update existing keys for key in keys: keys = db.get_by_name(Key, key.name) if len(keys) >= 1: existing_key = keys[0] key.id = existing_key.id existing_key = key db.update(existing_key) else: try: db.persist(key) except IntegrityError, exc: LOG.warning('Key \"%s\" is already persisted on the Database.' % key.name)
def init_sys(self): LOG.info("Starting the System") LOG.debug('Creating and removing the tables') LOG.debug('getting the DbManager') LOG.debug('Retrieving the System Configurations') sys_config.props = {} sys_config.name = 'SystemConfiguration' self._read_properties(sys_config.props) db = FactoryAgent.FactoryAgent().get_agent( agent=sys_config.props['database_manager']) if sys_config.props['create_tables'] == 'True': db.create_tables() old_cfg = db.get_by_name(Configuration, sys_config.name) if old_cfg: old_cfg[0].props = sys_config.props db.update(old_cfg[0]) else: db.persist(sys_config) try: #Persist and update networks on database nets = get_networks() #remove old networks available_network_names = [net.name for net in nets] persisted_nets = db.get_all(Network) for persisted_net in persisted_nets: if persisted_net.name not in available_network_names: db.remove(persisted_net) #update existing networks for net in nets: nets = db.get_by_name(Network, net.name) # if len(nets) >= 1: # existing_net = nets[0] # net.id = existing_net.id # for subnet in net.subnets: # for existing_subnet in existing_net.subnets: # if subnet.name == existing_subnet.name: # subnet.id = existing_subnet.id # existing_subnet = subnet # db.update(existing_subnet) # existing_net = net # db.update(existing_net) # else: try: db.persist(net) except IntegrityError, exc: LOG.warning( 'Network \"%s\" is already persisted on the Database.' 
% net.name) #Persist and update keys on database keys = get_keys() #remove old keys available_key_names = [key.name for key in keys] persisted_keys = db.get_all(Key) for persisted_key in persisted_keys: if persisted_key.name not in available_key_names: db.remove(persisted_key) #update existing keys for key in keys: keys = db.get_by_name(Key, key.name) if len(keys) >= 1: existing_key = keys[0] key.id = existing_key.id existing_key = key db.update(existing_key) else: try: db.persist(key) except IntegrityError, exc: LOG.warning( 'Key \"%s\" is already persisted on the Database.' % key.name)
class SysUtil: def print_logo(self): LOG.info('\n' + '$$$$$$\ $$\ $$\ $$$$$$\ $$$$$$\ $$$$$$\ \n' '\_$$ _|$$$\ $$$ |$$ __$$\ $$ __$$\ $$ __$$\n' ' $$ | $$$$\ $$$$ |$$ / \__|$$ / \__|$$ / $$\n' ' $$ | $$\$$\$$ $$ |\$$$$$$\ \$$$$$$\ $$ | $$ |\n' ' $$ | $$ \$$$ $$ | \____$$\ \____$$\ $$ | $$ |\n' ' $$ | $$ |\$ /$$ |$$\ $$ |$$\ $$ |$$ | $$ |\n' '$$$$$$\ $$ | \_/ $$ |\$$$$$$ |\$$$$$$ | $$$$$$ |\n' '\______|\__| \__| \______/ \______/ \______/ \n') def _read_properties(self, props={}): with open('%s/etc/maas.properties' % PATH, 'r') as f: LOG.debug("Using %s/emm.properties file" % PATH) for line in f: line = line.rstrip() if "=" not in line: continue if line.startswith("#"): continue k, v = line.split("=", 1) props[k] = v def init_sys(self): LOG.info("Starting the System") LOG.debug('Creating and removing the tables') LOG.debug('getting the DbManager') LOG.debug('Retrieving the System Configurations') sys_config.props = {} sys_config.name = 'SystemConfiguration' self._read_properties(sys_config.props) db = FactoryAgent.FactoryAgent().get_agent( agent=sys_config.props['database_manager']) if sys_config.props['create_tables'] == 'True': db.create_tables() old_cfg = db.get_by_name(Configuration, sys_config.name) if old_cfg: old_cfg[0].props = sys_config.props db.update(old_cfg[0]) else: db.persist(sys_config) try: #Persist and update networks on database nets = get_networks() #remove old networks available_network_names = [net.name for net in nets] persisted_nets = db.get_all(Network) for persisted_net in persisted_nets: if persisted_net.name not in available_network_names: db.remove(persisted_net) #update existing networks for net in nets: nets = db.get_by_name(Network, net.name) # if len(nets) >= 1: # existing_net = nets[0] # net.id = existing_net.id # for subnet in net.subnets: # for existing_subnet in existing_net.subnets: # if subnet.name == existing_subnet.name: # subnet.id = existing_subnet.id # existing_subnet = subnet # db.update(existing_subnet) # existing_net = 
net # db.update(existing_net) # else: try: db.persist(net) except IntegrityError, exc: LOG.warning( 'Network \"%s\" is already persisted on the Database.' % net.name) #Persist and update keys on database keys = get_keys() #remove old keys available_key_names = [key.name for key in keys] persisted_keys = db.get_all(Key) for persisted_key in persisted_keys: if persisted_key.name not in available_key_names: db.remove(persisted_key) #update existing keys for key in keys: keys = db.get_by_name(Key, key.name) if len(keys) >= 1: existing_key = keys[0] key.id = existing_key.id existing_key = key db.update(existing_key) else: try: db.persist(key) except IntegrityError, exc: LOG.warning( 'Key \"%s\" is already persisted on the Database.' % key.name) #Persist and update flavors on database flavors = get_flavors() #remove old flavors available_flavor_names = [flavor.name for flavor in flavors] persisted_flavors = db.get_all(Flavor) for persisted_flavor in persisted_flavors: if persisted_flavor.name not in available_flavor_names: db.remove(persisted_flavor) #update existing flavors for flavor in flavors: flavors = db.get_by_name(Flavor, flavor.name) if len(flavors) >= 1: existing_flavor = flavors[0] flavor.id = existing_flavor.id existing_flavor = flavor db.update(existing_flavor) else: try: db.persist(flavor) except IntegrityError, exc: LOG.warning( 'Flavor \"%s\" is already persisted on the Database. Trigger update.' % flavor.name)
if persisted_image.name not in available_image_names: db.remove(persisted_image) #update existing images for image in images: images = db.get_by_name(Image, image.name) if len(images) >= 1: existing_image = images[0] image.id = existing_image.id existing_image = image db.update(existing_image) else: try: db.persist(image) except IntegrityError, exc: LOG.warning( 'Image \"%s\" is already persisted on the Database. Trigger update.' % image.name) all_quotas = db.get_all(Quotas) all_quotas_tenants = [quotas.tenant_id for quotas in all_quotas] new_quotas = get_quotas() #update existing quotas for quotas in all_quotas: if new_quotas.tenant_id in all_quotas_tenants: new_quotas.id = quotas.id quotas = new_quotas db.update(quotas) else: try: db.persist(new_quotas) except IntegrityError, exc:
def update(self, new_topology, old_topology):
    """Merge new_topology into old_topology and return the updated topology.

    Service instances are added/removed to match new_topology, updatable
    attributes (size, configuration, policies) are copied over while
    non-updatable ones only log a warning, and finally the unit count of
    each pre-existing instance is clamped to its min/max size.

    Args:
        new_topology: the desired topology definition.
        old_topology: the currently persisted topology; mutated in place.

    Returns:
        The updated topology (same object as old_topology).
    """
    conf = sys_util().get_sys_conf()
    db = FactoryAgent().get_agent(conf['database_manager'])
    updated_topology = old_topology
    updated_topology.name = new_topology.name
    # check for additional service instances and add them to the list of new instances
    appended_service_instances = []
    for new_service_instance in new_topology.service_instances:
        is_found = False
        for updated_service_instance in updated_topology.service_instances:
            if new_service_instance.name == updated_service_instance.name:
                is_found = True
                break
        if not is_found:
            appended_service_instances.append(new_service_instance)
    # check for removed service instances and add it to the list of removed instances
    removed_service_instances = []
    for updated_service_instance in updated_topology.service_instances:
        is_found = False
        for new_service_instance in new_topology.service_instances:
            if new_service_instance.name == updated_service_instance.name:
                is_found = True
                break
        if not is_found:
            removed_service_instances.append(updated_service_instance)
    # remove removed service instances
    for removed_service_instance in removed_service_instances:
        updated_topology.service_instances.remove(removed_service_instance)
        LOG.debug('Removed ServiceInstance \"%s\" from Topology \"%s\".' % (
            removed_service_instance.name, updated_topology.name))
    # append additional service instances
    for appended_service_instance in appended_service_instances:
        appended_service_instance.topology_id = updated_topology.id
        updated_topology.service_instances.append(appended_service_instance)
        if appended_service_instance.policies is not None:
            for policy in appended_service_instance.policies:
                db.persist(policy)
        db.persist(appended_service_instance)
        LOG.debug('Appended ServiceInstance \"%s\" to Topology \"%s\".' % (
            appended_service_instance.name, updated_topology.name))
    # Update all values for each service instance
    for updated_service_instance in updated_topology.service_instances:
        for new_service_instance in new_topology.service_instances:
            if updated_service_instance.name == new_service_instance.name:
                updated_service_instance.size = new_service_instance.size
                updated_service_instance.configuration = new_service_instance.configuration
                updated_service_instance.policies = new_service_instance.policies
                if new_service_instance.service_type and updated_service_instance.service_type != new_service_instance.service_type:
                    LOG.warning("Cannot update service_type for %s->%s. Not Implemented." % (
                        updated_topology.name, updated_service_instance.name))
                if new_service_instance.adapter and updated_service_instance.adapter != new_service_instance.adapter:
                    LOG.warning("Cannot update adapter for %s->%s. Not Implemented." % (
                        updated_topology.name, updated_service_instance.name))
                if new_service_instance.flavor and updated_service_instance.flavor.name != new_service_instance.flavor.name:
                    LOG.warning("Cannot update flavor for %s->%s. Not Implemented." % (
                        updated_topology.name, updated_service_instance.name))
                if new_service_instance.image and updated_service_instance.image.name != new_service_instance.image.name:
                    LOG.warning("Cannot update image for %s->%s. Not Implemented." % (
                        updated_topology.name, updated_service_instance.name))
                if new_service_instance.networks is not None:
                    LOG.warning("Cannot update networks for %s->%s. Not Implemented." % (
                        updated_topology.name, updated_service_instance.name))
                if new_service_instance.requirements is not None:
                    # Fixed copy-paste: this message used to say "networks".
                    LOG.warning("Cannot update requirements for %s->%s. Not Implemented." % (
                        updated_topology.name, updated_service_instance.name))
                if new_service_instance.user_data is not None:
                    LOG.warning("Cannot update user_data for %s->%s. Not Implemented." % (
                        updated_topology.name, updated_service_instance.name))
                if new_service_instance.key and updated_service_instance.key.name != new_service_instance.key.name:
                    LOG.warning("Cannot update key for %s->%s without replacement." % (
                        updated_topology.name, updated_service_instance.name))
    # Add or remove units according to minimal or maximal size
    for updated_service_instance in updated_topology.service_instances:
        if updated_service_instance not in appended_service_instances:
            if len(updated_service_instance.units) < updated_service_instance.size.get('min'):
                for i in range(updated_service_instance.size.get('min') - len(updated_service_instance.units)):
                    _hostname = '%s-%s' % (
                        updated_service_instance.name,
                        str(len(updated_service_instance.units) + 1))
                    _state = 'DEFINED'
                    new_unit = Unit(hostname=_hostname, state=_state)
                    new_unit.service_instance_id = updated_service_instance.id
                    updated_service_instance.units.append(new_unit)
                    db.persist(new_unit)
            if len(updated_service_instance.units) > updated_service_instance.size.get('max'):
                for i in range(len(updated_service_instance.units) - updated_service_instance.size.get('max')):
                    removed_unit = updated_service_instance.units.pop(len(updated_service_instance.units) - 1)
                    db.remove(removed_unit)
    return updated_topology
def start_policy_checker_si(self):
    """Periodically evaluate the scaling policy for the service instance.

    Runs until self.is_stopped is set. Each round takes the policy lock,
    validates that the configured scaling adjustment stays within the
    instance's min/max size, evaluates the alarm and -- if it fires with
    adjustment_type 'ChangeInCapacity' -- scales the service instance,
    persists the change and pushes the updated heat template.
    """
    LOG.debug("Start active_policy check for policy %s on service instance %s" % (
        self.policy.name, self.service_instance.name))
    while not self.is_stopped:
        LOG.debug("Locking policy checking from %s" % self.policy.name)
        self.lock.acquire()
        LOG.debug("Locked policy checking from %s" % self.policy.name)
        action = self.policy.action
        if action.scaling_adjustment > 0:
            if (len(self.service_instance.units) + action.scaling_adjustment) > self.service_instance.size.get('max'):
                LOG.warning('Check upscaling - Maximum number of unit exceeded for service instance: %s'
                            % self.service_instance.name)
                LOG.debug("Release Policy lock by %s" % self.policy.name)
                self.lock.release()
                time.sleep(self.policy.period)
                continue
        if action.scaling_adjustment < 0:
            if (len(self.service_instance.units) + action.scaling_adjustment) < self.service_instance.size.get('min'):
                LOG.warning('Check downscaling - Minimum number of unit exceeded for service instance: %s'
                            % self.service_instance.name)
                LOG.debug("Release Policy lock by %s" % self.policy.name)
                self.lock.release()
                time.sleep(self.policy.period)
                continue
        if self.service_instance.state != 'UPDATING' and self.check_alarm_si():
            LOG.debug('Execute action: %s' % repr(self.policy.action))
            if action.adjustment_type == 'ChangeInCapacity':
                self.service_instance.state = 'UPDATING'
                self.topology.state = 'UPDATING'
                if action.scaling_adjustment > 0:
                    # scale out: add and persist new units up to the maximum
                    if (len(self.service_instance.units) + action.scaling_adjustment) <= self.service_instance.size.get('max'):
                        for i in range(action.scaling_adjustment):
                            _hostname = '%s-%s' % (
                                self.service_instance.name,
                                str(len(self.service_instance.units) + 1))
                            _state = 'DEFINED'
                            new_unit = Unit(hostname=_hostname, state=_state)
                            new_unit.service_instance_id = self.service_instance.id
                            self.service_instance.units.append(new_unit)
                            self.db.persist(new_unit)
                    else:
                        LOG.warning('Maximum number of unit exceeded for service instance: %s'
                                    % self.service_instance.name)
                else:
                    # scale in: remove and delete units down to the minimum
                    if (len(self.service_instance.units) + action.scaling_adjustment) >= self.service_instance.size.get('min'):
                        for i in range(-action.scaling_adjustment):
                            removed_unit = self.remove_unit(self.topology, self.service_instance)
                            self.db.remove(removed_unit)
                    else:
                        LOG.warning('Minimum number of unit exceeded for service instance: %s'
                                    % self.service_instance.name)
                topology = self.db.update(self.topology)
                template = self.template_manager.get_template(self.topology)
                try:
                    self.heat_client.update(stack_id=self.topology.ext_id, template=template)
                    self.wait_until_final_state()
                    if not self.topology.state == 'DEPLOYED':
                        LOG.error("ERROR: Something went wrong. Seems to be an error. Topology state -> %s"
                                  % self.topology.state)
                        self.lock.release()
                        return
                except Exception:
                    # Was a bare "except:"; narrowed so SystemExit and
                    # KeyboardInterrupt still propagate.
                    # NOTE(review): this path releases the lock and then
                    # falls through to the final release below, which would
                    # raise on an already-unlocked lock -- confirm flow.
                    self.is_stopped = True
                    self.lock.release()
            LOG.info('Sleeping (cooldown) for %s seconds' % self.policy.action.cooldown)
            time.sleep(self.policy.action.cooldown)
        LOG.debug("Release Policy lock from %s" % self.policy.name)
        self.lock.release()
        LOG.info('Sleeping (evaluation period) for %s seconds' % self.policy.period)
        time.sleep(self.policy.period)
def update(self, new_topology, old_topology):
    """Merge *new_topology* into the persisted *old_topology* and return it.

    The old topology is mutated in place:
      - service instances present only in the new topology are appended
        (and persisted, together with their policies),
      - instances that disappeared from the new topology are removed,
      - updatable attributes (size, configuration, policies) are copied
        onto the surviving instances; non-updatable attributes only log a
        warning,
      - unit counts of pre-existing instances are clamped into the
        ``size['min']``..``size['max']`` range.

    :param new_topology: incoming topology definition
    :param old_topology: currently persisted topology to update in place
    :return: the updated topology (same object as *old_topology*)
    """
    conf = sys_util().get_sys_conf()
    db = FactoryAgent().get_agent(conf['database_manager'])
    updated_topology = old_topology
    updated_topology.name = new_topology.name

    appended_service_instances = self._sync_service_instances(
        db, updated_topology, new_topology)
    self._copy_instance_attributes(updated_topology, new_topology)
    self._enforce_unit_bounds(db, updated_topology, appended_service_instances)
    return updated_topology

def _sync_service_instances(self, db, updated_topology, new_topology):
    """Append instances that are new and remove the ones that disappeared.

    Instances are matched by name. Returns the list of appended instances
    so the caller can exclude them from unit-bound enforcement (their
    units are created at deploy time, not here).
    """
    current_names = set(si.name for si in updated_topology.service_instances)
    new_names = set(si.name for si in new_topology.service_instances)

    appended = [si for si in new_topology.service_instances
                if si.name not in current_names]
    removed = [si for si in updated_topology.service_instances
               if si.name not in new_names]

    for service_instance in removed:
        updated_topology.service_instances.remove(service_instance)
        LOG.debug('Removed ServiceInstance "%s" from Topology "%s".' % (
            service_instance.name, updated_topology.name))

    for service_instance in appended:
        service_instance.topology_id = updated_topology.id
        updated_topology.service_instances.append(service_instance)
        # Persist the policies before the owning instance.
        if service_instance.policies is not None:
            for policy in service_instance.policies:
                db.persist(policy)
        db.persist(service_instance)
        LOG.debug('Appended ServiceInstance "%s" to Topology "%s".' % (
            service_instance.name, updated_topology.name))
    return appended

def _copy_instance_attributes(self, updated_topology, new_topology):
    """Copy updatable attributes onto each same-named instance.

    size/configuration/policies are taken over directly; everything else
    (service_type, adapter, flavor, image, networks, requirements,
    user_data, key) cannot be changed in place and only triggers a
    warning.
    """
    new_by_name = {}
    for service_instance in new_topology.service_instances:
        # Keep the first occurrence, mirroring the first-match semantics
        # of the previous nested-loop lookup.
        new_by_name.setdefault(service_instance.name, service_instance)

    for current in updated_topology.service_instances:
        new = new_by_name.get(current.name)
        if new is None:
            continue
        current.size = new.size
        current.configuration = new.configuration
        current.policies = new.policies
        if new.service_type and current.service_type != new.service_type:
            LOG.warning(
                "Cannot update service_type for %s->%s. Not Implemented."
                % (updated_topology.name, current.name))
        if new.adapter and current.adapter != new.adapter:
            LOG.warning(
                "Cannot update adapter for %s->%s. Not Implemented."
                % (updated_topology.name, current.name))
        if new.flavor and current.flavor.name != new.flavor.name:
            LOG.warning(
                "Cannot update flavor for %s->%s. Not Implemented."
                % (updated_topology.name, current.name))
        if new.image and current.image.name != new.image.name:
            LOG.warning(
                "Cannot update image for %s->%s. Not Implemented."
                % (updated_topology.name, current.name))
        if new.networks is not None:
            LOG.warning(
                "Cannot update networks for %s->%s. Not Implemented."
                % (updated_topology.name, current.name))
        if new.requirements is not None:
            # Bug fix: this warning previously said "networks" (copy-paste).
            LOG.warning(
                "Cannot update requirements for %s->%s. Not Implemented."
                % (updated_topology.name, current.name))
        if new.user_data is not None:
            LOG.warning(
                "Cannot update user_data for %s->%s. Not Implemented."
                % (updated_topology.name, current.name))
        if new.key and current.key.name != new.key.name:
            LOG.warning(
                "Cannot update key for %s->%s without replacement."
                % (updated_topology.name, current.name))

def _enforce_unit_bounds(self, db, updated_topology, appended_service_instances):
    """Clamp each pre-existing instance's unit count into its size bounds.

    Grows instances below ``size['min']`` by persisting fresh 'DEFINED'
    units and shrinks instances above ``size['max']`` by popping and
    removing units from the tail. Freshly appended instances are skipped.
    """
    for service_instance in updated_topology.service_instances:
        if service_instance in appended_service_instances:
            continue
        missing = service_instance.size.get('min') - len(service_instance.units)
        for _ in range(missing):
            # Hostname is derived from the next unit index.
            hostname = '%s-%s' % (service_instance.name,
                                  str(len(service_instance.units) + 1))
            new_unit = Unit(hostname=hostname, state='DEFINED')
            new_unit.service_instance_id = service_instance.id
            service_instance.units.append(new_unit)
            db.persist(new_unit)
        surplus = len(service_instance.units) - service_instance.size.get('max')
        for _ in range(surplus):
            removed_unit = service_instance.units.pop()
            db.remove(removed_unit)
# NOTE(review): fragment — the enclosing function's header lies outside this
# chunk; names such as `persisted_images`, `available_image_names`, `db`,
# `images`, `Image`, `Quotas` and `get_quotas` are presumably bound there or
# at module level — verify against the full file.

# Drop persisted images that are no longer available in the backend.
for persisted_image in persisted_images:
    if persisted_image.name not in available_image_names:
        db.remove(persisted_image)
#update existing images
for image in images:
    # NOTE(review): rebinding `images` inside `for image in images` works
    # (the loop iterator holds the original list object) but is fragile and
    # confusing — consider a distinct name such as `matching_images`.
    images = db.get_by_name(Image, image.name)
    if len(images) >= 1:
        # Image already persisted: carry over the DB id and update in place.
        existing_image = images[0]
        image.id = existing_image.id
        existing_image = image
        db.update(existing_image)
    else:
        try:
            db.persist(image)
        # Python-2-only except syntax; would need `as exc` under Python 3.
        except IntegrityError, exc:
            LOG.warning('Image \"%s\" is already persisted on the Database. Trigger update.' % image.name)
all_quotas = db.get_all(Quotas)
all_quotas_tenants = [quotas.tenant_id for quotas in all_quotas]
new_quotas = get_quotas()
#update existing quotas
# NOTE(review): when `all_quotas` is empty this loop body never runs, so the
# freshly fetched `new_quotas` is never persisted; also the membership test
# against `all_quotas_tenants` is loop-invariant, so every persisted quotas
# row for a matching tenant gets overwritten with the same `new_quotas` id.
# Confirm whether a per-row `quotas.tenant_id == new_quotas.tenant_id`
# comparison was intended.
for quotas in all_quotas:
    if new_quotas.tenant_id in all_quotas_tenants:
        new_quotas.id = quotas.id
        quotas = new_quotas
        db.update(quotas)
    else:
        try:
            db.persist(new_quotas)
        # Python-2-only except syntax; would need `as exc` under Python 3.
        except IntegrityError, exc:
            # NOTE(review): message says "Network" but this handles quotas.
            LOG.warning('Network \"%s\" are already persisted on the Database. Trigger update.' % get_quotas())