def checkPoliciesUnqiueness(policies):
    """Ensure that no two distinct policies share the same name.

    :param policies: iterable of policy objects (each exposing ``name``)
    :raises NotUniqueException: if two different policies have equal names
    """
    LOG.debug("Check uniqueness of policies.")
    for policy in policies:
        for comp_policy in policies:
            # BUG FIX: the original compared policy.name against the
            # comp_policy OBJECT (not comp_policy.name), so duplicate
            # names could never be detected.
            if policy.name == comp_policy.name and policy != comp_policy:
                raise NotUniqueException("policy:\"%s\" is not unique." % policy.name)
        LOG.debug("policy \"%s\" is unique." % policy.name)
def checkTopolgoyUniqueness(topology):
    """Verify that the topology's external name is not already in use.

    :param topology: topology object to validate
    :raises NotUniqueException: if a different stored topology has the
        same ``ext_name``
    """
    db = DatabaseManager()
    # check names
    LOG.debug("Check uniqueness of name of the toplogy \"%s\"." % topology.name)
    for stored in db.get_all(Topology):
        if topology.ext_name == stored.ext_name and topology.id != stored.id:
            raise NotUniqueException("Topology name \"%s\" is already used." % topology.name)
def checkNetworksUniqueness(networks):
    """Verify that every network name occurs only once in the list.

    :param networks: iterable of network objects (each exposing ``name``)
    :raises NotUniqueException: if two distinct networks share a name
    """
    LOG.debug("\"Check uniqueness of networks.\"")
    for candidate in networks:
        for other in networks:
            if candidate.name == other.name and candidate != other:
                raise NotUniqueException("network:\"%s\" is not unique." % candidate.name)
        LOG.debug("network \"%s\" is unique." % candidate.name)
def update(self, attributes):
    """
    It configures a running EPCaaS using parameters passed by SM.

    :param attributes: is a dict that contains the OCCI attributes passed
        by SM (X-OCCI-ATTRIBUTE)
    """
    LOG.debug("Executing update/provisioning logic")
    stack_state, stack_id, output = self.state()
    # Only reconfigure once the stack has settled into a stable state.
    if stack_state not in ("CREATE_COMPLETE", "UPDATE_COMPLETE"):
        LOG.debug("Stack is not in a stable state a.t.m.. Retry")
        return False
    confg = Configurator(token=self.token,
                         tenant=self.tenant_name,
                         region=self.region_name,
                         sm_parameters=self.sm_parameters,
                         conf_param=self.conf_param,
                         attributes=attributes,
                         epc_config=self.epc_config,
                         output=output,
                         event=self.event)
    # XXX no management of this thread is done
    confg.start()
def bill_start_events(self, client, log_server):
    """Search the log server for completed provisioning events and publish
    a 'start' billing record to RCB for each one.

    :param client: AMQP client used to publish billing events
    :param log_server: log-service client used to run the search
    :raises: re-raises any failure of the log-service query
    """
    start_billing_query = SearchQuery(
        search_range=self.sr,
        query='phase_event:done AND so_phase:provision')
    try:
        start_results = log_server.search(start_billing_query)
        LOG.debug('Number of start billing events found: ' +
                  str(len(start_results.messages)))
        for start_event in start_results.messages:
            start_message = json.loads(start_event.message)
            # Build the RCB payload from the logged SO metadata.
            rcb_message = {
                'service_type': start_message.get('sm_name', 'none'),
                'instance_id': start_message.get('so_id', 'none'),
                'tenant_id': start_message.get('tenant', 'mcntub'),
                'status': 'start',
            }
            LOG.debug('Sending start billing event to RCB: ' +
                      rcb_message.__repr__())
            promise = client.basic_publish(exchange='mcn',
                                           routing_key='events',
                                           body=json.dumps(rcb_message))
            client.wait(promise)
    except Exception as e:
        LOG.error(
            'Cannot issue query to the log service to extract start events.'
        )
        raise e
def __init__(self, so_e, token, tenant_name, ready_event, stop_event):
    """Initialise the EPC SO decision thread and its scaling bookkeeping."""
    # super(EPCSODecision, self).__init__(so_e, token, tenant_name)
    service_orchestrator.Decision.__init__(self, so_e, token, tenant_name)
    threading.Thread.__init__(self)
    self.so_e = so_e
    self.token = token
    self.ready_event = ready_event
    self.stop_event = stop_event
    self.pol_eng = PolicyEngine()
    # scaling decisions are permitted until told otherwise
    self.scaling_allowed = True
    # self.scaleout_triggered = True
    # self.scaleout_success = True
    # self.scalein_triggered = True
    # self.scalein_success = True
    # time.time() when last MME scaling (up/down) occured
    self.last_mme_scaling_time = 0
    # time.time() when last GW scaling (up/down) occured
    self.last_gw_scaling_time = 0
    self.num_of_mme = 1
    self.num_of_gw = 1
    LOG.debug("EPC SO Decision init")
def update(self, provisioning=False, attributes=None):
    """
    deploy updated SICs.

    :param provisioning: forwarded to the template generator
    :param attributes: optional dict of OCCI attributes; may carry new
        MaaS / MOBaaS endpoints
    """
    LOG.debug('Executing update deployment logic')
    # Check if attributes are being updated
    if attributes:
        if 'mcn.endpoint.maas' in attributes:
            self.maas_endpoint = str(attributes['mcn.endpoint.maas'])
        if 'mcn.endpoint.mobaas' in attributes:
            self.mobaas_endpoint = str(attributes['mcn.endpoint.mobaas'])
    # Get new template
    generator = icnaas.template_generator.ICNaaSTemplateGenerator(
        self.routers, self.maas_endpoint, self.mobaas_endpoint)
    template = generator.generate(provisioning, FMC_ENABLED)
    # Wait for any pending operation to complete.
    # BUG FIX: the original `while True` looped forever (without even
    # sleeping) when stack_id was None; nothing can be pending in that
    # case, so only wait while a stack actually exists.
    while self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        if tmp['state'] in ('CREATE_COMPLETE', 'UPDATE_COMPLETE'):
            break
        time.sleep(10)
    # Deploy new template
    if self.stack_id is not None:
        self.deployer.update(self.stack_id, template, self.token)
    # Mark as updated for SOD
    self.updated = True
def run(self):
    """
    Decision part implementation goes here.

    Runtime loop: waits for deploy/provision to finish, then—while the
    stack exists—waits for UPDATE_COMPLETE, refreshes router info,
    reconnects to MaaS and polls metrics until the SOE flags an update.
    """
    while True:
        # It is unlikely that logic executed will be of any use until the provisioning phase has completed
        LOG.debug('Waiting for deploy and provisioning to finish')
        self.event.wait()
        LOG.debug('Starting runtime logic...')
        # Decision logic
        # Until service instance is destroyed
        while self.so_e.stack_id is not None:
            # Check if update is complete
            while True:
                tmp = self.so_e.deployer.details(self.so_e.stack_id,
                                                 self.so_e.token)
                if tmp['state'] == 'UPDATE_COMPLETE':
                    break
                else:
                    # poll every 10s until the stack settles
                    time.sleep(10)
            # Set updated back to False
            self.so_e.updated = False
            # Update the information about CCNx routers
            self.so_e.state()
            # Then, attempt to connect to MaaS
            self.monitor = icnaas.monitor.ICNaaSMonitorCCNRouter(
                self.so_e.maas_endpoint)
            # Afterwards, keep checking the metrics until service is updated
            while not self.so_e.updated:
                self.check_metrics()
                # re-check the updated flag every 10s for up to a minute
                for i in range(0, 6):
                    if self.so_e.updated:
                        break
                    time.sleep(10)
        # NOTE(review): `ready_event` is not defined in this scope and will
        # raise NameError if this line is ever reached — likely a leftover;
        # confirm whether `self.ready_event` (or removal) was intended.
        self.event = ready_event
def run(self):
    """
    Decision part implementation goes here.

    Runtime loop: waits for deploy/provision to finish, then—while the
    stack exists—waits for UPDATE_COMPLETE, refreshes router info,
    reconnects to MaaS and polls metrics until the SOE flags an update.
    """
    while True:
        # It is unlikely that logic executed will be of any use until the provisioning phase has completed
        LOG.debug('Waiting for deploy and provisioning to finish')
        self.event.wait()
        LOG.debug('Starting runtime logic...')
        # Decision logic
        # Until service instance is destroyed
        while self.so_e.stack_id is not None:
            # Check if update is complete
            while True:
                tmp = self.so_e.deployer.details(self.so_e.stack_id,
                                                 self.so_e.token)
                if tmp['state'] == 'UPDATE_COMPLETE':
                    break
                else:
                    # poll every 10s until the stack settles
                    time.sleep(10)
            # Set updated back to False
            self.so_e.updated = False
            # Update the information about CCNx routers
            self.so_e.state()
            # Then, attempt to connect to MaaS
            self.monitor = icnaas.monitor.ICNaaSMonitorCCNRouter(self.so_e.maas_endpoint)
            # Afterwards, keep checking the metrics until service is updated
            while not self.so_e.updated:
                self.check_metrics()
                # re-check the updated flag every 10s for up to a minute
                for i in range(0, 6):
                    if self.so_e.updated:
                        break
                    time.sleep(10)
        # NOTE(review): `ready_event` is not defined in this scope and will
        # raise NameError if this line is ever reached — likely a leftover;
        # confirm whether `self.ready_event` (or removal) was intended.
        self.event = ready_event
def update(self, provisioning=False, attributes=None):
    """
    deploy updated SICs.

    :param provisioning: forwarded to the template generator
    :param attributes: optional dict of OCCI attributes; may carry new
        MaaS / MOBaaS endpoints
    """
    LOG.debug('Executing update deployment logic')
    # Check if attributes are being updated
    if attributes:
        if 'mcn.endpoint.maas' in attributes:
            self.maas_endpoint = str(attributes['mcn.endpoint.maas'])
        if 'mcn.endpoint.mobaas' in attributes:
            self.mobaas_endpoint = str(attributes['mcn.endpoint.mobaas'])
    # Get new template
    generator = icnaas.template_generator.ICNaaSTemplateGenerator(
        self.routers, self.maas_endpoint, self.mobaas_endpoint)
    template = generator.generate(provisioning, FMC_ENABLED)
    # Wait for any pending operation to complete.
    # BUG FIX: the original `while True` looped forever (without even
    # sleeping) when stack_id was None; nothing can be pending in that
    # case, so only wait while a stack actually exists.
    while self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        if tmp['state'] in ('CREATE_COMPLETE', 'UPDATE_COMPLETE'):
            break
        time.sleep(10)
    # Deploy new template
    if self.stack_id is not None:
        self.deployer.update(self.stack_id, template, self.token)
    # Mark as updated for SOD
    self.updated = True
def checkRequirementsDependencies(requirements=[], service_instances=[]): for requirement in requirements: try: counter = 0 LOG.debug("Check dependencies for requirement \"%s\"." % requirement.name) for service_instance in service_instances: if requirement.source == service_instance.name: LOG.debug("source \"%s\" was found." % requirement.source) if requirement.parameter == 'private_ip' or requirement.parameter == 'public_ip': LOG.debug("parameter \"%s\" is available." % requirement.parameter) for obj in service_instance.networks: if requirement.obj_name == obj.name: LOG.debug("obj_name \"%s\" was found." % requirement.obj_name) counter += 1 else: raise InvalidInputException("parameter:\"%s\" is not available." % requirement.parameter) if counter == 0: raise NotFoundException("requirement:\"%s\" was not found (\"source:%s\", \"obj_name:%s\")." % ( requirement.name, requirement.source, requirement.obj_name)) elif counter == 1: LOG.debug("requirement \"%s\" is valid." % requirement.name) else: raise InvalidInputException("is not valid. Found sources or objects several times.") except Exception, exc: exc.message = 'Requirement:\"%s\"->%s' % (requirement.name, exc.message) raise exc
def __init__(self, token, tenant):
    """Wire up the execution (SOE) and decision (SOD) parts, sharing one
    readiness event, and start the SOD thread."""
    # this python thread event is used to notify the SOD that the runtime phase can execute its logic
    runtime_ready = threading.Event()
    self.event = runtime_ready
    self.so_e = SOE(token=token, tenant=tenant, ready_event=runtime_ready)
    self.so_d = SOD(so_e=self.so_e, token=token, ready_event=runtime_ready)
    LOG.debug('Starting SOD thread...')
    self.so_d.start()
def dispose(self):
    """ Dispose SICs on burns and ubern """
    LOG.info("disposal of stack")
    LOG.info('Calling dispose')
    # self.resolver.dispose()
    if self.hadoop_master is None:
        return
    ###################################################################
    # IMPORTANT NOTE: the floating IP has to be disassociated before  #
    # the stack is deleted! Else, disposal will fail. Until now,      #
    # neutronclient hasn't worked doing this which is why it has to   #
    # be done either within OpenStack Horizon or from the terminal!   #
    ###################################################################
    # previous (non-working) neutronclient attempt kept for reference:
    # ep = self.deployer.endpoint
    # ep = ep[0:ep.find(":",6)]+":5000/v2.0"
    # neutron = client.Client('2.0', endpoint_url=ep, token=self.token)
    # neutron.format = 'json'
    # # floating IP has to be disassociated before deleting the stack, see
    # # https://ask.openstack.org/en/question/25866/neutron-error/
    # neutron.update_floatingip(self.floatingIpId,
    #                           {'floatingip': {'port_id': None}})
    LOG.debug('Deleting stack: ' + self.hadoop_master)
    self.deployer.dispose(self.hadoop_master, self.token)
    self.hadoop_master = None
def update(self, attributes):
    """
    It configures a running EPCaaS using parameters passed by SM.

    :param attributes: is a dict that contains the OCCI attributes passed
        by SM (X-OCCI-ATTRIBUTE)
    """
    LOG.debug('Executing update/provisioning logic')
    stack_state, stack_id, output = self.state()
    # Bail out unless the stack has reached a stable state.
    if stack_state not in ('CREATE_COMPLETE', 'UPDATE_COMPLETE'):
        LOG.debug('Stack is not in a stable state a.t.m.. Retry')
        return False
    confg = Configurator(token=self.token,
                         tenant=self.tenant_name,
                         region=self.region_name,
                         sm_parameters=self.sm_parameters,
                         conf_param=self.conf_param,
                         attributes=attributes,
                         epc_config=self.epc_config,
                         output=output,
                         event=self.event)
    # XXX no management of this thread is done
    confg.start()
def provision(self):
    """
    (Optional) if not done during deployment - provision.
    """
    LOG.debug('Executing resource provisioning logic')
    # signal the decision thread that the deploy phase has finished
    self.event.set()
def set_ips(self, unit, ext_id):
    """
    Sets the fixed- and floating-ips of the given unit.

    :param unit: the to be processed unit
    :param ext_id: id of the stack containing the unit
    """
    # give the stack up to five minutes to publish its outputs
    timeout = time.time() + 60 * 5
    while True:
        template = self.client.stacks.get(ext_id).to_dict()
        if u'outputs' in template:
            LOG.debug("outputs: " + str(template['outputs']))
            break
        if time.time() > timeout:
            # BUG FIX: on timeout the stack may still have no 'outputs'
            # key; the original then crashed with a KeyError when logging
            # and iterating. Stop waiting and fall through gracefully.
            LOG.debug("timed out waiting for stack outputs")
            break
        time.sleep(10)
    # .get with a default keeps the loop safe when no outputs exist
    for ip in template.get('outputs', []):
        if ip['output_key'].split('.')[2] == unit.hostname:
            if ip['output_key'].endswith('public'):
                k = ip['output_key'].split('.')[-2]
                unit.floating_ips[k] = ip['output_value']
                LOG.debug(ip['output_value'] + " is a floating ip")
            elif ip['output_key'].endswith('private'):
                k = ip['output_key'].split('.')[-2]
                unit.ips[k] = ip['output_value']
                LOG.debug(ip['output_value'] + " is a fixed ip")
    LOG.debug("ips: " + str(unit.ips))
    LOG.debug("floating_ips: " + str(unit.floating_ips))
def deploy(self):
    """ deploy SICs. """
    LOG.debug('Executing deployment logic')
    # deploy only once; subsequent calls are no-ops
    if self.stack_id is None:
        self.stack_id = self.deployer.deploy(self.template, self.token)
def __init__(self, token, tenant):
    """Create the execution and decision units and launch the SOD thread."""
    # this python thread event is used to notify the SOD that the runtime phase can execute its logic
    evt = threading.Event()
    self.event = evt
    self.so_e = ServiceOrchestratorExecution(token, tenant, evt)
    self.so_d = ServiceOrchestratorDecision(self.so_e, token, evt)
    LOG.debug('Starting SOD thread...')
    self.so_d.start()
def get_endpoint(service_type, endpoint_type=None, region_name=None):
    """Resolve a service endpoint via keystone.

    :param service_type: type of the service to look up
    :param endpoint_type: optional endpoint interface
    :param region_name: optional region filter
    :return: the endpoint URL reported by keystone
    """
    from clients import keystone
    # ##Init keystone client
    ksclient = keystone.Client()
    endpoint = ksclient.get_endpoint(service_type=service_type,
                                     endpoint_type=endpoint_type,
                                     region_name=region_name)
    LOG.debug("endpoint for service_type %s is %s" % (service_type, endpoint,))
    return endpoint
def __init__(self, so_e, token, tenant_name, ready_event, stop_event):
    """Initialise the EPC SO decision thread and its scaling bookkeeping."""
    # super(EPCSODecision, self).__init__(so_e, token, tenant_name)
    service_orchestrator.Decision.__init__(self, so_e, token, tenant_name)
    threading.Thread.__init__(self)
    self.so_e = so_e
    self.token = token
    self.ready_event = ready_event
    self.stop_event = stop_event
    self.pol_eng = PolicyEngine()
    # scaling decisions are permitted until told otherwise
    self.scaling_allowed = True
    # self.scaleout_triggered = True
    # self.scaleout_success = True
    # self.scalein_triggered = True
    # self.scalein_success = True
    # time.time() when last MME scaling (up/down) occured
    self.last_mme_scaling_time = 0
    # time.time() when last GW scaling (up/down) occured
    self.last_gw_scaling_time = 0
    self.num_of_mme = 1
    self.num_of_gw = 1
    LOG.debug('EPC SO Decision init')
def deploy(self):
    """ deploy SICs. """
    LOG.debug('Executing deployment logic')
    # deploy only once; subsequent calls are no-ops
    if self.stack_id is None:
        self.stack_id = self.deployer.deploy(self.template, self.token)
def __init__(self):
    """Load the initial heat template from disk into ``self.graph``."""
    try:
        # Load the initial heat template
        # TODO replace with dynamic template generation
        if TESTING_TILAB:
            path = os.path.join(SO_DIR, 'data', 'maas-test-tilab-1.yaml')
        else:
            # other candidate templates kept for reference:
            #   epc-demo-review-1-gw-dns.yaml
            #   maas-test-bart-1-deploy.yaml
            #   epc-demo-review-1-gw.yaml
            path = os.path.join(
                SO_DIR, 'data', 'epc-y3-1-gw-fixed-addressing-update.yaml')
        # BUG FIX: use a context manager so the file handle is closed
        # even if read() raises (the original leaked it on error).
        with open(path, 'r') as templ_file:
            self.graph = templ_file.read()
    except IOError:
        LOG.debug('File not found')
def __init__(self, token, tenant):
    """Create the execution and decision units and launch the SOD thread."""
    # this python thread event is used to notify the SOD that the runtime phase can execute its logic
    evt = threading.Event()
    self.event = evt
    self.so_e = ServiceOrchestratorExecution(token, tenant, evt)
    self.so_d = ServiceOrchestratorDecision(self.so_e, token, evt)
    LOG.debug('Starting SOD thread...')
    self.so_d.start()
def provision(self):
    """
    (Optional) if not done during deployment - provision.
    """
    LOG.debug('Executing resource provisioning logic')
    # signal the decision thread that the deploy phase has finished
    self.event.set()
def provision(self, attributes):
    """
    Takes care of the provisioning of a deployed instance of OpenEPC.

    It calls the service-specific, implementation-specific method to
    actually configure service parameters on the SIC.

    :param attributes: OCCI attributes passed by SM. Must match the
        attributes definition of the service in the SM. E.g. in this way
        the SM can pass to a SO parameters to be used in the config call.
        This is useful when an higher level SM, e.g. an E2E SM, manages
        also one or more support services, like DNSaaS or MaaS; it will
        pass the EPCaaS SM the relevant parameters of those services.
    """
    # SR 16/6/2015: currently a no-op — the epcaas VMs are configured once
    # the MaaS and DNSaaS ip addresses become available, i.e. when update
    # is called from the SM.
    LOG.debug("Executing provisioning logic")
    # NOTE: change this when real params are passed to config
    # parameters = attributes
    # if self.epc_config.config(parameters=parameters) != 0:
    #     LOG.debug('Provisioning failed')
    pass
def run(self):
    """
    Decision part implementation goes here.

    Runtime loop: waits for deploy/provision to finish, then—while the
    stack exists—waits for UPDATE_COMPLETE, refreshes router info and
    runs the monitoring pass.
    """
    # one accumulator list per tracked host metric
    self.hosts_cpu_load.append(MyList())
    self.hosts_cpu_util.append(MyList())
    self.hosts_memory.append(MyList())
    while True:
        LOG.debug('Waiting for deploy and provisioning to finish')
        self.event.wait()
        LOG.debug('Starting runtime logic...')
        # Decision logic
        # Until service instance is destroyed
        while self.so_e.stack_id is not None:
            # Check if update is complete
            while True:
                #tmp = self.so_e.deployer.details(self.so_e.stack_id, self.so_e.token)
                tmp = self.so_e.state()
                # state() returns a tuple; element 0 is the stack status
                if tmp[0] == 'UPDATE_COMPLETE':
                    break
                else:
                    time.sleep(myparameters.STACK_CREATION_UPDATE)
            # Set updated back to False
            self.so_e.updated = False
            # Update the information about CCNx routers
            self.so_e.state()
            # Monitor the resources
            self.monitoring()
        # NOTE(review): `ready_event` is not defined in this scope and will
        # raise NameError if this line is ever reached — likely a leftover;
        # confirm whether `self.ready_event` (or removal) was intended.
        self.event = ready_event
def __init__(self, token, tenant):
    """Wire up the execution (SOE) and decision (SOD) parts, sharing one
    readiness event, and start the SOD thread."""
    # this python thread event is used to notify the SOD that the runtime phase can execute its logic
    runtime_ready = threading.Event()
    self.event = runtime_ready
    self.so_e = SOE(token=token, tenant=tenant, ready_event=runtime_ready)
    self.so_d = SOD(so_e=self.so_e, token=token, ready_event=runtime_ready)
    LOG.debug('Starting SOD thread...')
    self.so_d.start()
def set_ips(self, unit, ext_id):
    """
    Sets the fixed- and floating-ips of the given unit.

    :param unit: the to be processed unit
    :param ext_id: id of the stack containing the unit
    """
    # give the stack up to five minutes to publish its outputs
    timeout = time.time() + 60 * 5
    while True:
        template = self.client.stacks.get(ext_id).to_dict()
        if u"outputs" in template:
            LOG.debug("outputs: " + str(template["outputs"]))
            break
        if time.time() > timeout:
            # BUG FIX: on timeout the stack may still have no 'outputs'
            # key; the original then crashed with a KeyError when logging
            # and iterating. Stop waiting and fall through gracefully.
            LOG.debug("timed out waiting for stack outputs")
            break
        time.sleep(10)
    # .get with a default keeps the loop safe when no outputs exist
    for ip in template.get("outputs", []):
        if ip["output_key"].split(".")[2] == unit.hostname:
            if ip["output_key"].endswith("public"):
                k = ip["output_key"].split(".")[-2]
                unit.floating_ips[k] = ip["output_value"]
                LOG.debug(ip["output_value"] + " is a floating ip")
            elif ip["output_key"].endswith("private"):
                k = ip["output_key"].split(".")[-2]
                unit.ips[k] = ip["output_value"]
                LOG.debug(ip["output_value"] + " is a fixed ip")
    LOG.debug("ips: " + str(unit.ips))
    LOG.debug("floating_ips: " + str(unit.floating_ips))
def update(self, provisioning=False, attributes=None):
    """
    deploy updated SICs.

    :param provisioning: accepted for interface compatibility
    :param attributes: optional dict of OCCI attributes; may carry a new
        MaaS endpoint
    """
    LOG.debug('Executing update deployment logic')
    # Check if attributes are being updated
    if attributes:
        if 'mcn.endpoint.maas' in attributes:
            self.maas_endpoint = str(attributes['mcn.endpoint.maas'])
    # Get new template.
    # BUG FIX: close the template file via a context manager instead of
    # leaking the open handle.
    with open(os.path.join(BUNDLE_DIR, 'data', 'influxdb-cyclops.yaml'), 'r') as templ_file:
        self.graph = templ_file.read()
    # Wait for any pending operation to complete.
    # BUG FIX: the original `while True` looped forever (without even
    # sleeping) when stack_id was None; only wait while a stack exists.
    while self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        if tmp['state'] in ('CREATE_COMPLETE', 'UPDATE_COMPLETE'):
            break
        time.sleep(10)
    # Deploy new template
    if self.stack_id is not None:
        self.deployer.update(self.stack_id, self.graph, self.token)
    # Mark as updated for SOD
    self.updated = True
def checkRequirementsDependencies(requirements=[], service_instances=[]): for requirement in requirements: try: counter = 0 LOG.debug("Check dependencies for requirement \"%s\"." % requirement.name) for service_instance in service_instances: if requirement.source == service_instance.name: LOG.debug("source \"%s\" was found." % requirement.source) if requirement.parameter == 'private_ip' or requirement.parameter == 'public_ip': LOG.debug("parameter \"%s\" is available." % requirement.parameter) for obj in service_instance.networks: if requirement.obj_name == obj.name: LOG.debug("obj_name \"%s\" was found." % requirement.obj_name) counter += 1 else: raise InvalidInputException( "parameter:\"%s\" is not available." % requirement.parameter) if counter == 0: raise NotFoundException( "requirement:\"%s\" was not found (\"source:%s\", \"obj_name:%s\")." % (requirement.name, requirement.source, requirement.obj_name)) elif counter == 1: LOG.debug("requirement \"%s\" is valid." % requirement.name) else: raise InvalidInputException( "is not valid. Found sources or objects several times.") except Exception, exc: exc.message = 'Requirement:\"%s\"->%s' % (requirement.name, exc.message) raise exc
def state(self):
    """
    Report on state.

    :return: tuple of (stack state, stack id as str, outputs or '')
    """
    LOG.info("retrieving state of the running stack with id %s" % self.stack_id)
    # LOG.info('Resolver state:')
    # LOG.info(resolver_state.__repr__())
    if self.stack_id is None:
        # nothing deployed yet -> report a stable default
        return 'CREATE_COMPLETE', 'N/A', ''
    topology = TopologyOrchestrator.get(self.stack_id)
    stk = self.deployer.details(topology.ext_id)
    res = {'state': stk['stack_status'],
           'name': stk['stack_name'],
           'id': stk['id']}
    if 'outputs' in stk:
        res['output'] = stk['outputs']
    # fall back to '' when the stack exposes no outputs
    output = res.get('output', '')
    LOG.debug(" state %s, output %s" % (res['state'], output))
    return res['state'], str(self.stack_id), output
def state(self):
    """
    Report on state.

    :return: tuple of (stack state, stack id as str, outputs or '')
    """
    LOG.info("retrieving state of the running stack with id %s" % self.stack_id)
    # LOG.info('Resolver state:')
    # LOG.info(resolver_state.__repr__())
    if self.stack_id is None:
        # nothing deployed yet -> report a stable default
        return 'CREATE_COMPLETE', 'N/A', ''
    topology = TopologyOrchestrator.get(self.stack_id)
    stk = self.deployer.details(topology.ext_id)
    res = {'state': stk['stack_status'],
           'name': stk['stack_name'],
           'id': stk['id']}
    if 'outputs' in stk:
        res['output'] = stk['outputs']
    # fall back to '' when the stack exposes no outputs
    output = res.get('output', '')
    LOG.debug(" state %s, output %s" % (res['state'], output))
    return res['state'], str(self.stack_id), output
def provision(self, attributes):
    """
    (Optional) if not done during deployment - provision.

    Records support-service endpoints (DSS, DNS API, DNS forwarder) from
    the given OCCI attributes, triggers a stack update, then signals the
    decision thread that provisioning is done.

    :param attributes: dict of OCCI attributes passed by the SM, or falsy
    """
    LOG.info('Executing resource provisioning logic...')
    if attributes:
        #print attributes
        if 'mcn.endpoint.dssaas' in attributes:
            self.dss_endpoint = str(attributes['mcn.endpoint.dssaas'])
        if 'mcn.endpoint.api' in attributes:
            self.dns_api = str(attributes['mcn.endpoint.api'])
            # build the DNSaaS client as soon as its API endpoint is known
            self.dnsaas = DnsaasClientAction(self.dns_api, token=self.token)
            LOG.info('DNS EP is: ' + self.dns_api )
        if 'mcn.endpoint.forwarder' in attributes:
            self.dns_forwarder = str(attributes['mcn.endpoint.forwarder'])
            LOG.info('DNS forwarder is: ' + self.dns_forwarder)
    # Update stack
    self.update(True)
    # if self.dns_api is not None:
    LOG.debug('Executing resource provisioning logic')
    # once logic executes, deploy phase is done
    self.ready_event.set()
def list_net(self):
    """Return every neutron network wrapped as a Network object."""
    LOG.debug('Requesting list of networks...')
    nets = self.neutron.list_networks().get('networks')
    return [Network(net.get('name'), net.get('id'), net.get('router:external'))
            for net in nets]
def provision(self, attributes):
    """
    Takes care of the provisioning of a deployed instance of OpenEPC.

    It calls the service-specific, implementation-specific method to
    actually configure service parameters on the SIC.

    :param attributes: OCCI attributes passed by SM. Must match the
        attributes definition of the service in the SM. E.g. in this way
        the SM can pass to a SO parameters to be used in the config call.
        This is useful when an higher level SM, e.g. an E2E SM, manages
        also one or more support services, like DNSaaS or MaaS; it will
        pass the EPCaaS SM the relevant parameters of those services.
    """
    # SR 16/6/2015: currently a no-op — the epcaas VMs are configured once
    # the MaaS and DNSaaS ip addresses become available, i.e. when update
    # is called from the SM.
    LOG.debug('Executing provisioning logic')
    # NOTE: change this when real params are passed to config
    # parameters = attributes
    # if self.epc_config.config(parameters=parameters) != 0:
    #     LOG.debug('Provisioning failed')
    pass
def create_tables(engine):
    """
    Drop all and recreate

    :param engine: SQLAlchemy engine the schema is (re)created on
    """
    LOG.debug("drop and create tables")
    # destructive: wipes every mapped table before rebuilding the schema
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
def dispose(self):
    """ Dispose SICs. """
    LOG.debug('Executing disposal logic')
    if self.stack_id is None:
        return
    # tear down the stack and forget its id so dispose is idempotent
    self.deployer.dispose(self.stack_id, self.token)
    self.stack_id = None
def get_token():
    """Fetch and return an authentication token from keystone."""
    from clients import keystone
    # a fresh client is created per call; keystone issues the token
    token = keystone.Client().get_token()
    LOG.debug("token: %s" % token)
    return token
def checkSecurityGroupUniqueness(security_group):
    """Ensure the security group's name is not already stored in the DB.

    :param security_group: security group to validate
    :raises NotUniqueException: if a different stored group shares the name
    """
    db = DatabaseManager()
    LOG.debug("Check uniqueness of name of the security group \"%s\"." % security_group.name)
    for existing in db.get_all(SecurityGroup):
        if security_group.name == existing.name and security_group != existing:
            raise NotUniqueException("SecurityGroup:\"%s\" is already existing." % security_group.name)
    LOG.debug("Check rules of security group \"%s\"." % security_group.name)
def get_item(self, res_id, item_name, **kwargs):
    """Fetch a monitoring statistic for a resource.

    :param res_id: resource id to query
    :param item_name: meter name to fetch
    :param kwargs: optional 'period' in seconds (defaults to 60)
    :return: the statistic value returned by the monitoring client
    """
    LOG.debug("Monitor: request resource %s for %s" % (res_id, item_name))
    period = kwargs.get('period') or 60
    item_value = self.cmclient.get_statitics(resource_id=res_id,
                                             meter_name=item_name,
                                             period=period)
    LOG.debug("Monitor: received %s" % item_value)
    return item_value
def dispose(self):
    """ Dispose SICs. """
    LOG.debug('Executing disposal logic')
    if self.stack_id is None:
        return
    # tear down the stack and forget its id so dispose is idempotent
    self.deployer.dispose(self.stack_id, self.token)
    self.stack_id = None
def get_token():
    """Fetch and return an authentication token from keystone."""
    from clients import keystone
    # a fresh client is created per call; keystone issues the token
    token = keystone.Client().get_token()
    LOG.debug("token: %s" % token)
    return token
def __init__(self, token, tenant):
    """Create SOE/SOD sharing ready/stop events and start the SOD thread."""
    self.ready_event = threading.Event()
    self.stop_event = threading.Event()
    self.so_e = SOE(token, tenant, self.ready_event, self.stop_event)
    self.so_d = SOD(self.so_e, token, tenant,
                    self.ready_event, self.stop_event)
    LOG.debug('Starting SOD thread...')
    self.so_d.start()
def checkSecurityGroup(security_group): try: LOG.debug("Check security group \"%s\"." % security_group.name) for rule in security_group.rules: checkRule(rule) except Exception, exc: exc.message = 'SecurityGroup:\"%s\"->%s' % (security_group.name, exc.message) raise exc
def checkPoliciesUnqiueness(policies):
    """Ensure that no two distinct policies share the same name.

    :param policies: iterable of policy objects (each exposing ``name``)
    :raises NotUniqueException: if two different policies have equal names
    """
    LOG.debug("Check uniqueness of policies.")
    for policy in policies:
        for comp_policy in policies:
            # BUG FIX: the original compared policy.name against the
            # comp_policy OBJECT (not comp_policy.name), so duplicate
            # names could never be detected.
            if policy.name == comp_policy.name and policy != comp_policy:
                raise NotUniqueException("policy:\"%s\" is not unique." % policy.name)
        LOG.debug("policy \"%s\" is unique." % policy.name)
def checkNetworksUniqueness(networks):
    """Verify that every network name occurs only once in the list.

    :param networks: iterable of network objects (each exposing ``name``)
    :raises NotUniqueException: if two distinct networks share a name
    """
    LOG.debug("\"Check uniqueness of networks.\"")
    for candidate in networks:
        for other in networks:
            if candidate.name == other.name and candidate != other:
                raise NotUniqueException("network:\"%s\" is not unique." % candidate.name)
        LOG.debug("network \"%s\" is unique." % candidate.name)
def __init__(self, token, tenant, **kwargs):
    """Create the SOE, optionally passing an application callback URL.

    :param kwargs: may carry 'app_url' (callback URL forwarded to the SOE)
    """
    # this python thread event is used to notify the SOD that the runtime phase can execute its logic
    self.event = threading.Event()
    app_url = kwargs.get('app_url', '')
    LOG.debug('app_url callback: ' + app_url)
    if app_url:
        self.so_e = SOE(token=token, tenant=tenant,
                        ready_event=self.event, app_url=app_url)
    else:
        self.so_e = SOE(token=token, tenant=tenant, ready_event=self.event)
def __init__(self):
    """Set up the monitoring agent, heat client and thread registries."""
    LOG.debug("Starting RuntimeAgent.")
    # Get monitor name and service
    conf = SysUtil().get_sys_conf()
    self.monitoring_service = FactoryAgent.get_agent(conf.get('monitoring'))
    self.heat_client = HeatClient()
    # per-topology bookkeeping of running threads
    self.policy_threads = {}
    self.checker_threads = {}
def stop(self, _id):
    """Stop every policy thread registered under _id.

    The checker-thread stop call is intentionally disabled; only the
    surrounding log lines remain.
    """
    threads = self.policy_threads[_id]
    checker = self.checker_threads[_id]
    LOG.debug("Stopping all PolicyThreads %s" % threads)
    for policy_thread in threads:
        LOG.debug("Stopping PolicyThread %s" % policy_thread)
        policy_thread.stop()
    LOG.debug("Stopped all PolicyThreads %s" % threads)
    LOG.debug("Stopping CheckerThread: %s" % checker)
    #self.checker_threads[_id].stop()
    LOG.debug("Stopped CheckerThread %s" % checker)
def run(self): """ Decision part implementation goes here. """ # it is unlikely that logic executed will be of any use until the provisioning phase has completed LOG.debug('Waiting for deploy and provisioning to finish') self.event.wait() LOG.debug('Starting runtime logic...')
def checkSecurityGroup(security_group): try: LOG.debug("Check security group \"%s\"." % security_group.name) for rule in security_group.rules: checkRule(rule) except Exception, exc: exc.message = 'SecurityGroup:\"%s\"->%s' % (security_group.name, exc.message) raise exc
def checkKey(key):
    """Check that the key's name is known to the database.

    :param key: key object to validate
    :raises NotFoundException: if the key name is not stored
    """
    db = DatabaseManager()
    known_names = [existing_key.name for existing_key in db.get_all(Key)]
    if key.name in known_names:
        LOG.debug("key \"%s\" is available." % key)
    else:
        raise NotFoundException(
            "key:\"%s\" is not available. Available keys: %s" % (key, known_names))
def checkAlarm(alarm): try: # check that the meter is available if alarm.meter_name: if alarm.meter_name in METERS: LOG.debug("meter_name \"%s\" is available." % alarm.meter_name) else: raise NotFoundException( "meter_name:\"%s\" is not available. Available meter names:%s" % (alarm.meter_name, METERS)) else: raise NotDefinedException("meter_name:\"%s\" is not defined.") # check that the statistic is available if alarm.statistic in STATISTICS: LOG.debug("statistic \"%s\" is available." % alarm.statistic) else: raise NotFoundException( "statistic:\"%s\" is not available. Available statistics: %s" % (alarm.statistic, STATISTICS)) # check that the evaluation period is an interger greater than 0 if alarm.evaluation_periods: if isinstance(alarm.evaluation_periods, (long, int)): if alarm.evaluation_periods > 0: LOG.debug("evaluation_periods \"%s\" is valid." % alarm.evaluation_periods) else: raise InvalidInputException( "evaluation_periods:\"%s\" is not valid. It must be greater than 0." % alarm.evaluation_periods) else: raise TypeErrorException( "evaluation_periods:\"%s\" is not valid. It must be an integer." % alarm.evaluation_periods) else: raise NotDefinedException("evaluation_periods is not defined.") if alarm.threshold: if isinstance(alarm.threshold, (long, int)): LOG.debug("threshold \"%s\" is valid." % alarm.threshold) else: raise TypeErrorException( "threshold:\"%s\" is not valid. It must be an integer." % alarm.threshold) else: raise NotDefinedException("threshold is not defined.") if alarm.comparison_operator: if alarm.comparison_operator in COMPARISON_OPERATORS: LOG.debug("comparison_operator \"%s\" is available." % alarm.comparison_operator) else: NotFoundException( "comparison_operator:\"%s\" is not available. Available comparison operators: %s" % (alarm.comparison_operator, COMPARISON_OPERATORS)) else: raise NotDefinedException("comparison_operator is not defined.") except Exception, exc: exc.message = 'Alarm->%s' % exc.message raise exc
def checkTopolgoyUniqueness(topology):
    """Verify that the topology's external name is not already in use.

    :param topology: topology object to validate
    :raises NotUniqueException: if a different stored topology has the
        same ``ext_name``
    """
    db = DatabaseManager()
    # check names
    LOG.debug("Check uniqueness of name of the toplogy \"%s\"." % topology.name)
    for stored in db.get_all(Topology):
        if topology.ext_name == stored.ext_name and topology.id != stored.id:
            raise NotUniqueException("Topology name \"%s\" is already used." % topology.name)
def provision(self): """ (Optional) if not done during deployment - provision. """ # TODO add you provision phase logic here # LOG.debug('Executing provisioning logic') # once logic executes, deploy phase is done self.event.set()
def dispose(self):
    """ Dispose SICs """
    LOG.info('Calling dispose...')
    if self.stack_id is None:
        return
    # tear down the stack and forget its id so dispose is idempotent
    LOG.debug('Deleting stack: %s' % self.stack_id)
    self.deployer.dispose(self.stack_id, self.token)
    self.stack_id = None
def get_by_id(self, _class, _id):
    """Fetch a single entity by primary key, committing on success.

    :param _class: mapped class to look up
    :param _id: primary key value
    :return: the matching instance
    :raises: re-raises any lookup/commit failure after rolling back
    """
    try:
        LOG.debug('Get by id %s of class %s' % (_id, _class.__name__))
        res = self.instance.get_by_id(_class, _id)
        self.instance.session.commit()
    except:
        # BUG FIX: rollback was referenced without parentheses, so the
        # session was never actually rolled back on failure.
        self.instance.session.rollback()
        raise
    return res
def __init__(self):
    """Set up the monitoring agent, heat client and thread registries."""
    LOG.debug("Starting RuntimeAgent.")
    # Get monitor name and service
    conf = SysUtil().get_sys_conf()
    self.monitoring_service = FactoryAgent.get_agent(conf.get('monitoring'))
    self.heat_client = HeatClient()
    # per-topology bookkeeping of running threads
    self.policy_threads = {}
    self.checker_threads = {}