def deploy(self, attributes):
    """
    Deploy method.

    :param attributes: OCCI attributes; 'maas.location' selects which
        topology template is deployed.
    """
    if self.stack_id is not None:
        # already deployed - nothing to do
        pass
    parameters = {}
    # defining the location of the topology
    if 'maas.location' in attributes:
        self.location = parameters['location'] = os.environ['location'] = attributes['maas.location']
        LOG.debug("location %s passed via OCCI Attribute" % self.location)
    self.deployer = FactoryAgent().get_agent(self.conf['deployer'])
    self.topology_type = topology_mapping[self.location]
    LOG.info("deploying template %s" % (self.topology_type,))
    # read template; with-statement guarantees the handle is closed
    with open(os.path.join(SO_DIR, 'data/topologies', self.topology_type)) as f:
        template = f.read()
    LOG.debug("content of the topology %s" % template)
    # extracting hot template
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the template ships with the SO, so kept as-is.
        config = yaml.load(template)
        LOG.debug(config)
    except yaml.YAMLError as exc:
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            # FIX: exception must appear in the format string - the old call
            # passed it as a stray logging argument with no placeholder
            LOG.error("Error in configuration file: %s" % exc)
            LOG.error("Error position: (%s:%s)" % (mark.line + 1, mark.column + 1))
        else:
            LOG.error("Error in configuration file: %s" % exc)
def checkTopology(topology):
    """
    Validate a topology: uniqueness and consistency of its service
    instances, their requirements and policies.

    :param topology: topology object exposing name and service_instances.
    :raises Exception: re-raised from the checks, message prefixed with
        the topology name for easier diagnosis.
    """
    LOG.info("Check topology \"%s\"." % topology.name)
    # check service instances
    LOG.debug("Check service_instances of topology %s." % topology.name)
    try:
        for service_instance in topology.service_instances:
            # check service instance uniqueness inside topology
            LOG.debug("Check service instance's uniqueness of \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkServiceInstanceUniqueness(service_instance, topology)
            # check service instance except requirements
            LOG.debug("Check service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkServiceInstance(service_instance)
            # check requirements
            LOG.debug("Check requirements' uniqueness for service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkRequirementsUniqueness(service_instance.requirements)
            LOG.debug("Check requirements' dependencies for service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkRequirementsDependencies(service_instance.requirements, topology.service_instances)
            LOG.debug("Check policies' uniqueness for service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkPoliciesUnqiueness(service_instance.policies)
    except Exception as exc:
        # FIX: 'except E, e' is Python-2-only; 'as' works on 2.6+ and 3.x.
        # Prefix the message with the topology name and re-raise.
        exc.message = 'Topology:\"%s\"->%s' % (topology.name, exc.message)
        raise exc
def state(self):
    """
    Report on state of the running stack.

    :return: (stack_status, stack_id as str, outputs or '').
    """
    LOG.info("retrieving state of the running stack with id %s" % self.stack_id)
    if self.stack_id is not None:
        topology = TopologyOrchestrator.get(self.stack_id)
        stk = self.deployer.details(topology.ext_id)
        res = {'state': stk['stack_status'],
               'name': stk['stack_name'],
               'id': stk['id']}
        if 'outputs' in stk:
            res['output'] = stk['outputs']
        # dict.get replaces the former try/except KeyError dance
        output = res.get('output', '')
        LOG.debug(" state %s, output %s" % (res['state'], output))
        return res['state'], str(self.stack_id), output
    else:
        # nothing deployed yet
        return 'CREATE_COMPLETE', 'N/A', ''
def state(self):
    """
    Report on state.

    Also caches the influxdb IP from the stack outputs when present.

    :return: (state, stack_id, outputs list), or ('Unknown', 'N/A', [])
        when nothing is deployed, or ('Error', stack_id, []) on failure.
    """
    # TODO ideally here you compose what attributes should be returned to the SM
    # In this case only the state attributes are returned.
    if self.stack_id is not None:
        LOG.info('stack id state: ' + str(self.stack_id))
        try:
            tmp = self.deployer.details(self.stack_id, self.token)
            LOG.info('###### : ' + str(tmp.get('output')))
            if tmp.get('output', None) is not None:
                for output in tmp['output']:
                    if output['output_key'].startswith('mcn.endpoint.influxdb'):
                        # endpoint shaped like http://<ip>:<port>; keep the bare IP
                        influxdb_url = output['output_value']
                        self.influxdb_ip = influxdb_url.split(':')[1][2:]
                        LOG.debug('influxdb_ip: ' + self.influxdb_ip)
                        break
                LOG.debug('State: ' + tmp['state'] + ' len output =' + str(len(tmp['output'])))
                return tmp['state'], self.stack_id, tmp['output']
            else:
                return tmp['state'], self.stack_id, []
        except Exception:
            # FIX: format_exc() returns the traceback string; print_exc()
            # returns None, so the old call logged 'None'. Also narrowed
            # the bare except and dropped the Py2-only duplicate 'print'.
            LOG.debug(traceback.format_exc())
            LOG.debug('Error/Exception getting stack!')
            return 'Error', self.stack_id, []
    else:
        return 'Unknown', 'N/A', []
def provision(self):
    """
    Provision SICs: record host IPs, write the PEM key, provision DNS,
    then run the ClearWater component provisioners in parallel.
    """
    LOG.info('Calling provision...')
    stack_outputs = self.deployer.details(self.stack_id, self.token)['output']
    # Store host IPs in a dictionary (host_ips),
    # where key = $(HOST_NAME)_{external, private}_ip
    # e.g. ralf public ip => host_ips['ralf_external_ip']
    # e.g. dns private ip => host_ips['dns_private_ip']
    self.host_ips = {}
    for o in stack_outputs:
        self.host_ips[o['output_key']] = o['output_value']
    LOG.info('Host IPs: %s' % self.host_ips)
    # Create temporary PEM file
    self._save_file(self.pem_path, self._get_private_key())
    # Provision DNS first - the components depend on it
    self._provision_dns()
    # Provision ClearWater components, one subprocess each
    components = ['homer', 'homestead', 'sprout', 'bono', 'ellis', 'ralf']
    processes = {}
    for c in components:
        processes[c] = Process(target=self._provision_cw_comp, args=(c,))
        processes[c].start()
    # wait for every component provisioner to finish
    # (iterate values directly instead of indexing via keys)
    for p in processes.values():
        p.join()
    # remove the temporary key again
    self._delete_file(self.pem_path)
def provision(self):
    """(Optional) if not done during deployment - provision."""
    LOG.info('Calling provision')
    parent = super(SOE, self)
    parent.provision()
def bill_stop_events(self, client, log_server):
    """
    Query the log service for destroy-phase events and publish one
    'stop' billing record per event to the RCB over AMQP.
    """
    stop_billing_query = SearchQuery(
        search_range=self.sr,
        query='phase_event:done AND so_phase:destroy')
    try:
        stop_results = log_server.search(stop_billing_query)
        LOG.debug('Number of stop billing events found: ' + str(len(stop_results.messages)))
        for stop_event in stop_results.messages:
            stop_message = json.loads(stop_event.message)
            # shape the billing record the RCB expects
            rcb_message = {
                'service_type': stop_message.get('sm_name', 'none'),
                'instance_id': stop_message.get('so_id', 'none'),
                'tenant_id': stop_message.get('tenant', 'mcntub'),
                'status': 'stop',
            }
            LOG.info('Sending stop billing event to RCB: ' + rcb_message.__repr__())
            promise = client.basic_publish(exchange='mcn',
                                           routing_key='events',
                                           body=json.dumps(rcb_message))
            client.wait(promise)
    except Exception as e:
        LOG.error('Cannot issue query to the log service to extract stop events.')
        raise e
def state(self):
    """
    Report on state.

    Aggregates per-service states from the resolver: the stack state is
    forwarded only when every service reached UPDATE_COMPLETE,
    otherwise 'Unknown' is reported.
    """
    state = "Unknown"
    svc_insts = "None"
    insts = ""
    resolver_state = self.resolver.state()
    LOG.info("Resolver state:" + resolver_state.__repr__())
    # XXX might there be the case where the value is just CREATE_COMPLETE?
    try:
        for name, attrs in resolver_state.items():
            if str(attrs["occi.mcn.stack.state"]) != "UPDATE_COMPLETE":
                state = "Unknown"
                break
            state = attrs["occi.mcn.stack.state"]
            insts += name + " "
            svc_insts = insts[:-1]
    except KeyError:
        pass
    return state, svc_insts
def checkTopology(topology):
    """
    Validate a topology: uniqueness and consistency of its service
    instances, their requirements and policies.

    :param topology: topology object exposing name and service_instances.
    :raises Exception: re-raised from the checks, message prefixed with
        the topology name for easier diagnosis.
    """
    LOG.info("Check topology \"%s\"." % topology.name)
    # check service instances
    LOG.debug("Check service_instances of topology %s." % topology.name)
    try:
        for service_instance in topology.service_instances:
            # check service instance uniqueness inside topology
            LOG.debug("Check service instance's uniqueness of \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkServiceInstanceUniqueness(service_instance, topology)
            # check service instance except requirements
            LOG.debug("Check service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkServiceInstance(service_instance)
            # check requirements
            LOG.debug("Check requirements' uniqueness for service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkRequirementsUniqueness(service_instance.requirements)
            LOG.debug("Check requirements' dependencies for service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkRequirementsDependencies(service_instance.requirements, topology.service_instances)
            LOG.debug("Check policies' uniqueness for service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name))
            checkPoliciesUnqiueness(service_instance.policies)
    except Exception as exc:
        # FIX: 'except E, e' is Python-2-only; 'as' works on 2.6+ and 3.x.
        # Prefix the message with the topology name and re-raise.
        exc.message = 'Topology:\"%s\"->%s' % (topology.name, exc.message)
        raise exc
def state(self):
    """
    Report on state; also refreshes router public IPs and the ICNaaS
    service endpoint from the stack outputs.

    :return: (state, stack_id, outputs) - outputs may be None.
    """
    # TODO ideally here you compose what attributes should be returned to the SM
    # In this case only the state attributes are returned.
    resolver_state = self.resolver.state()
    LOG.info('Resolver state:')
    LOG.info(resolver_state.__repr__())
    if self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        # Update routers dictionary and service endpoint
        if tmp.get('output', None) is not None:
            for i in tmp['output']:
                # CCNx Router IP (key format: mcn.ccnx.routerNN...)
                if i['output_key'].startswith('mcn.ccnx'):
                    router_id = i['output_key'].split('.')[2][6:]
                    self.routers[int(router_id)]['public_ip'] = str(i['output_value'])
                # ICNaaS Service Endpoint
                elif i['output_key'] == 'mcn.endpoint.icnaas':
                    self.endpoint = 'http://' + str(i['output_value']) + ':5000'
                    i['output_value'] = self.endpoint
            return tmp['state'], self.stack_id, tmp['output']
        else:
            return tmp['state'], self.stack_id, None
    else:
        # FIX: this path used to return a 2-tuple while every other path
        # returns 3 values - callers unpacking three values would crash.
        return 'Unknown', 'N/A', None
def deploy(self, attributes):
    """
    Deploy method.

    :param attributes: OCCI attributes; 'maas.location' selects which
        topology template is deployed.
    """
    if self.stack_id is not None:
        # already deployed - nothing to do
        pass
    parameters = {}
    # defining the location of the topology
    if 'maas.location' in attributes:
        self.location = parameters['location'] = os.environ['location'] = attributes['maas.location']
        LOG.debug("location %s passed via OCCI Attribute" % self.location)
    self.deployer = FactoryAgent().get_agent(self.conf['deployer'])
    self.topology_type = topology_mapping[self.location]
    LOG.info("deploying template %s" % (self.topology_type,))
    # read template; with-statement guarantees the handle is closed
    with open(os.path.join(SO_DIR, 'data/topologies', self.topology_type)) as f:
        template = f.read()
    LOG.debug("content of the topology %s" % template)
    # extracting hot template
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; the template ships with the SO, so kept as-is.
        config = yaml.load(template)
        LOG.debug(config)
    except yaml.YAMLError as exc:
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            # FIX: exception must appear in the format string - the old call
            # passed it as a stray logging argument with no placeholder
            LOG.error("Error in configuration file: %s" % exc)
            LOG.error("Error position: (%s:%s)" % (mark.line + 1, mark.column + 1))
        else:
            LOG.error("Error in configuration file: %s" % exc)
def dispose(self):
    """
    Dispose SICs on burns and ubern.
    """
    # FIX: the docstring used to sit AFTER the first LOG call, making it a
    # dead string statement instead of a docstring.
    LOG.info("disposal of stack")
    LOG.info('Calling dispose')
    # self.resolver.dispose()
    if self.hadoop_master is not None:
        ###################################################################
        # IMPORTANT NOTE: the floating IP has to be disassociated before  #
        # the stack is deleted! Else, disposal will fail. Until now,      #
        # neutronclient hasn't worked doing this which is why it has to   #
        # be done either within OpenStack Horizon or from the terminal!   #
        ###################################################################
        # # check first whether there is a floating ip associated
        # ep = self.deployer.endpoint
        # ep = ep[0:ep.find(":",6)]+":5000/v2.0"
        # neutron = client.Client('2.0', endpoint_url=ep, token=self.token)
        # neutron.format = 'json'
        #
        # # floating IP has to be disassociated before deleting the stack, see
        # # https://ask.openstack.org/en/question/25866/neutron-error/
        # neutron.update_floatingip(self.floatingIpId,
        #                           {'floatingip': {'port_id': None}})
        LOG.debug('Deleting stack: ' + self.hadoop_master)
        self.deployer.dispose(self.hadoop_master, self.token)
        self.hadoop_master = None
def design(self):
    """
    Do initial design steps here.
    """
    # FIX: the docstring used to sit AFTER the first LOG call, making it a
    # dead string statement instead of a docstring.
    LOG.info("designing stack")
    LOG.info('Entered design() - nothing to do here')
def state(self):
    """
    Report on state; also refreshes router public IPs and the ICNaaS
    service endpoint from the stack outputs.

    :return: (state, stack_id, outputs) - outputs may be None.
    """
    # TODO ideally here you compose what attributes should be returned to the SM
    # In this case only the state attributes are returned.
    resolver_state = self.resolver.state()
    LOG.info('Resolver state:')
    LOG.info(resolver_state.__repr__())
    if self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        # Update routers dictionary and service endpoint
        if tmp.get('output', None) is not None:
            for i in tmp['output']:
                # CCNx Router IP (key format: mcn.ccnx.routerNN...)
                if i['output_key'].startswith('mcn.ccnx'):
                    router_id = i['output_key'].split('.')[2][6:]
                    self.routers[int(router_id)]['public_ip'] = str(i['output_value'])
                # ICNaaS Service Endpoint
                elif i['output_key'] == 'mcn.endpoint.icnaas':
                    self.endpoint = 'http://' + str(i['output_value']) + ':5000'
                    i['output_value'] = self.endpoint
            return tmp['state'], self.stack_id, tmp['output']
        else:
            return tmp['state'], self.stack_id, None
    else:
        # FIX: this path used to return a 2-tuple while every other path
        # returns 3 values - callers unpacking three values would crash.
        return 'Unknown', 'N/A', None
def state(self):
    """
    Report on state of the running stack.

    :return: (stack_status, stack_id as str, outputs or '').
    """
    LOG.info("retrieving state of the running stack with id %s" % self.stack_id)
    if self.stack_id is not None:
        topology = TopologyOrchestrator.get(self.stack_id)
        stk = self.deployer.details(topology.ext_id)
        res = {'state': stk['stack_status'],
               'name': stk['stack_name'],
               'id': stk['id']}
        if 'outputs' in stk:
            res['output'] = stk['outputs']
        # dict.get replaces the former try/except KeyError dance
        output = res.get('output', '')
        LOG.debug(" state %s, output %s" % (res['state'], output))
        return res['state'], str(self.stack_id), output
    else:
        # nothing deployed yet
        return 'CREATE_COMPLETE', 'N/A', ''
def provision(self):
    """
    Update every regional stack with its provision descriptor and, when
    a DB is configured, persist the token/provision data per stack.
    """
    # super(SOEExtn, self).provision()
    # TODO check that the provision descriptor is present
    # TODO: refactor this to: for region_name, region in self.service_manifest['resources'].iteritems()
    for region in self.service_manifest['resources'].keys():
        # alias the deep lookup once per region
        res = self.service_manifest['resources'][region]
        if len(res['stack_id']) > 0:
            res['client'].update(res['stack_id'], res['provision'], self.token)
            LOG.info('Stack ID: ' + res['stack_id'])
            if self.db:
                # persist data
                document_filter = {"_id": res['stack_id'], "region": region}
                data = {
                    "_id": res['stack_id'],
                    "token": self.token,
                    "region": region,
                    "provision": res['provision']
                }
                current = self.db.find_one(document_filter)
                if not current:
                    self.db.insert(data)
                else:
                    self.db.update_one(document_filter, {
                        "$set": {
                            'provision': data['provision'],
                            'token': data['token']
                        }
                    })
def state(self):
    # Aggregate state across all regional stacks:
    # - state:   the "least successful" of the per-region states
    # - ids:     comma-joined "region:stack_id" pairs
    # - outputs: all stack outputs, each key suffixed with its region name
    # super(SOEExtn, self).state()
    stack_state = ''
    stack_ids = ''
    outputs = []
    for region in self.service_manifest['resources'].keys():
        tmp = self.service_manifest['resources'][region]['client'].details(self.service_manifest['resources'][region]['stack_id'], self.token)
        LOG.info('Returning Stack output state')
        # for stack state, we return the least successful one
        # e.g. one stack "CREATE_COMPLETED', one "CREATE_FAILED" -> we return CREATE_FAILED
        # one stack with "CREATE_IN_PROGRESS", one "CREATE_COMPLETED" -> we return CREATE_IN_PROGRESS
        #
        successful_states = ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
        ongoing_states = ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
        failed_states = ['CREATE_FAILED', 'UPDATE_FAILED']
        no_state = ['']
        current_state = tmp['state']
        if stack_state == '':
            # first region seen - take its state as the baseline
            stack_state = current_state
        else:
            # we already have a state present...
            if current_state in successful_states:
                # no need to write a successful state back
                pass
            if current_state in ongoing_states:
                # if the saved state is 'better', overwrite
                if stack_state in successful_states:
                    stack_state = current_state
            if current_state in failed_states:
                # if the saved state is 'better', overwrite
                if stack_state in successful_states or stack_state in ongoing_states:
                    stack_state = current_state
        # for stack_id, we concat them together with their region-names:
        # region1:stack-id1,region2:stack-id2
        if len(stack_ids) == 0:
            stack_ids = '%s:%s' % (region, self.service_manifest['resources'][region]['stack_id'])
        else:
            stack_ids = '%s,%s:%s' % (stack_ids, region, self.service_manifest['resources'][region]['stack_id'])
        # for stack_output, we add the region name at the end of every key
        # a.b.c in region 1 becomes a.b.c.region1
        try:
            current_outputs = tmp['output']
            for output in current_outputs:
                outputs.append({
                    'output_key': '%s.%s' % (output['output_key'], region.replace(' ', '')),
                    'output_value': output['output_value']
                })
        except KeyError:
            # a stack without outputs simply contributes none
            pass
    return stack_state, stack_ids, outputs
def dispose(self):
    """Dispose SICs: delete the deployed stack, if any."""
    LOG.info('Calling dispose')
    if self.stack_id is None:
        return
    self.deployer.dispose(self.stack_id, self.token)
    self.stack_id = None
def get_all(self, _class):
    """
    Fetch all persisted instances of the given model class.

    :param _class: mapped model class to query.
    :return: list of all instances.
    :raises Exception: DB errors are re-raised after rolling back.
    """
    try:
        LOG.info('Get all of %s' % _class.__name__)
        lst = self.instance.get_all(_class)
        self.instance.session.commit()
    except Exception:
        # FIX: 'rollback' was referenced without calling it, so the
        # session was never actually rolled back on error.
        self.instance.session.rollback()
        raise
    return lst
def get_all(self, _class):
    """
    Fetch all persisted instances of the given model class.

    :param _class: mapped model class to query.
    :return: list of all instances.
    :raises Exception: DB errors are re-raised after rolling back.
    """
    try:
        LOG.info('Get all of %s' % _class.__name__)
        lst = self.instance.get_all(_class)
        self.instance.session.commit()
    except Exception:
        # FIX: 'rollback' was referenced without calling it, so the
        # session was never actually rolled back on error.
        self.instance.session.rollback()
        raise
    return lst
def get_by_service_type(self, _class, _type):
    """
    Fetch all instances of the given model class with a service_type.

    :param _class: mapped model class to query.
    :param _type: service type to filter on.
    :return: matching instances.
    :raises Exception: DB errors are re-raised after rolling back.
    """
    try:
        LOG.info('Get all of %s with service_type %s' % (_class.__name__, _type))
        res = self.instance.get_by_service_type(_class, _type)
        self.instance.session.commit()
    except Exception:
        # FIX: 'rollback' was referenced without calling it, so the
        # session was never actually rolled back on error.
        self.instance.session.rollback()
        raise
    return res
def dispose(self):
    """Dispose SICs - tear down the deployed stack when present."""
    LOG.info('Calling dispose...')
    if self.stack_id is None:
        return
    LOG.debug('Deleting stack: %s' % self.stack_id)
    self.deployer.dispose(self.stack_id, self.token)
    self.stack_id = None
def __init__(self, token, tenant):
    """Initialise the SOE: no stack yet, deployer bound to region EURE."""
    super(SOE, self).__init__(token, tenant)
    self.stack_id = None
    region = 'EURE'
    self.deployer = util.get_deployer(token,
                                      url_type='public',
                                      tenant_name=tenant,
                                      region=region)
    LOG.info('Bundle dir: ' + BUNDLE_DIR)
def get_by_name(self, _class, _name):
    """
    Fetch all instances of the given model class with a name.

    :param _class: mapped model class to query.
    :param _name: name to filter on.
    :return: matching instances.
    :raises Exception: DB errors are re-raised after rolling back.
    """
    try:
        LOG.info('Get all of %s with name %s' % (_class.__name__, _name))
        res = self.instance.get_by_name(_class, _name)
        self.instance.session.commit()
    except Exception:
        # FIX: 'rollback' was referenced without calling it, so the
        # session was never actually rolled back on error.
        self.instance.session.rollback()
        raise
    return res
def provision(self, attributes=None):
    """(Optional) if not done during deployment - provision."""
    # async call - completion is signalled later via an event on the queue
    self.resolver.provision()
    LOG.info("Now I can provision my resources once my resources are created. Service info:")
    LOG.info(self.resolver.service_inst_endpoints)
def print_logo(self):
    # Log the ASCII-art "IMSSO" banner at startup (cosmetic only).
    LOG.info('\n' +
             '$$$$$$\ $$\ $$\ $$$$$$\ $$$$$$\ $$$$$$\ \n'
             '\_$$ _|$$$\ $$$ |$$ __$$\ $$ __$$\ $$ __$$\n'
             ' $$ | $$$$\ $$$$ |$$ / \__|$$ / \__|$$ / $$\n'
             ' $$ | $$\$$\$$ $$ |\$$$$$$\ \$$$$$$\ $$ | $$ |\n'
             ' $$ | $$ \$$$ $$ | \____$$\ \____$$\ $$ | $$ |\n'
             ' $$ | $$ |\$ /$$ |$$\ $$ |$$\ $$ |$$ | $$ |\n'
             '$$$$$$\ $$ | \_/ $$ |\$$$$$$ |\$$$$$$ | $$$$$$ |\n'
             '\______|\__| \__| \______/ \______/ \______/ \n')
def deploy(self, attributes=None):
    """Deploy SICs: service dependencies first, then the resource stack."""
    LOG.debug('Deploy service dependencies')
    self.resolver.deploy()
    LOG.debug('Executing deployment logic')
    if self.stack_id is None:
        self.stack_id = self.deployer.deploy(self.template, self.token)
        LOG.info('Resource dependencies - stack id: ' + self.stack_id)
def dispose(self):
    """Dispose SICs: delete the third-party stack when one exists."""
    LOG.info('Disposing of third party service instances...')
    if self.stack_id is None:
        return
    self.deployer.dispose(self.stack_id, self.token)
    self.stack_id = None
    # self.stop_event.set()
def dispose(self):
    """Dispose SICs in every region that currently has a stack."""
    # super(SOEExtn, self).dispose()
    LOG.info('Calling dispose')
    for region in self.service_manifest['resources'].keys():
        # alias the deep lookup once per region
        res = self.service_manifest['resources'][region]
        if len(res['stack_id']) > 0:
            res['client'].dispose(res['stack_id'], self.token)
            res['stack_id'] = ''
def print_logo(self):
    # Log the ASCII-art "IMSSO" banner at startup (cosmetic only).
    LOG.info('\n' +
             '$$$$$$\ $$\ $$\ $$$$$$\ $$$$$$\ $$$$$$\ \n'
             '\_$$ _|$$$\ $$$ |$$ __$$\ $$ __$$\ $$ __$$\n'
             ' $$ | $$$$\ $$$$ |$$ / \__|$$ / \__|$$ / $$\n'
             ' $$ | $$\$$\$$ $$ |\$$$$$$\ \$$$$$$\ $$ | $$ |\n'
             ' $$ | $$ \$$$ $$ | \____$$\ \____$$\ $$ | $$ |\n'
             ' $$ | $$ |\$ /$$ |$$\ $$ |$$\ $$ |$$ | $$ |\n'
             '$$$$$$\ $$ | \_/ $$ |\$$$$$$ |\$$$$$$ | $$$$$$ |\n'
             '\______|\__| \__| \______/ \______/ \______/ \n')
def dispose(self):
    """Dispose third-party service instances, then the resource stack."""
    LOG.info('Disposing of 3rd party service instances...')
    self.resolver.dispose()
    if self.stack_id is None:
        return
    LOG.info('Disposing of resource instances...')
    self.deployer.dispose(self.stack_id, self.token)
    self.stack_id = None
def deploy(self):
    """
    Deploy SICs: read the HOT from the bundle and create the stack.
    """
    LOG.info('Calling deploy')
    if self.stack_id is None:
        # FIX: with-statement closes the template file even on read errors
        with open(os.path.join(BUNDLE_DIR, 'data', 'deployment.yaml')) as f:
            template = f.read()
        self.stack_id = self.deployer.deploy(
            template, self.token,
            parameters={'mme_pgwc_sgwc_input': '8.8.8.8'})
        LOG.info('Stack ID: ' + self.stack_id.__repr__())
def update(self, obj):
    """
    Update a persisted object and commit; roll back and re-raise on
    any DB error.
    """
    try:
        LOG.info('Updating Object: %s' % obj)
        updated = self.instance.update(obj)
        self.instance.session.commit()
        LOG.debug('Updated Object: %s' % updated)
        self.instance.session.expunge_all()
        self.instance.session.close()
        return updated
    except:
        self.instance.session.rollback()
        raise
def deploy(self, attributes=None):
    """
    Deploy SICs: read the AAA HOT from the bundle and create the stack
    under a randomised name.
    """
    LOG.info('Executing deployment logic ...')
    if self.stack_id is None:
        # FIX: with-statement closes the template file even on read errors
        with open(os.path.join(BUNDLE_DIR, 'data', 'aaa-deployment.yaml')) as f:
            template = f.read()
        self.stack_id = self.deployer.deploy(
            template, self.token,
            parameters={'cms_dss_input': '8.8.8.8'},
            name='AAAaaS_' + str(random.randint(1000, 9999)))
        LOG.info('Stack ID: ' + self.stack_id.__repr__())
def persist(self, obj):
    """
    Persist a new object and commit; roll back and re-raise on any
    DB error.
    """
    try:
        LOG.info('Persisting Object: %s' % id(obj))
        persisted = self.instance.persist(obj)
        self.instance.session.commit()
        LOG.debug('Persisted Object: %s' % persisted)
        self.instance.session.expunge_all()
        self.instance.session.close()
        return persisted
    except:
        self.instance.session.rollback()
        raise
def remove(self, obj):
    """
    Remove a persisted object and commit; roll back and re-raise on
    any DB error.
    """
    try:
        LOG.info('Removing Object: %s' % obj)
        self.instance.remove(obj)
        self.instance.session.commit()
        LOG.debug('Removed Object: %s' % obj)
        self.instance.session.expunge_all()
        self.instance.session.close()
        return obj
    except:
        self.instance.session.rollback()
        raise
def remove(self, obj):
    """
    Remove a persisted object and commit; roll back and re-raise on
    any DB error.
    """
    try:
        LOG.info('Removing Object: %s' % obj)
        self.instance.remove(obj)
        self.instance.session.commit()
        LOG.debug('Removed Object: %s' % obj)
        self.instance.session.expunge_all()
        self.instance.session.close()
        return obj
    except:
        self.instance.session.rollback()
        raise
def update(self, obj):
    """
    Update a persisted object and commit; roll back and re-raise on
    any DB error.
    """
    try:
        LOG.info('Updating Object: %s' % obj)
        updated = self.instance.update(obj)
        self.instance.session.commit()
        LOG.debug('Updated Object: %s' % updated)
        self.instance.session.expunge_all()
        self.instance.session.close()
        return updated
    except:
        self.instance.session.rollback()
        raise
def persist(self, obj):
    """
    Persist a new object and commit; roll back and re-raise on any
    DB error.
    """
    try:
        LOG.info('Persisting Object: %s' % id(obj))
        persisted = self.instance.persist(obj)
        self.instance.session.commit()
        LOG.debug('Persisted Object: %s' % persisted)
        self.instance.session.expunge_all()
        self.instance.session.close()
        return persisted
    except:
        self.instance.session.rollback()
        raise
def provision(self, attributes=None):
    """
    (Optional) if not done during deployment - provision.
    """
    self.resolver.provision()
    LOG.debug('ICN SO provision - Getting EPs')
    # pull the MOBaaS endpoint out of the resolver's endpoint listing
    for ep_entity in self.resolver.service_inst_endpoints:
        for item in ep_entity:
            if 'mcn.endpoint.mobaas' in item['attributes']:
                self.mobaas_endpoint = item['attributes']['mcn.endpoint.mobaas']
    # EP is only the IP
    # strip scheme and port from a URL-shaped endpoint: http://host:port -> host
    if self.mobaas_endpoint is not None and self.mobaas_endpoint.startswith('http'):
        self.mobaas_endpoint = self.mobaas_endpoint.split('/')[2].split(':')[0]
    LOG.info('Now I can provision my resources once my resources are created. Service info:')
    LOG.info(self.resolver.service_inst_endpoints)
    # Wait for create/update to be completed
    # NOTE(review): if stack_id is None this busy-loops without sleeping -
    # confirm callers always deploy before provisioning
    while (True):
        if self.stack_id is not None:
            tmp = self.deployer.details(self.stack_id, self.token)
            if tmp['state'] == 'CREATE_COMPLETE' or tmp['state'] == 'UPDATE_COMPLETE':
                break
            else:
                time.sleep(10)
    LOG.debug('Executing resource provisioning logic')
    # XXX note that provisioning of external services must happen before resource provisioning
    # Get endpoint of MaaS
    if attributes:
        #print attributes
        if 'mcn.endpoint.maas' in attributes:
            self.maas_endpoint = str(attributes['mcn.endpoint.maas'])
        if 'mcn.endpoint.mobaas' in attributes:
            self.mobaas_endpoint = str(attributes['mcn.endpoint.mobaas'])
    # Update stack
    self.update(True)
    # Mark all routers as provisioned
    for r in self.routers:
        self.routers[r]['provisioned'] = True
    # once logic executes, deploy phase is done
    self.event.set()
def dispose(self):
    """
    Dispose SICs: delete the resource stack (if any) and signal the
    destroy event so watchers can react.
    """
    LOG.info('Disposing of 3rd party service instances...')
    # self.resolver.dispose()
    stack = self.stack_id
    if stack is not None:
        LOG.info('Disposing of resource instances...')
        self.deployer.dispose(stack, self.token)
        self.stack_id = None
    # TODO on disposal, the SOE should notify the SOD to shutdown its thread
    self.destroy_event.set()
def dispose(self):
    """
    Dispose SICs: delete the resource stack (if any) and signal the
    destroy event so watchers can react.
    """
    LOG.info('Disposing of 3rd party service instances...')
    # self.resolver.dispose()
    stack = self.stack_id
    if stack is not None:
        LOG.info('Disposing of resource instances...')
        self.deployer.dispose(stack, self.token)
        self.stack_id = None
    # TODO on disposal, the SOE should notify the SOD to shutdown its thread
    self.destroy_event.set()
def state(self):
    """
    Report on state for both stacks in burns and ubern.

    :return: (state, master stack id, outputs), with an empty mapping
        when there are no outputs or no stack.
    """
    # FIX: the docstring used to sit AFTER the first LOG call, making it a
    # dead string statement instead of a docstring.
    LOG.info("getting status of stack")
    if self.hadoop_master is not None:
        tmp = self.deployer.details(self.hadoop_master, self.token)
        if 'output' not in tmp:
            return tmp['state'], self.hadoop_master, dict()
        return tmp['state'], self.hadoop_master, tmp['output']
    else:
        return 'Unknown', 'N/A', {}
def dispose(self):
    """Dispose method: delete the topology and any associated MaaS."""
    LOG.info("deleting topology with id %s " % self.stack_id)
    if self.stack_id is not None:
        topo = TopologyOrchestrator.get(self.stack_id)
        LOG.debug("topology to be deleted %s " % topo)
        self.deployer.dispose(topo)
        TopologyOrchestrator.delete(topo)
        self.stack_id = None
    if self.maas is not None:
        util.dispose_maas(self.token, self.maas)
def update(self, provisioning=False, attributes=None):
    """
    Update SICs: pick up endpoint attributes, wait for the stack to be
    idle, then push the update template.

    :param provisioning: kept for interface compatibility (unused here).
    :param attributes: optional dict carrying mcn.endpoint.* values.
    """
    LOG.info('Executing update logic ...')
    if attributes:
        #print attributes
        if 'mcn.endpoint.dssaas' in attributes:
            self.dss_endpoint = str(attributes['mcn.endpoint.dssaas'])
        if 'mcn.endpoint.api' in attributes:
            self.dns_api = str(attributes['mcn.endpoint.api'])
            self.dnsaas = DnsaasClientAction(self.dns_api, token=self.token)
            LOG.info('DNS EP is: ' + self.dns_api)
        if 'mcn.endpoint.forwarder' in attributes:
            self.dns_forwarder = str(attributes['mcn.endpoint.forwarder'])
            LOG.info('DNS forwarder is: ' + self.dns_forwarder)
    # Wait for any pending operation to complete.
    # FIX: the former 'while (True)' spun forever (without sleeping) when
    # stack_id was None; loop only while a stack exists.
    while self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        if tmp['state'] == 'CREATE_COMPLETE' or tmp['state'] == 'UPDATE_COMPLETE':
            break
        time.sleep(10)
    if self.stack_id is not None:
        # FIX: with-statement closes the template file even on read errors
        with open(os.path.join(BUNDLE_DIR, 'data', 'aaa-update.yaml')) as f:
            template = f.read()
        self.deployer.update(self.stack_id, template, self.token,
                             parameters={'cms_dss_input': self.dss_endpoint})
        LOG.info('Updated stack ID: ' + self.stack_id.__repr__())
def deploy(self):
    """Deploy Project Clearwater infrastructure.

    Note that the HOT requires providing your environment specific
    details such as VM flavours, available network offerings,
    management ssh & CloudStack APIs keys, etc.
    """
    LOG.info('Deploying...')
    hot_path = os.path.join(self.HOT_dir,
                            str(self.platform) + '-clearwater-keys.yaml')
    LOG.info('HOT path: %s' % hot_path)
    params = {}
    LOG.info('Params: %s' % params)
    if self.stack_id is not None:
        return
    with open(hot_path, 'r') as f:
        template = f.read()
    key_script_path = os.path.join(self.script_dir, 'inject-key.sh')
    with open(key_script_path, 'r') as f:
        params['script'] = f.read()
    self.stack_id = self.deployer.deploy(template, self.token,
                                         parameters=params)
    LOG.info('Stack ID: ' + self.stack_id.__repr__())
def state(self):
    """
    Report on state.

    :return: (state, stack_id, outputs or '') when deployed,
        ('Unknown', 'N/A', None) otherwise.
    """
    if self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        LOG.info('Returning Stack output state...')
        # dict.get replaces the former try/except KeyError dance
        output = tmp.get('output', '')
        return tmp['state'], self.stack_id, output
    else:
        LOG.info('Stack output: none - Unknown, N/A')
        return 'Unknown', 'N/A', None
def remove_dnsconf(self, dnsaas, domain, record, info_rec, rec_type='A'):
    # Remove a DNS record via the DNSaaS client, retrying once a second
    # until the delete succeeds or the record is reported missing.
    #
    # :param dnsaas: DNSaaS client; None means nothing is done.
    # :param domain: DNS domain the record belongs to.
    # :param record: record name to delete.
    # :param info_rec: record data (only logged here).
    # :param rec_type: DNS record type, defaults to 'A'.
    # :return: True when a client was available, False otherwise.
    if dnsaas is not None:
        LOG.info('Remove Record ' + record + ' domain=' + domain + ' info_rec=' + info_rec)
        result = -1
        while (result != 1):
            time.sleep(1)
            result = dnsaas.delete_record(domain, record, rec_type, self.token)
            try:
                # NOTE(review): assumes delete_record can return a dict with a
                # 'status' key on failure, where '404' means the record is
                # already gone - confirm against the DNSaaS client
                if result.get('status', None) is not None:
                    if(result['status'] == '404'):
                        break
            except:
                # result without .get (e.g. an int success code) ends the loop
                break
        return True
    else:
        LOG.info('Something wrong dnsaasclient not set!')
        return False
def dispose(self):
    """
    Dispose of 3rd-party services and the resource stack, resetting
    the router bookkeeping to its initial two-router layout.
    """
    LOG.info('Disposing of 3rd party service instances...')
    self.resolver.dispose()
    if self.stack_id is None:
        return
    LOG.info('Disposing of resource instances...')
    self.deployer.dispose(self.stack_id, self.token)
    self.endpoint = None
    self.maas_endpoint = None
    self.routers = {
        1: {'public_ip': 'unassigned', 'layer': 0, 'cell_id': 200,
            'provisioned': False, 'scale_in_count': 0,
            'scale_out_count': 0},
        2: {'public_ip': 'unassigned', 'layer': 1, 'cell_id': 0,
            'provisioned': False, 'scale_in_count': 0,
            'scale_out_count': 0},
    }
    self.stack_id = None
def run(self):
    """
    Decision part implementation goes here.

    Waits for deploy/provision to finish, then periodically publishes
    the AAA endpoints into DNS; once stopped, removes them again.
    """
    LOG.debug('AAAaaS SOD - Waiting for deploy and provisioning to finish')
    self.ready_event.wait()
    LOG.debug('AAAaaS SOD - Starting runtime logic...')
    # RUN-TIME MANAGEMENT
    while not self.stop_event.isSet():
        event_is_set = self.stop_event.wait(self.time_wait)
        if self.so_e.dns_info_configured is not True and self.so_e.dns_api is not None:
            res_Openam = False
            res_Profile = False
            state, stack_id, stack_output = self.so_e.state()
            if stack_output is not None:
                for line in stack_output:
                    if line['output_key'] == 'mcn.endpoint.aaa-profile-instance':
                        self.so_e.recInfoProfile = str(line['output_value'])
                        # NOTE(review): the profile record sets res_Openam and
                        # the openam record sets res_Profile - the flag names
                        # look swapped; behaviour is unaffected since only the
                        # conjunction is used, but confirm the intent
                        res_Openam = self.perform_dnsconf(self.so_e.dnsaas, self.so_e.aaaDomainName, self.so_e.recProfile, self.so_e.recInfoProfile)
                    if line['output_key'] == 'mcn.endpoint.aaa-openam-instance':
                        self.so_e.recInfoOpenam = str(line['output_value'])
                        res_Profile = self.perform_dnsconf(self.so_e.dnsaas, self.so_e.aaaDomainName, self.so_e.recOpenam, self.so_e.recInfoOpenam)
                if res_Openam and res_Profile:
                    self.so_e.dns_info_configured = True
                    LOG.info('DNS information for AAA configured')
                    # back off once DNS is configured
                    self.time_wait = self.so_e.time_wait_after_dns
    #wait more work is done
    if self.stop_event.isSet():
        LOG.debug('AAAaaS SOD - STOP event set after disposal')
        # clean the DNS records up again when the SO is destroyed
        if self.so_e.dns_info_configured:
            res_Openam = self.remove_dnsconf(self.so_e.dnsaas, self.so_e.aaaDomainName, self.so_e.recProfile, self.so_e.recInfoProfile)
            res_Profile = self.remove_dnsconf(self.so_e.dnsaas, self.so_e.aaaDomainName, self.so_e.recOpenam, self.so_e.recInfoOpenam)
            if res_Openam and res_Profile:
                self.so_e.dns_info_configured = True
                LOG.info('DNS information remove successfully!')
def deploy(self, attributes=None): """ deploy SICs. """ # LOG.debug('Deploy service dependencies') # self.resolver.deploy() LOG.debug('Executing deployment logic') if self.stack_id is None: # let's add monasca service rt = runtime.Monasca(self.token, self.tenant, auth_url=os.environ['DESIGN_URI']) # user creation specific to SO instance to pass to monasca-agent within VMs if self.mon_user is None and self.mon_pass is None: self.mon_id, self.mon_user, self.mon_pass = rt.create_user() params = dict() params['username'] = self.mon_user params['password'] = self.mon_pass params['tenant'] = self.tenant params['service_id'] = self.service params['hostname'] = self.hostname self.stack_id = self.deployer.deploy(self.template, self.token, parameters=params) # need a way to get local SO url from opsv3 to setup a notification url notification_url = self.app_url n_name, n_id = rt.notify( '(avg(cpu.user_perc{service=' + self.service + ',hostname=' + self.hostname + '}) > 100)', notification_url, runtime.ACTION_UNDETERMINED) self.mon_not[n_name] = "replace_host1" self.mon_not_ids.append(n_id) LOG.debug("created alarm: " + n_name + " with id: " + n_id + " and action: " + self.mon_not[n_name]) # fill the mapping self.mappings[ 'rcb_si'] = 'rcb_si' # initial mapping: resource name on heat template is same as expected LOG.info('Resource dependencies - stack id: ' + self.stack_id)
def state(self):
    """
    Report on state of the stack.

    :return: (state, stack_id, output or '') when deployed,
        ('Unknown', 'N/A', None) otherwise.
    """
    if self.stack_id is not None:
        tmp = self.deployer.details(self.stack_id, self.token)
        stack_state = tmp['state']
        LOG.info('Returning stack output')
        # XXX type should be consistent
        # dict.get replaces the former try/except KeyError dance
        output = tmp.get('output', '')
        return stack_state, self.stack_id, output
    else:
        LOG.info('Stack output: none - Unknown, N/A')
        return 'Unknown', 'N/A', None
def dispose(self):
    """
    Dispose SICs, then clean up the Monasca user and notifications
    created at deploy time.
    """
    # LOG.info('Disposing of 3rd party service instances...')
    # self.resolver.dispose()
    if self.stack_id is not None:
        LOG.info('Disposing of resource instances...')
        self.deployer.dispose(self.stack_id, self.token)
        self.stack_id = None
    # TODO on disposal, the SOE should notify the SOD to shutdown its thread
    # Removing users, alarm-def
    monasca = runtime.Monasca(self.token, self.tenant,
                              auth_url=os.environ['DESIGN_URI'])
    monasca.delete_user(self.mon_id)
    for notification_id in self.mon_not_ids:
        monasca.dispose_monasca(notification_id)
def run(self):
    """
    Wait for the topology to reach a final state, then start the policy
    checker when the topology deployed/updated successfully.
    """
    LOG.info("Initialise policy thread for policy %s" % self.policy.name)
    self.wait_until_final_state()
    LOG.info("Starting policy thread for policy %s" % self.policy.name)
    if self.is_stopped:
        LOG.info("Cannot start policy threads. PolicyThreads are stopped.")
        return
    if self.topology.state in ['DEPLOYED', 'UPDATED']:
        self.start_policy_checker_si()
        LOG.info("Started policy thread for policy %s" % self.policy.name)
    else:
        LOG.error("ERROR: Something went wrong. Seems to be an error. Topology state -> %s. Didn't start the PolicyThread" % self.topology.state)
def deploy(self):
    """
    Deploy SICs: create the stack, passing the (locally configured)
    credentials as template parameters.
    """
    LOG.debug('Executing deployment logic')
    if self.stack_id is None:
        username = ''
        password = ''
        if not username or not password:
            raise RuntimeError('No username or password set. Please set one')
        credentials = {'UserName': username, 'Password': password}
        self.stack_id = self.deployer.deploy(self.template, self.token,
                                             parameters=credentials)
        LOG.info('Resource dependencies - stack id: ' + self.stack_id)
def provision(self, attributes):
    """
    (Optional) if not done during deployment - provision: pick up
    endpoint attributes, update the stack, and signal readiness.
    """
    LOG.info('Executing resource provisioning logic...')
    if attributes:
        #print attributes
        if 'mcn.endpoint.dssaas' in attributes:
            self.dss_endpoint = str(attributes['mcn.endpoint.dssaas'])
        if 'mcn.endpoint.api' in attributes:
            api = str(attributes['mcn.endpoint.api'])
            self.dns_api = api
            self.dnsaas = DnsaasClientAction(api, token=self.token)
            LOG.info('DNS EP is: ' + api)
        if 'mcn.endpoint.forwarder' in attributes:
            forwarder = str(attributes['mcn.endpoint.forwarder'])
            self.dns_forwarder = forwarder
            LOG.info('DNS forwarder is: ' + forwarder)
    # Update stack
    self.update(True)
    # if self.dns_api is not None:
    LOG.debug('Executing resource provisioning logic')
    # once logic executes, deploy phase is done
    self.ready_event.set()