def run(self):
    """Consume avro-encoded kafka messages and enqueue the decoded events.

    Reads topic/group/broker settings from config, builds the consumer,
    then loops forever: strip the 5-byte header, avro-decode the payload,
    parse the embedded JSON and put the resulting event on the queue.
    """
    ctx = ServiceContext()
    config = ctx.getConfigService()
    queue = ctx.getQueueService()
    self.schema = avro.schema.parse(avro_schema)
    constructor = "KafkaConsumer(%s,group_id=%s,bootstrap_servers=%s)"
    topics = config.get("Input Plugin: kafka_collector", "kafka_topics")
    group_id = config.get("Input Plugin: kafka_collector", "kafka_groupid")
    bootstrap_server = config.get("Message", "kafka_broker")
    # BUG FIX: the original bound this expression to the name ``str``,
    # shadowing the builtin, so the error path str(e) below crashed.
    # FIXME(security): eval() of config-derived text can execute arbitrary
    # code; prefer calling KafkaConsumer(...) directly.
    consumer_expr = constructor % (topics, group_id, bootstrap_server)
    self.consumer = eval(consumer_expr)
    for msg in self.consumer:
        value = bytearray(msg.value)
        topic = msg.topic
        # first 5 bytes are a header (magic byte + schema id); skip them
        bytes_reader = io.BytesIO(value[5:])
        decoder = avro.io.BinaryDecoder(bytes_reader)
        reader = avro.io.DatumReader(self.schema)
        kafkamsg = reader.read(decoder)
        try:
            jsondata = json.loads(kafkamsg['rawdata'])
            eventType = jsondata["eventName"]
            jsondata['topic'] = topic
            queue.put(EventFactory.getEvent(eventType, jsondata))
        except InputError as e:
            self.error(str(e))
        except Exception:
            # narrowed from a bare (and truncated) ``except:``; keep the
            # consumer loop alive on any other per-message failure
            self.error("has exception when resolving kafka message.")
def handleTenant_Delete(*args, **kwargs):
    """Delete a tenant and everything it owns.

    Signal handler: removes every rundeck job, node and service that
    belongs to the account from both rundeck and the database, then
    deletes the account row and commits.

    kwargs:
        sender:  the event carrying ``accountId``.
        session: the SQLAlchemy session to operate on.
    """
    event = kwargs["sender"]
    accountId = event.accountId
    session = kwargs["session"]
    rdclient = ServiceContext().getRdClient()
    # rundeck job deletion requires API v1 on this client
    rdclient.api_version = 1
    try:
        account = session.query(Tenant).filter(Tenant.id == accountId).one()
        for svc in account.services:
            for node in svc.nodes:
                for job in node.jobs:
                    try:
                        rdclient.delete_job(job.jobid)
                    except Exception:
                        # best-effort: the job may already be gone on the
                        # rundeck side (narrowed from a bare except)
                        logger.warning(
                            "delete job(%s) error, just ignore any way"
                            % job.jobid)
                    session.delete(job)
                session.delete(node)
            session.delete(svc)
        session.delete(account)
        session.commit()
        logger.info("delete account<%s>." % accountId)
    except NoResultFound:
        logger.warning(
            "account(<%s>) has been deleted, just ignore" % accountId)
    finally:
        # restore the client's default API version
        del rdclient.api_version
def buildmonitorcpejob(cls, tenant):
    """Build a MonitorCPETest job for the tenant's first vrouter node.

    Returns the configured Job, or None when the tenant has no vrouter
    service or the vrouter service has no nodes.
    """
    config = ServiceContext().getConfigService()
    host = config.get("monitorcpe", "host")
    cmd = config.get("monitorcpe", "command")
    lockcmd = config.get("monitorcpe", "getlock")
    releasecmd = config.get("monitorcpe", "releaselock")
    for svc in tenant.services:
        if svc.name != "vrouter":
            continue
        for node in svc.nodes:
            job = Job()
            job.node = node
            job.setNodeFilter(host)
            job.setGroup(tenant.id)
            job.setName("MonitorCPETest")
            job.addlocalcommand(lockcmd)
            job.addcommand(
                cmd + " -c 6620e868-e9df-4c8d-8c74-87fe364f7222 -l 10.74.113.116 -r 10.74.113.117 -x 204 -y 205"
            )
            job.addcommand(releasecmd)
            # only one job is ever built: the first node wins
            return job
    return None
def run(self):
    """Event dispatch loop.

    Pulls events off the shared queue forever and fans each one out to
    pydispatch listeners keyed by the event's name, passing along the DB
    session.
    """
    self._initializeSession()
    queue = ServiceContext().getQueueService()
    while True:
        ev = queue.get()
        dispatcher.send(signal=ev.eventName, sender=ev, session=self.session)
def checksvc(accountid):
    """Enqueue a TENANT_CHECK event for the given account."""
    payload = {"accountId": accountid, "eventName": "TENANT_CHECK"}
    svc_queue = ServiceContext().getQueueService()
    svc_queue.put(EventFactory.getEvent("TENANT_CHECK", payload))
    logger.info("trigger account(%s) tenant check" % accountid)
def vmcreate(e):
    """Handle a VM-created event: sync inventory, record the new node in
    the DB and, once every service reports ready, fire ``create_vm_done``.

    :param e: state-machine event carrying ``.tenant``, ``.payload``
              and ``.fsm``.
    """
    tenant = e.tenant
    msg = e.payload
    session = object_session(tenant)
    svc = tenant.getservicebyname(msg.vmType)
    if svc is None:
        # NOTE(review): this log reads msg.stackid while the assignment
        # below uses msg.stackId — one of the two casings is likely a
        # typo; confirm against the payload class.
        logger.error("for node(%s/%s/%s) can not find corresponding service db object" % (tenant.id,msg.vmType,msg.stackid))
        return
    logger.info("part sync start.accountId<%s>" % tenant.id)
    ctx = ServiceContext()
    zk_host = ctx.getConfigService().get("Inventory","zk_address")
    # pull the latest inventory for this account from zookeeper before
    # recording the node
    account_sync(tenant.id,zk_host)
    logger.info("part sync finished.accountId<%s>" % tenant.id)
    node = svc.createnode()
    node.stackid = msg.stackId
    node.vmtype = msg.vmType
    node.manageip = msg.vmManagementIP
    node.publicip = msg.vmPublicIP
    node.serviceip = msg.vmServiceIP
    session.add(node)
    session.commit()
    logger.info("node(%s/%s/%s) has been created in db" % (tenant.id,node.vmtype,node.stackid))
    # tenant is ready only when every service reports ready
    # (``and`` short-circuits: once False, isready() is no longer called)
    flag = True
    for svc in tenant.services:
        flag = flag and svc.isready()
    tenant.state = e.fsm.current
    if flag:
        tenant.getSM().trigger("create_vm_done",tenant = tenant)
def checksvc(accountid):
    """Put a TENANT_CHECK event on the service queue for *accountid*."""
    queue = ServiceContext().getQueueService()
    event = EventFactory.getEvent(
        "TENANT_CHECK", {"accountId": accountid, "eventName": "TENANT_CHECK"})
    queue.put(event)
    logger.info("trigger account(%s) tenant check" % accountid)
def vmcreate(e):
    """Record a freshly created VM as a node row and advance the tenant
    state machine once all services are fully populated."""
    tenant = e.tenant
    payload = e.payload
    session = object_session(tenant)
    service = tenant.getservicebyname(payload.vmType)
    if service is None:
        logger.error(
            "for node(%s/%s/%s) can not find corresponding service db object"
            % (tenant.id, payload.vmType, payload.stackid))
        return
    logger.info("part sync start.accountId<%s>" % tenant.id)
    zk_host = ServiceContext().getConfigService().get("Inventory",
                                                      "zk_address")
    account_sync(tenant.id, zk_host)
    logger.info("part sync finished.accountId<%s>" % tenant.id)
    node = service.createnode()
    node.stackid = payload.stackId
    node.vmtype = payload.vmType
    node.manageip = payload.vmManagementIP
    node.publicip = payload.vmPublicIP
    node.serviceip = payload.vmServiceIP
    session.add(node)
    session.commit()
    logger.info("node(%s/%s/%s) has been created in db"
                % (tenant.id, node.vmtype, node.stackid))
    # all() short-circuits exactly like the original flag-and chain
    ready = all(svc.isready() for svc in tenant.services)
    tenant.state = e.fsm.current
    if ready:
        tenant.getSM().trigger("create_vm_done", tenant=tenant)
def monitorcpe(e):
    """Import and run the MonitorCPETest job for the event's tenant.

    Builds the job, imports it into the "dms-sa" rundeck project, records
    a rundeck-job row in the DB and triggers one run, tracking the run
    outcome on the row ("runsuccess"/"runerror").
    """
    tenant = e.tenant
    job = JobBuilder.buildmonitorcpejob(tenant)
    rundeck_client = ServiceContext().getRdClient()
    session = object_session(tenant)
    if job is None:
        logger.error("monitor cpe job build failded")
        return
    try:
        rundeck_reponse = rundeck_client.import_job(job.to_xml(), fmt="xml",
                                                    dupeOption="create",
                                                    project="dms-sa",
                                                    uuidOption="remove")
    except Exception:
        logger.error("import account(%s) job failed, for connection reason"
                     % tenant.id)
        session.flush()
        return
    if (rundeck_reponse['failed'] is None
            and rundeck_reponse['skipped'] is None):
        # debug print of the raw response removed
        jobid = rundeck_reponse['succeeded'][0]["id"]
        jobname = rundeck_reponse['succeeded'][0]["name"]
        rdjob = job.node.createrdjob(jobname, jobid)
        session.add(rdjob)
        try:
            ret = rundeck_client.run_job(rdjob.jobid)
        except Exception as err:
            # renamed from ``e`` so the event parameter is not shadowed;
            # mark the row failed, consistent with saprovisioning()
            logger.error("runjob (%s) error (%s)" % (rdjob.jobid, err.message))
            rdjob.jobstate = "runerror"
        else:
            # BUG FIX: the original compared against the misspelled
            # 'falied', so failed runs were recorded as "runsuccess".
            if ret['status'] == 'failed':
                rdjob.jobstate = "runerror"
                logger.error("job run error , the execution link: (%s)"
                             % ret["href"])
            else:
                rdjob.jobstate = "runsuccess"
        session.flush()
def addschedcheckjob(cls, accountid):
    """Schedule a one-shot tenant check for *accountid* to run
    ``tenant_check_interval`` seconds from now."""
    ctx = ServiceContext()
    scheduler = ctx.getSchedServcie()
    interval = ctx.getConfigService().get("Orchestrator",
                                          "tenant_check_interval")
    runtime = datetime.now() + timedelta(seconds=int(interval))
    scheduler.add_job(checksvc, args=(accountid,), run_date=runtime)
def addschedcheckjob(cls, accountid):
    """Arrange for checksvc(accountid) to fire once, after the configured
    tenant-check interval has elapsed."""
    context = ServiceContext()
    scheduler = context.getSchedServcie()
    seconds = context.getConfigService().get("Orchestrator",
                                             "tenant_check_interval")
    fire_at = datetime.now() + timedelta(seconds=int(seconds))
    scheduler.add_job(checksvc, args=(accountid,), run_date=fire_at)
def _serviceInitialize(self):
    """Register the shared services (queue, scheduler, config, rundeck
    client) on the global ServiceContext."""
    ctx = ServiceContext()
    ctx.registerQueueService(Queue())
    ctx.registerSchedService(self.initializeScheduler())
    ctx.registerConfigService(self.config)
    rd = Rundeck(server=self.config.get("Rundeck", "rundeck_server"),
                 api_token=self.config.get("Rundeck", "api_token"))
    ctx.registerRdClient(rd)
def serviceactivate(e):
    """Handle Package_Activate: schedule the periodic tenant check, record
    the FSM state and ensure the tenant's local temp directory exists."""
    tenant = e.tenant
    session = object_session(tenant)
    JobBuilder.addschedcheckjob(tenant.id)
    tenant.state = e.fsm.current
    base = ServiceContext().getConfigService().get("File", "local_temp_path")
    tenant_path = os.path.join(base, tenant.id)
    if not os.path.exists(tenant_path):
        os.makedirs(tenant_path)
    session.flush()
    session.commit()
    logger.info("Account: %s [Package_Activate] handle successfully" % tenant.id)
def _serviceInitialize(self):
    """Wire the queue, scheduler, configuration and rundeck client into
    the process-wide ServiceContext."""
    context = ServiceContext()
    context.registerQueueService(Queue())
    context.registerSchedService(self.initializeScheduler())
    context.registerConfigService(self.config)
    server = self.config.get("Rundeck", "rundeck_server")
    token = self.config.get("Rundeck", "api_token")
    context.registerRdClient(Rundeck(server=server, api_token=token))
def serviceactivate(e):
    """On package activation: kick off the scheduled tenant check, persist
    the new FSM state and create the per-tenant scratch directory."""
    tenant = e.tenant
    session = object_session(tenant)
    JobBuilder.addschedcheckjob(tenant.id)
    tenant.state = e.fsm.current
    cfg = ServiceContext().getConfigService()
    scratch = os.path.join(cfg.get("File", "local_temp_path"), tenant.id)
    if not os.path.exists(scratch):
        os.makedirs(scratch)
    session.flush()
    session.commit()
    logger.info("Account: %s [Package_Activate] handle successfully" % tenant.id)
def __init__(self, host=None, header_name=None, header_value=None,
             cookie=None):
    """
    Constructor of the class.
    """
    self.rest_client = RESTClientObject()
    self.default_headers = {}
    if header_name is not None:
        self.default_headers[header_name] = header_value
    # fall back to the configured DSO endpoint when no host is given
    if host is not None:
        self.host = host
    else:
        self.host = ServiceContext().getConfigService().get("DSO", "url")
    self.cookie = cookie
    # Set default User-Agent.
    self.user_agent = 'Python-Swagger/1.0.0'
def saprovisioning(e):
    """Import and launch every SA-enable job for the event's tenant.

    For each built job: import it into the "dms-sa" rundeck project,
    persist a rundeck-job row, run it once and record the outcome
    ("runsuccess"/"runerror") on the row.  Aborts the whole batch on an
    import connection failure.
    """
    tenant = e.tenant
    jobs = JobBuilder.buildsaenablejobs(tenant)
    session = object_session(tenant)
    rundeck_client = ServiceContext().getRdClient()
    for job in jobs:
        try:
            rundeck_reponse = rundeck_client.import_job(job.to_xml(),
                                                        fmt="xml",
                                                        dupeOption="create",
                                                        project="dms-sa",
                                                        uuidOption="remove")
        except Exception:
            logger.error(
                "import account(%s) job failed, for connection reason"
                % tenant.id)
            session.flush()
            return
        if (rundeck_reponse['failed'] is None
                and rundeck_reponse['skipped'] is None):
            # debug print of the raw response removed
            jobid = rundeck_reponse['succeeded'][0]["id"]
            jobname = rundeck_reponse['succeeded'][0]["name"]
            rdjob = job.node.createrdjob(jobname, jobid)
            session.add(rdjob)
            try:
                ret = rundeck_client.run_job(rdjob.jobid)
            except Exception as err:
                # renamed from ``e`` so the event parameter is not shadowed
                logger.error("runjob (%s) error (%s)"
                             % (rdjob.jobid, err.message))
                rdjob.jobstate = "runerror"
            else:
                # BUG FIX: original compared against the misspelled
                # 'falied', so failed runs were marked "runsuccess".
                if ret['status'] == 'failed':
                    rdjob.jobstate = "runerror"
                    logger.error("job run error , the execution link: (%s)"
                                 % ret["href"])
                else:
                    rdjob.jobstate = "runsuccess"
            session.flush()
def __init__(self, host=None, header_name=None, header_value=None,
             cookie=None):
    """
    Constructor of the class.
    """
    self.rest_client = RESTClientObject()
    self.default_headers = {}
    if header_name is not None:
        self.default_headers[header_name] = header_value
    # no explicit host: read the DSO endpoint from configuration
    self.host = (host if host is not None
                 else ServiceContext().getConfigService().get("DSO", "url"))
    self.cookie = cookie
    # Set default User-Agent.
    self.user_agent = 'Python-Swagger/1.0.0'
def run(self):
    """Consume avro-encoded kafka messages, decode them and enqueue the
    resulting events.

    Uses the schema and consumer prepared by _initializeschema() /
    _initializeconsumer().
    """
    queue = ServiceContext().getQueueService()
    self._initializeschema()
    self._initializeconsumer()
    for msg in self.consumer:
        value = bytearray(msg.value)
        topic = msg.topic
        # first 5 bytes are a header (magic byte + schema id); skip them
        bytes_reader = io.BytesIO(value[5:])
        decoder = avro.io.BinaryDecoder(bytes_reader)
        reader = avro.io.DatumReader(self.schema)
        kafkamsg = reader.read(decoder)
        try:
            jsondata = json.loads(kafkamsg['rawdata'])
            eventType = jsondata["eventName"]
            jsondata['topic'] = topic
            # debug print of the event removed
            queue.put(EventFactory.getEvent(eventType, jsondata))
        except Exception:
            # narrowed from a bare except; log message typos fixed
            self.error("has exception when resolving kafka message.")
def run(self):
    """Consume plain-JSON kafka events and push them onto the event queue."""
    ctx = ServiceContext()
    queue = ctx.getQueueService()
    config = ctx.getConfigService()
    constructor = "KafkaConsumer(%s,group_id=%s,bootstrap_servers=%s)"
    topics = config.get("Input Plugin: event_collector", "event_topic")
    group_id = config.get("Input Plugin: event_collector", "event_groupid")
    bootstrap_server = config.get("Message", "kafka_broker")
    # renamed from ``str`` (shadowed the builtin).
    # FIXME(security): eval() of config-derived text can execute arbitrary
    # code; prefer calling KafkaConsumer(...) directly.
    consumer_expr = constructor % (topics, group_id, bootstrap_server)
    self.consumer = eval(consumer_expr)
    for msg in self.consumer:
        topic = msg.topic
        try:
            jsondata = json.loads(msg.value)
            eventType = jsondata["eventName"]
            jsondata['topic'] = topic
            queue.put(EventFactory.getEvent(eventType, jsondata))
        except (KeyError, ValueError) as e:
            # BUG FIX: the original caught IndexError, which json.loads /
            # key lookup never raise, so any malformed message killed the
            # consumer loop.
            self.error(e)
def run(self):
    """Consume plain-JSON kafka events and enqueue them for dispatch."""
    ctx = ServiceContext()
    queue = ctx.getQueueService()
    config = ctx.getConfigService()
    topics = config.get("Input Plugin: event_collector", "event_topic")
    group_id = config.get("Input Plugin: event_collector", "event_groupid")
    bootstrap_server = config.get("Message", "kafka_broker")
    # renamed from ``str`` (shadowed the builtin).
    # FIXME(security): eval() of config-derived text can execute arbitrary
    # code; prefer calling KafkaConsumer(...) directly.
    consumer_expr = ("KafkaConsumer(%s,group_id=%s,bootstrap_servers=%s)"
                     % (topics, group_id, bootstrap_server))
    self.consumer = eval(consumer_expr)
    for msg in self.consumer:
        topic = msg.topic
        try:
            jsondata = json.loads(msg.value)
            eventType = jsondata["eventName"]
            jsondata['topic'] = topic
            queue.put(EventFactory.getEvent(eventType, jsondata))
        except (KeyError, ValueError) as e:
            # BUG FIX: was ``except IndexError`` — dead handler; malformed
            # messages crashed the consumer loop instead of being logged.
            self.error(e)
def buildmonitorcpejob(cls, tenant):
    """Return a MonitorCPETest Job for the first node of the tenant's
    vrouter service, or None when there is none."""
    cfg = ServiceContext().getConfigService()
    section = "monitorcpe"
    node_filter = cfg.get(section, "host")
    command = cfg.get(section, "command")
    lock = cfg.get(section, "getlock")
    unlock = cfg.get(section, "releaselock")
    vrouters = (s for s in tenant.services if s.name == "vrouter")
    for svc in vrouters:
        for node in svc.nodes:
            job = Job()
            job.node = node
            job.setNodeFilter(node_filter)
            job.setGroup(tenant.id)
            job.setName("MonitorCPETest")
            job.addlocalcommand(lock)
            job.addcommand(command + " -c 6620e868-e9df-4c8d-8c74-87fe364f7222 -l 10.74.113.116 -r 10.74.113.117 -x 204 -y 205")
            job.addcommand(unlock)
            return job
    return None
def __init__(self):
    """Cache the configured remote base path for file transfers."""
    self.remote_path_base = ServiceContext().getConfigService().get(
        "File", "remote_path")
def _buildcontext(cls,tenant):
    """
    build {svc:context}

    For every service of *tenant* except "vpc", assemble the template
    context used for its provisioning jobs: global "Agent" settings,
    optional per-service config overrides, per-node temp directories and
    the comma-joined management/service IPs of its neighbor services.

    :param tenant: tenant DB object whose services are being provisioned
    :return: dict mapping service name -> context dict
    """
    svclist = tenant.services
    modelmgr = ModelManager()
    svcmap = {}
    context = {}
    config = ServiceContext().getConfigService()
    tenant_path = os.path.join(config.get("File","local_temp_path"),tenant.id)
    # global parameters shared by every service, from the [Agent] section
    items = config.items("Agent")
    paras = {}
    for key,value in items:
        paras[key] = value
    # index services by name and seed an empty context for each
    for svc in svclist:
        svcmap[svc.name] = svc
        context[svc.name] = {}
    for svc in svclist:
        if svc.name == "vpc":
            logger.info("build sa jobs--skip the vpc")
            continue
        svcdef = modelmgr.getsvfdefbyname(svc.name)
        sn = svcdef.service_neighbors
        mn = svcdef.manage_neighbors
        # per-service config section (if any) overrides the Agent globals
        global_para = paras.copy()
        if config.has_section(svc.name):
            items = config.items(svc.name)
            for key,value in items:
                global_para[key] = value
        ctx = context[svc.name]
        ctx["neighbors_manageip"] = []
        ctx["neighbors_serviceip"] = []
        ctx["account_id"] = tenant.id
        ctx.update(global_para)
        instances = svc.nodes
        for instance in instances:
            # NOTE(review): ctx is shared per service, so these keys end
            # up holding the LAST instance's values — confirm intended.
            ctx["host_name"] = instance.manageip
            ctx["vm_type"] = svc.name
            node_path = os.path.join(tenant_path,instance.manageip)
            if not os.path.exists(node_path):
                os.makedirs(node_path)
            ctx["node_temp_home"] = node_path
        # collect neighbor service IPs (skipping nodes without one)
        for nb in sn:
            corrresponding_svc = svcmap[nb]
            nodes = corrresponding_svc.nodes
            for node in nodes:
                if node.serviceip is not None:
                    ctx["neighbors_serviceip"].append(node.serviceip)
        # collect neighbor management IPs
        for nb in mn:
            corrresponding_svc = svcmap[nb]
            nodes = corrresponding_svc.nodes
            for node in nodes:
                ctx["neighbors_manageip"].append(node.manageip)
        # flatten the collected lists into comma-separated strings
        ctx["neighbors_manageip"] = ",".join(ctx["neighbors_manageip"])
        ctx["neighbors_serviceip"] = ",".join(ctx["neighbors_serviceip"])
        ctx["os"] = svcdef.os
    return context
def _serviceInitialize(self):
    """Register the event queue and the scheduler on the ServiceContext."""
    context = ServiceContext()
    context.registerQueueService(Queue())
    context.registerSchedService(self.initializeScheduler())
def __init__(self):
    """Read zookeeper connection settings from config and construct the
    kazoo client (it is not started here)."""
    cfg = ServiceContext().getConfigService()
    self.zk_address = cfg.get("Inventory", "zk_address")
    self.root_path = cfg.get("Inventory", "zk_root_path")
    self.zk_client = KazooClient(hosts=self.zk_address)
def _initializeSession(self):
    """Build the SQLAlchemy engine and open a session from the configured
    mysql URL."""
    db_url = ServiceContext().getConfigService().get("DB", "mysql_url")
    self.sessionmaker = sessionmaker(bind=create_engine(db_url))
    self.session = self.sessionmaker()
def _buildcontext(cls, tenant):
    """
    build {svc:context}

    For every service of *tenant* except "vpc", assemble the template
    context used for its provisioning jobs: global "Agent" settings,
    optional per-service config overrides, per-node temp directories and
    the comma-joined management/service IPs of its neighbor services.

    :param tenant: tenant DB object whose services are being provisioned
    :return: dict mapping service name -> context dict
    """
    svclist = tenant.services
    modelmgr = ModelManager()
    svcmap = {}
    context = {}
    config = ServiceContext().getConfigService()
    tenant_path = os.path.join(config.get("File", "local_temp_path"),
                               tenant.id)
    # global parameters shared by every service, from the [Agent] section
    items = config.items("Agent")
    paras = {}
    for key, value in items:
        paras[key] = value
    # index services by name and seed an empty context for each
    for svc in svclist:
        svcmap[svc.name] = svc
        context[svc.name] = {}
    for svc in svclist:
        if svc.name == "vpc":
            logger.info("build sa jobs--skip the vpc")
            continue
        svcdef = modelmgr.getsvfdefbyname(svc.name)
        sn = svcdef.service_neighbors
        mn = svcdef.manage_neighbors
        # per-service config section (if any) overrides the Agent globals
        global_para = paras.copy()
        if config.has_section(svc.name):
            items = config.items(svc.name)
            for key, value in items:
                global_para[key] = value
        ctx = context[svc.name]
        ctx["neighbors_manageip"] = []
        ctx["neighbors_serviceip"] = []
        ctx["account_id"] = tenant.id
        ctx.update(global_para)
        instances = svc.nodes
        for instance in instances:
            # NOTE(review): ctx is shared per service, so these keys end
            # up holding the LAST instance's values — confirm intended.
            ctx["host_name"] = instance.manageip
            ctx["vm_type"] = svc.name
            node_path = os.path.join(tenant_path, instance.manageip)
            if not os.path.exists(node_path):
                os.makedirs(node_path)
            ctx["node_temp_home"] = node_path
        # collect neighbor service IPs (skipping nodes without one)
        for nb in sn:
            corrresponding_svc = svcmap[nb]
            nodes = corrresponding_svc.nodes
            for node in nodes:
                if node.serviceip is not None:
                    ctx["neighbors_serviceip"].append(node.serviceip)
        # collect neighbor management IPs
        for nb in mn:
            corrresponding_svc = svcmap[nb]
            nodes = corrresponding_svc.nodes
            for node in nodes:
                ctx["neighbors_manageip"].append(node.manageip)
        # flatten the collected lists into comma-separated strings
        ctx["neighbors_manageip"] = ",".join(ctx["neighbors_manageip"])
        ctx["neighbors_serviceip"] = ",".join(ctx["neighbors_serviceip"])
        ctx["os"] = svcdef.os
    return context
def __init__(self):
    """Remember the remote base directory taken from the [File] section."""
    config = ServiceContext().getConfigService()
    self.remote_path_base = config.get("File", "remote_path")
def __init__(self):
    """Set up zookeeper addressing from the [Inventory] config section and
    instantiate the kazoo client."""
    inventory_cfg = ServiceContext().getConfigService()
    self.zk_address = inventory_cfg.get("Inventory", "zk_address")
    self.root_path = inventory_cfg.get("Inventory", "zk_root_path")
    self.zk_client = KazooClient(hosts=self.zk_address)