def run(self):
    """Consume avro-encoded messages from Kafka and enqueue decoded events.

    Reads topic/group/broker settings from the config service, decodes each
    message payload after skipping a 5-byte header (presumably a
    schema-registry-style prefix -- TODO confirm), parses the embedded
    'rawdata' JSON, and pushes the resulting event onto the shared queue.
    """
    ctx = ServiceContext()
    config = ctx.getConfigService()
    queue = ctx.getQueueService()
    self.schema = avro.schema.parse(avro_schema)
    topics = config.get("Input Plugin: kafka_collector", "kafka_topics")
    group_id = config.get("Input Plugin: kafka_collector", "kafka_groupid")
    bootstrap_server = config.get("Message", "kafka_broker")
    # Call the constructor directly instead of eval() on a %-formatted
    # string: eval on config-derived text is an injection risk, and the old
    # code also rebound the builtin "str", which made the str(e) call in the
    # handler below raise TypeError.  NOTE(review): the old eval evaluated
    # the raw config values as Python expressions; this assumes they are
    # plain strings -- confirm the config file format.
    self.consumer = KafkaConsumer(topics,
                                  group_id=group_id,
                                  bootstrap_servers=bootstrap_server)
    for msg in self.consumer:
        value = bytearray(msg.value)
        topic = msg.topic
        # Skip the 5-byte message header before avro decoding.
        bytes_reader = io.BytesIO(value[5:])
        decoder = avro.io.BinaryDecoder(bytes_reader)
        reader = avro.io.DatumReader(self.schema)
        kafkamsg = reader.read(decoder)
        try:
            jsondata = json.loads(kafkamsg['rawdata'])
            eventType = jsondata["eventName"]
            jsondata['topic'] = topic
            queue.put(EventFactory.getEvent(eventType, jsondata))
        except InputError as e:
            self.error(str(e))
        except Exception as e:
            # NOTE(review): the original ended in a bare "except:" whose body
            # was truncated in the source; keep the loop alive but report
            # the failure instead of swallowing it silently.
            self.error(str(e))
def vmcreate(e):
    """Persist a newly created VM node and advance the tenant FSM.

    Looks up the service matching the payload's vmType, runs a partial
    inventory sync against ZooKeeper, records the node in the DB, and fires
    "create_vm_done" once every service of the tenant reports ready.
    """
    tenant = e.tenant
    msg = e.payload
    session = object_session(tenant)

    svc = tenant.getservicebyname(msg.vmType)
    if svc is None:
        logger.error(
            "for node(%s/%s/%s) can not find corresponding service db object"
            % (tenant.id, msg.vmType, msg.stackid))
        return

    # Partial inventory sync for this account before persisting the node.
    logger.info("part sync start.accountId<%s>" % tenant.id)
    zk_address = ServiceContext().getConfigService().get("Inventory", "zk_address")
    account_sync(tenant.id, zk_address)
    logger.info("part sync finished.accountId<%s>" % tenant.id)

    node = svc.createnode()
    node.stackid = msg.stackId
    node.vmtype = msg.vmType
    node.manageip = msg.vmManagementIP
    node.publicip = msg.vmPublicIP
    node.serviceip = msg.vmServiceIP
    session.add(node)
    session.commit()
    logger.info("node(%s/%s/%s) has been created in db"
                % (tenant.id, node.vmtype, node.stackid))

    # Fire the transition only when every service of this tenant is ready.
    ready = all(s.isready() for s in tenant.services)
    tenant.state = e.fsm.current
    if ready:
        tenant.getSM().trigger("create_vm_done", tenant=tenant)
def vmcreate(e):
    """Handle a VM-created event: store the node row and step the FSM.

    When no service matches the payload's vmType the event is logged and
    dropped.  Otherwise the account inventory is partially synced from
    ZooKeeper, the node is committed to the DB, and "create_vm_done" fires
    once all of the tenant's services report ready.
    """
    tenant, msg = e.tenant, e.payload
    session = object_session(tenant)
    svc = tenant.getservicebyname(msg.vmType)
    if svc is None:
        # No matching service row; nothing to attach the node to.
        logger.error(
            "for node(%s/%s/%s) can not find corresponding service db object"
            % (tenant.id, msg.vmType, msg.stackid))
        return

    logger.info("part sync start.accountId<%s>" % tenant.id)
    ctx = ServiceContext()
    zk_host = ctx.getConfigService().get("Inventory", "zk_address")
    account_sync(tenant.id, zk_host)
    logger.info("part sync finished.accountId<%s>" % tenant.id)

    # Copy the VM coordinates from the event payload onto the new node row.
    node = svc.createnode()
    node.stackid = msg.stackId
    node.vmtype = msg.vmType
    node.manageip = msg.vmManagementIP
    node.publicip = msg.vmPublicIP
    node.serviceip = msg.vmServiceIP
    session.add(node)
    session.commit()
    logger.info("node(%s/%s/%s) has been created in db"
                % (tenant.id, node.vmtype, node.stackid))

    all_ready = all(service.isready() for service in tenant.services)
    tenant.state = e.fsm.current
    if all_ready:
        tenant.getSM().trigger("create_vm_done", tenant=tenant)
def buildmonitorcpejob(cls, tenant):
    """Build a MonitorCPETest job for the first node of the tenant's
    "vrouter" service, or return None when no such node exists.

    NOTE(review): the early return means only the first vrouter node is
    ever used -- confirm that is intentional.
    """
    config = ServiceContext().getConfigService()
    host = config.get("monitorcpe", "host")
    cmd = config.get("monitorcpe", "command")
    lockcmd = config.get("monitorcpe", "getlock")
    releasecmd = config.get("monitorcpe", "releaselock")

    for svc in tenant.services:
        if svc.name != "vrouter":
            continue
        for node in svc.nodes:
            job = Job()
            job.node = node
            job.setNodeFilter(host)
            job.setGroup(tenant.id)
            job.setName("MonitorCPETest")
            # Serialize runs via the lock command; release it afterwards.
            job.addlocalcommand(lockcmd)
            # Hard-coded endpoints/ids -- presumably a canned smoke test.
            job.addcommand(
                cmd + " -c 6620e868-e9df-4c8d-8c74-87fe364f7222 -l 10.74.113.116 -r 10.74.113.117 -x 204 -y 205"
            )
            job.addcommand(releasecmd)
            return job
    return None
def addschedcheckjob(cls, accountid):
    """Schedule a one-shot checksvc run for *accountid* after the
    configured Orchestrator tenant_check_interval (seconds)."""
    ctx = ServiceContext()
    # NOTE(review): "getSchedServcie" typo is the project API name.
    sched = ctx.getSchedServcie()
    config = ctx.getConfigService()
    interval = config.get("Orchestrator", "tenant_check_interval")
    run_at = datetime.now() + timedelta(seconds=int(interval))
    sched.add_job(checksvc, args=(accountid,), run_date=run_at)
def addschedcheckjob(cls, accountid):
    """Queue a single delayed health check (checksvc) for this account.

    The delay comes from the Orchestrator/tenant_check_interval config
    value, interpreted as a number of seconds from now.
    """
    context = ServiceContext()
    scheduler = context.getSchedServcie()  # project API name (typo included)
    cfg = context.getConfigService()
    seconds = int(cfg.get("Orchestrator", "tenant_check_interval"))
    when = datetime.now() + timedelta(seconds=seconds)
    scheduler.add_job(checksvc, args=(accountid,), run_date=when)
def serviceactivate(e):
    """Handle Package_Activate: schedule the periodic check job, record the
    FSM state, and ensure the tenant's local temp directory exists."""
    tenant = e.tenant
    session = object_session(tenant)
    JobBuilder.addschedcheckjob(tenant.id)
    tenant.state = e.fsm.current

    # Create <local_temp_path>/<tenant id> if it is not there yet.
    base = ServiceContext().getConfigService().get("File", "local_temp_path")
    tenant_path = os.path.join(base, tenant.id)
    if not os.path.exists(tenant_path):
        os.makedirs(tenant_path)

    session.flush()
    session.commit()
    logger.info("Account: %s [Package_Activate] handle successfully" % tenant.id)
def serviceactivate(e):
    """Package_Activate handler.

    Schedules the tenant check job, stores the current FSM state on the
    tenant, makes sure the per-tenant temp directory exists, then commits.
    """
    tenant = e.tenant
    session = object_session(tenant)
    JobBuilder.addschedcheckjob(tenant.id)
    tenant.state = e.fsm.current

    ctx = ServiceContext()
    temp_root = ctx.getConfigService().get("File", "local_temp_path")
    tenant_dir = os.path.join(temp_root, tenant.id)
    if not os.path.exists(tenant_dir):
        # First activation for this tenant: create its working directory.
        os.makedirs(tenant_dir)

    session.flush()
    session.commit()
    logger.info("Account: %s [Package_Activate] handle successfully" % tenant.id)
def run(self):
    """Consume JSON event messages from Kafka and enqueue them.

    Reads topic/group/broker settings from the config service, parses each
    message body as JSON, tags it with its source topic, and pushes the
    corresponding event object onto the shared queue.
    """
    ctx = ServiceContext()
    queue = ctx.getQueueService()
    config = ctx.getConfigService()
    topics = config.get("Input Plugin: event_collector", "event_topic")
    group_id = config.get("Input Plugin: event_collector", "event_groupid")
    bootstrap_server = config.get("Message", "kafka_broker")
    # Call the constructor directly instead of eval() on a %-formatted
    # string: eval on config-derived text is an injection risk, and the old
    # code also shadowed the builtin "str".  NOTE(review): the eval form
    # evaluated raw config values as Python expressions; this assumes they
    # are plain strings -- confirm the config file format.
    self.consumer = KafkaConsumer(topics,
                                  group_id=group_id,
                                  bootstrap_servers=bootstrap_server)
    for msg in self.consumer:
        topic = msg.topic
        try:
            jsondata = json.loads(msg.value)
            eventType = jsondata["eventName"]
            jsondata['topic'] = topic
            queue.put(EventFactory.getEvent(eventType, jsondata))
        except (IndexError, ValueError, KeyError) as e:
            # The try body raises ValueError (malformed JSON) or KeyError
            # (missing "eventName"); the original caught only IndexError,
            # which none of these statements raise, so one bad message
            # killed the consumer loop.
            self.error(e)
def run(self):
    """Kafka event-collector loop: decode JSON messages into queue events.

    Pulls topic/group/broker settings from the config service, then for
    every message parses the JSON body, records the source topic on it,
    and enqueues the event built by EventFactory.
    """
    ctx = ServiceContext()
    queue = ctx.getQueueService()
    config = ctx.getConfigService()
    topics = config.get("Input Plugin: event_collector", "event_topic")
    group_id = config.get("Input Plugin: event_collector", "event_groupid")
    bootstrap_server = config.get("Message", "kafka_broker")
    # Direct constructor call replaces eval() of a %-formatted string:
    # eval on config-derived text is an injection risk, and the old code
    # also rebound the builtin "str".  NOTE(review): eval treated the raw
    # config values as Python expressions; this assumes plain strings --
    # confirm the config file format.
    self.consumer = KafkaConsumer(topics,
                                  group_id=group_id,
                                  bootstrap_servers=bootstrap_server)
    for msg in self.consumer:
        topic = msg.topic
        try:
            jsondata = json.loads(msg.value)
            eventType = jsondata["eventName"]
            jsondata['topic'] = topic
            queue.put(EventFactory.getEvent(eventType, jsondata))
        except (IndexError, ValueError, KeyError) as e:
            # Malformed JSON raises ValueError and a missing "eventName"
            # raises KeyError; the original handler caught only IndexError
            # (dead code here), so any bad message crashed the loop.
            self.error(e)
def buildmonitorcpejob(cls, tenant):
    """Create the MonitorCPETest job for the tenant's first vrouter node.

    Returns the configured Job, or None when the tenant has no "vrouter"
    service with at least one node.  NOTE(review): only the first node is
    ever used because of the early return -- confirm intended.
    """
    ctx = ServiceContext()
    cfg = ctx.getConfigService()
    host = cfg.get("monitorcpe", "host")
    cmd = cfg.get("monitorcpe", "command")
    lockcmd = cfg.get("monitorcpe", "getlock")
    releasecmd = cfg.get("monitorcpe", "releaselock")

    for service in tenant.services:
        if service.name == "vrouter":
            for node in service.nodes:
                job = Job()
                job.node = node
                job.setNodeFilter(host)
                job.setGroup(tenant.id)
                job.setName("MonitorCPETest")
                # Take the lock locally, run the monitor command, release.
                job.addlocalcommand(lockcmd)
                # Hard-coded ids/addresses -- presumably a canned test case.
                job.addcommand(
                    cmd + " -c 6620e868-e9df-4c8d-8c74-87fe364f7222 -l 10.74.113.116 -r 10.74.113.117 -x 204 -y 205"
                )
                job.addcommand(releasecmd)
                return job
    return None