def compile_function_start(self, function_id):
    """Emit the VM 'function' header line for *function_id* and record it."""
    n_locals = self.st.get_local_vars_amount(function_id)
    logger.debug('function_id:{}, local_vars_amount:{}'.format(
        function_id, n_locals))
    header = 'function {} {}'.format(function_id, n_locals)
    self.vm_code.append(header)
    logger.info(header)
def readerBaudRate(self):
    # Drain whatever bytes are currently buffered on the serial port,
    # decode them, and forward the resulting text to the receive handler.
    readBytes = self.__serial.read(self.__serial.in_waiting)
    text = self.__rx_decoder.decode(readBytes)
    # Python 2 print statement; message mixes Spanish/English on purpose.
    print "Length datos recibidos = ", len(text)
    self.receivedData(text)
    logger.debug('<' + text + '>')
def cancel(self, event_id):
    """Remove an event from the queue.

    This must be presented the ID as returned by enter(). If the event
    is not in the queue, this raises ValueError.

    NOTE(review): despite the docstring, the code below returns False
    rather than raising ValueError when the event is missing.
    """
    i = 0
    deleted = False
    global logger
    # Python 2 print statement (debug trace).
    print 'event ', event_id, ' to be canceled\n'
    # Linear scan of the scheduler's internal heap; slot 4 of each
    # queued entry holds the event id.
    while i < len(scheduler._queue):
        #print i,'th event in queue is',scheduler._queue[i][4]
        if scheduler._queue[i][4] == int(event_id):
            print 'event ', event_id, ' is queuing\n'
            #time.sleep(3)
            #might cause a problem of inconsistency
            #should try and catch
            scheduler._queue.remove(scheduler._queue[i])
            deleted = True
            break
        i = i + 1
    if deleted:
        msg = 'Successfully canceld event %s'
        logger.debug(msg, event_id)
        scheduler.print_queue()
        # Removing from the middle of the list breaks the heap
        # invariant; restore it before the scheduler pops again.
        heapq.heapify(scheduler._queue)
    else:
        msg = 'fail to cancel event %s'
        logger.error(msg, event_id)
        print msg % event_id
    return deleted
def urgent(self, event_id):
    """Re-queue the event with *event_id* at the front of the scheduler
    (time/priority 0, tag 'URG'); returns True on success, False when
    the event is not queued."""
    global logger
    msg = "Try to elevate event %s to urgent"
    # Find the queue slot holding this event id (slot 4 of each entry).
    i = 0
    while i < len(scheduler._queue):
        if scheduler._queue[i][4] == int(event_id):
            break
        else:
            i += 1
    if i == len(scheduler._queue):
        err = 'Can not find event %s'
        logger.error(err, event_id)
        print err % event_id
        return False
    else:
        # Unpack the queued entry; only handler, argument and uid are
        # reused (time/priority/tag are replaced).
        (tmp, tmp, handle, argument, uid, tmp) = scheduler._queue[i]
        scheduler._queue.remove(scheduler._queue[i])
        # Time and priority 0 sort the new entry to the heap front.
        event = Event(0, 0, handle, argument, uid, 'URG')
        heapq.heappush(scheduler._queue, event)
        # Wake the scheduler if it is blocked waiting for the next event.
        if Wakeup_Lock().IsLocked():
            Wakeup_Lock().OpenLock()
        print 'after urgent insert\n'
        i = 0
        while i < len(scheduler._queue):
            print i, 'th event in queue is', scheduler._queue[i][4]
            i += 1
        #time.sleep(5)
        logger.debug(msg, event_id)
        print msg % event_id
        return True
def checkResponse(self, resStr):
    """Parse a raw modem response string into an ATresponse.

    Returns an ATresponse for ERROR/OK/partial-data responses, or None
    when nothing matched (or parsing raised).
    """
    res = None
    try:
        # Remove
        # resStr = resStr.replace(self.sentString, '')  # Only if we have echo activated
        if resStr.find('ERROR') != -1:
            res = ATresponse(True, self.cmd, [resStr])
        elif resStr.find('OK') != -1:
            resStr = (resStr.replace('\n', '')).replace('\r', '')
            logger.debug("[%s] search %s " % (resStr, commandsList[self.cmd][2]))
            # Capture values
            res = self._parseValues(resStr)
        # BUG FIX: str.find returns -1 (truthy!) when the substring is
        # absent, so the original `elif resStr.find('\r\n'):` matched
        # almost every string. Compare against -1 explicitly.
        elif resStr.find('\r\n') != -1:
            if commandsList[self.cmd][2] != '':
                res = self._parseValues(resStr)
            #searchObj = re.search(commandsList[self.cmd][2], resStr, re.S | re.M)
            #if searchObj:
            #    res = ATresponse(True, self.cmd, searchObj.groups())
    except Exception as e:
        logger.error("Exception parse response, %s" % e)
    return res
def run(self):
    """Poll notebook containers forever, stopping idle ones.

    Every self.delay seconds: list notebooks matching self.prefix and
    stop any container older than self.timeout that has no active
    kernels. All errors are logged and the loop continues.
    """
    while True:
        try:
            notebooks = cnr.get_list_of_notebooks(self.prefix)
            logger.debug(notebooks)
            for container_name, container_created_time in notebooks:
                container_name = container_name.lstrip("/")
                container_created_time = parser.parse(
                    container_created_time, fuzzy=True)
                # Use the container's own tzinfo so the subtraction is
                # between two aware datetimes.
                now = datetime.now(tz=container_created_time.tzinfo)
                running_time = (now - container_created_time).total_seconds()
                logger.debug(container_name + ", " +
                             str(container_created_time) + ", " +
                             str(running_time))
                if running_time > self.timeout:
                    try:
                        # BUG FIX: was `is 0` — an identity check that is
                        # implementation-dependent for ints; compare by value.
                        if len(
                                cnr.get_list_of_active_kernels(
                                    container_name, self.docker_host,
                                    self.enable_ssl)) == 0:
                            logger.debug("stopping " + container_name)
                            cnr.stop_docker_container(container_name)
                    except Exception as e:
                        # FIX: Exception.message does not exist on Python 3;
                        # log the exception itself (as sibling handlers do).
                        logger.debug(e)
        except Exception as e:
            logger.debug(e)
        finally:
            time.sleep(self.delay)
def get_data_from_hdfs_datasource(self, datasource_name, path, **kwargs):
    """GET the contents of *path* from the named HDFS data source.

    Extra keyword arguments are forwarded as query parameters; a
    'verify' kwarg overrides the default certificate (self.cert).
    """
    uri = self.uri % ("hdfs_files_by_path/contents")
    kwargs["hdfs_data_source_name"] = datasource_name
    kwargs["path"] = path
    logger.debug("get " + uri)
    logger.debug("params:" + str(kwargs))
    return requests.get(uri, params=kwargs,
                        verify=kwargs.get("verify", self.cert))
def run(self, instance_id='instance-0000', offav_config='-r -d max-depth=10 --no-remove'):
    """Simulated offline anti-virus scan: log start, sleep, log done."""
    global logger
    task_idx = OfflineAntiVirusThread.idx
    logger.debug('OfflineAntiVirus Task %i Started\n', task_idx)
    # Placeholder for the real scanning work.
    time.sleep(1234)
    logger.debug('OfflineAntiVirus Task %s done\n', task_idx)
def handle(self):
    """Bootstrap the scheduler and seed it with the first perceive-loop event."""
    global logger
    scheduler = whu_sched.scheduler(time.time, time.sleep)
    logger.debug('scheduler initialized')
    #print msg
    first_event = Event.E_LoopPerceive().Gen_Event()
    scheduler.enter(*first_event)
    # Slot 4 of the event tuple is its id.
    logger.debug('Initial event %s added', first_event[4])
def handle(self):
    """Create the event scheduler and enqueue the initial LoopPerceive event."""
    global logger
    sched = whu_sched.scheduler(time.time, time.sleep)
    logger.debug('scheduler initialized')
    #print msg
    event = Event.E_LoopPerceive().Gen_Event()
    sched.enter(*event)
    logger.debug('Initial event %s added', event[4])  # event[4] is the id
def run(self, instance_id='instance-0000', offav_config='-r -d max-depth=10 --no-remove'):
    """Simulate a long-running offline anti-virus task for this thread."""
    global logger
    idx = OfflineAntiVirusThread.idx
    logger.debug('OfflineAntiVirus Task %i Started\n', idx)
    time.sleep(1234)  # stand-in for the actual scan
    logger.debug('OfflineAntiVirus Task %s done\n', idx)
def stop_docker_container_without_active_kernels(container_id, docker_host, enable_ssl):
    """Stop *container_id* only when it has no active kernels.

    Returns None when kernels are still active or the kernel check
    fails; otherwise returns the result of stop_docker_container().
    """
    try:
        kernels = get_list_of_active_kernels(container_id, docker_host,
                                             enable_ssl)
        if len(kernels) > 0:
            return
    except Exception as e:
        # Best-effort: a failed kernel check means we do not stop anything.
        logger.debug(e)
        return
    return stop_docker_container(container_id)
def stop_all_notebook_docker_container():
    """Flask handler: stop all notebook containers matching ?prefix=...

    Falls back to the module default prefix when the parameter is absent.
    Returns 202 with the stopped container name(s), or 403 on error.
    """
    prefix = request.args.get("prefix", docker_container_name_prefix)
    if prefix == None:
        return jsonify({"error": "prefix cannot be None"}), 403
    logger.debug("prefix: " + prefix)
    try:
        # BUG FIX: the request's prefix was validated and logged but the
        # module-level default was passed to stop_all_docker_container();
        # use the requested prefix instead.
        container_name = cnr.stop_all_docker_container(prefix)
    except Exception as e:
        logger.debug(e)
        return jsonify({"error": "%s" % e}), 403
    response = {"container_name": container_name}
    return jsonify(response), 202
def uploads():
    """Flask handler: save the uploaded 'file' under ?target_dir=...

    Creates target_dir if needed. Returns 201 on success, 403 when the
    parameter is missing; raises when the directory cannot be created.
    """
    target_dir = request.args.get("target_dir", None)
    if target_dir == None:
        return jsonify({"error": "target_dir cannot be None"}), 403
    logger.debug("target_dir: " + target_dir)
    # NOTE(security): target_dir comes straight from the request and is
    # interpolated into a shell command — consider os.makedirs() instead.
    ret, stdout, stderr = shell("mkdir -p %s" % target_dir)
    # BUG FIX: was `ret is not 0` — an identity test that only
    # accidentally works for small ints; compare by value.
    if ret != 0:
        raise Exception(stderr)
    file = request.files['file']
    file.save(os.path.join(target_dir, file.filename))
    response = {file.filename: "uploaded"}
    return jsonify(response), 201
def writeDirect(self, data):
    # Write *data* to the serial port: raw bytes when rawMode is set,
    # otherwise log it, echo it via rawData(), and encode before sending.
    # No-op when the port is not active (self.status falsy).
    try:
        if self.status:
            if self.rawMode:
                self.__serial.write(data)
            else:
                logger.debug("tx " + data)
                self.rawData(data)
                self.__serial.write(self.__tx_decoder.encode(data))
    # Python 2 except syntax. NOTE(review): the caught exception `v` is
    # never included in the error message — consider logging it.
    except Exception, v:
        logger.error("Exception writeDirect")
def import_data_to_hdfs_datasource(self, datasource_name, data, path, **kwargs):
    """Upload *data* as a CSV file to *path* on the named HDFS data source.

    Extra keyword arguments are forwarded as query parameters; a
    'verify' kwarg overrides the default certificate (self.cert).
    """
    uri = self.uri % ("hdfs_files_by_path/import")
    kwargs["hdfs_data_source_name"] = datasource_name
    kwargs["path"] = path
    logger.debug("get " + uri)
    logger.debug("params:" + str(kwargs))
    payload = {'file': ('upload.csv', data)}
    return requests.post(uri, params=kwargs, files=payload,
                         verify=kwargs.get("verify", self.cert))
def compile_function_define(self, subroutine_dec):
    """Compile a 'function' subroutine: emit its header, then compile its body."""
    function_id = self.get_class_name() + '.' + subroutine_dec.find_all()[2].text
    # Subroutine header, format: "function <func_name> <local_var_amount>"
    n_locals = self.st.get_local_vars_amount(function_id)
    logger.debug('function_id:{}, local_vars_amount:{}'.format(
        function_id, n_locals))
    header = 'function {} {}'.format(function_id, n_locals)
    self.vm_code.append(header)
    logger.info(header)
    # Compile the statement list of the subroutine body.
    statements = subroutine_dec.find('subroutineBody').find_all(
        'statements', recursive=False)
    self.compile_statements(function_id, statements)
def deal_single_var(self, text):
    """Push the value of the variable named *text* onto the VM stack,
    resolving its segment and index through the symbol table."""
    # logger.info(self.function_id)
    # logger.info(text)
    info = self.symbol_table.check_var_info(self.function_id, text)
    if info['kind'] == 'field':
        logger.debug('处理对象{}'.format(info))
        # Object field: read through the `this` segment (heap).
        # self.vm_code.append('push argument 0')
        # self.vm_code.append('pop pointer 0')
        self.vm_code.append('push this {}'.format(info['index']))
    else:
        # Ordinary variable: read from its local/argument segment.
        self.vm_code.append('push {} {}'.format(info['kind'], info['index']))
def stop(self):
    """Shut down the write thread, close the serial port, and join the reader."""
    if not self.status:
        return
    self.status = False
    # A None sentinel tells the write thread to exit its loop.
    self.__cmdQueue.put(None)
    if self.__writeThread.isAlive():
        self.__writeThread.join()
        logger.debug("Write thread stopped")
    if self.__serial is not None:
        # cancel_read() unblocks a reader stuck in a blocking read.
        self.__serial.cancel_read()
        self.__serial.close()
    if self.__readThread.isAlive():
        self.__readThread.join()
        logger.debug("Read thread stopped")
def _parseValues(self, response):
    """Extract values from *response* using the command's regex (if any).

    Returns an ATresponse carrying the captured groups, or the raw
    response when the command defines no pattern or nothing matched.
    """
    pattern = commandsList[self.cmd][2]
    if pattern != '':
        searchObj = re.search(pattern, response, re.S | re.M)
        if searchObj:
            # BUG FIX: the original logged pprint(...), which prints to
            # stdout and returns None — every log line read "None".
            # Log the captured groups themselves instead.
            logger.debug("%s", searchObj.groups())
            # print "Searched ans : ", searchObj.group(1)
            res = ATresponse(True, self.cmd, searchObj.groups())
        else:
            res = ATresponse(True, self.cmd, [response])
    # No parameter parsing defined
    else:
        res = ATresponse(True, self.cmd, [response])
    return res
def _timeoutHandler(self):
    # Fired when a command's response deadline passes: if we are still
    # waiting for an answer, synthesize a failed "Timeout" ATresponse and
    # deliver it. Always signals the command-processed event at the end.
    try:
        logger.debug("Command response timeout, command %s" %
                     self.__currCommand.cmd)
        answer = None
        # The waiting flag is shared with the reader path; check and
        # clear it under the mutex so only one side delivers an answer.
        self.__cmdMutex.acquire()
        if self.__waitingAnswer:
            self.__waitingAnswer = False
            answer = ATresponse(False, self.__currCommand.cmd, ["Timeout"])
        self.__cmdMutex.release()
        # Deliver outside the lock to avoid holding it during the callback.
        if answer is not None:
            self.receivedAnswer(answer)
    except:
        logger.error("Timeout handler exception")
    self.__cmdProcesssedEvent.set()
def compile(self):
    """Compile every subroutineDec in the parse tree; return the VM code lines."""
    logger.debug('start to compile')
    subroutine_decs = self.soup.find_all('subroutineDec')
    # print('subroutines', subroutines)
    # Dispatch on the subroutine kind keyword.
    dispatch = {
        'function': self.compile_function_define,
        'constructor': self.compile_constructor_define,
        'method': self.compile_method_define,
    }
    for index, subroutine_dec in enumerate(subroutine_decs, start=1):
        logger.info('处理第{}个方法'.format(index))
        kind = subroutine_dec.find().text
        if kind not in dispatch:
            raise Exception('未知的结构:{}'.format(subroutine_dec))
        dispatch[kind](subroutine_dec)
        logger.info('第{}个方法处理完成'.format(index))
    return self.vm_code
def handle(self):
    # Worker test handler: log/print banners and spawn a daemonized
    # offline anti-virus thread.
    msg = '@@@@@@====== Worker test ======@@@@@@ '
    logger.debug(msg)
    print msg
    # i = random.randint(0,99)
    # if i>5 and i <70:
    # event = Event.E_Worker().Gen_Event()
    # scheduler = whu_sched.scheduler(time.time,time.sleep)
    # scheduler.enter(*event)
    # msg = 'New E_Worker event entered!\n'
    # logger.debug(msg)
    # print msg
    # time.sleep(3)
    # Daemon thread: does not keep the process alive on shutdown.
    t = OfflineAntiVirusThread()
    t.setDaemon(True)
    t.start()
    print 'An OffAVT started\n'
    msg = '/********** Worker Handled ***********/\n'
    logger.debug(msg)
    print msg
def handle(self):
    # Duplicate of the worker test handler: log/print banners and start a
    # daemonized OfflineAntiVirusThread.
    msg = '@@@@@@====== Worker test ======@@@@@@ '
    logger.debug(msg)
    print msg
    # i = random.randint(0,99)
    # if i>5 and i <70:
    # event = Event.E_Worker().Gen_Event()
    # scheduler = whu_sched.scheduler(time.time,time.sleep)
    # scheduler.enter(*event)
    # msg = 'New E_Worker event entered!\n'
    # logger.debug(msg)
    # print msg
    # time.sleep(3)
    # Daemon thread: does not keep the process alive on shutdown.
    t = OfflineAntiVirusThread()
    t.setDaemon(True)
    t.start()
    print 'An OffAVT started\n'
    msg = '/********** Worker Handled ***********/\n'
    logger.debug(msg)
    print msg
def stop_notebook_docker_container():
    # Flask handler: stop the notebook container belonging to the POSTed
    # username. Returns 202 with the container name, 403 on error.
    username = request.form.get("username")
    if username == None:
        return jsonify({"error": "username cannot be None"}), 403
    # NOTE(review): the source below is corrupted/redacted ("******") —
    # presumably it logged the username and called a cnr stop function
    # with the docker host and SSL settings before the except clause.
    # Recover the missing code from version control; do not guess it in.
    logger.debug("username: "******"DOCKER_HOST", app.config.get('HOST', "0.0.0.0")), app.config.get("CERTFILE", "") is not "" and app.config.get( "KEYFILE", "") is not "" ) except Exception as e: logger.debug(e) return jsonify({"error": "%s" % e}), 403 response = {"container_name": container_name} return jsonify(response), 202
def compile_constructor_define(self, subroutine_dec):
    """Compile a constructor: header, heap allocation for `this`, then the body."""
    function_id = self.get_class_name() + '.' + subroutine_dec.find_all()[2].text
    # Subroutine header, format: "function <func_name> <local_var_amount>"
    n_locals = self.st.get_local_vars_amount(function_id)
    logger.debug('function_id:{}, local_vars_amount:{}'.format(
        function_id, n_locals))
    header = 'function {} {}'.format(function_id, n_locals)
    self.vm_code.append(header)
    logger.info(header)
    # Allocate heap space for every field of the class ...
    n_fields = self.st.get_class_vars_amount(self.get_class_name())
    self.vm_code.append('push constant {}'.format(n_fields))
    self.vm_code.append('call Memory.alloc 1')
    # ... and point `this` (pointer 0) at the freshly allocated object.
    self.vm_code.append('pop pointer 0')
    # Compile the constructor body statements.
    statements = subroutine_dec.find('subroutineBody').find_all(
        'statements', recursive=False)
    self.compile_statements(function_id, statements)
def sql_execute(self, data_source_name, sql, schema_name=None, database_name="public", **kwargs):
    """POST a SQL script for execution against a registered data source.

    The script is uploaded as a file; extra keyword arguments become
    query parameters, with 'verify' optionally overriding self.cert.
    """
    assert (database_name is not None)
    uri = self.uri % ("db_execute/sql")
    kwargs["data_source_name"] = data_source_name
    kwargs["database"] = database_name
    if schema_name is not None:
        kwargs["schema"] = schema_name
    payload = {'file': ("upload.sql", sql)}
    logger.debug("post " + uri)
    logger.debug("params:" + str(kwargs))
    logger.debug("sql:" + str(sql))
    return requests.post(uri, params=kwargs, files=payload,
                         verify=kwargs.get("verify", self.cert))
def handle(self):
    """Simulated VM perception: ~10% of runs flag a random VM and queue a rescue event."""
    global logger
    rnd = random.randint(0, 99)
    #global scheduler
    scheduler = whu_sched.scheduler(time.time, time.sleep)
    if 40 < rnd < 50:
        vmname = random.choice([
            'instance-00000001',
            'instance-00000002',
            'instance-00000003',
            'instance-00000004',
        ])
        logger.warn("VM %s status anomaly detected", vmname)
        #print msg %vmname
        event = Event.E_RescueVM([vmname]).Gen_Event()
        scheduler.enter(*event)
        logger.debug('RescueVM event %s added', event[4])
        #print msg %event[4]
    else:
        logger.debug("VM status is Ok,size of queue is %s",
                     len(scheduler._queue))
        #print msg %len(scheduler._queue)
    logger.debug('H_PerceiveVM handled')
def handle(self, host_id='host-0000'):
    """Simulated host perception: ~5% of runs flag a random host and queue a rescue event."""
    global logger
    # print 'F_HostStatus called\n'
    # print 'F_GetHostStatus called\n'
    rnd = random.randint(0, 99)
    #global scheduler
    scheduler = whu_sched.scheduler(time.time, time.sleep)
    if 40 < rnd < 45:
        hostname = random.choice([
            'host-0001',
            'host-0002',
            'host-0003',
            'host-0004',
        ])
        logger.warn("Host %s status anomaly detected", hostname)
        #print msg %hostname
        event = Event.E_RescueHost([hostname]).Gen_Event()
        scheduler.enter(*event)
        logger.debug('RescueHost event %s added', event[4])
        #print msg %event[4]
    else:
        logger.debug('Host status is Ok ,size of queue is %s',
                     len(scheduler._queue))
        #print msg %len(scheduler._queue)
    logger.debug('H_PerceiveHost handled')
def handle(self):
    """Randomly simulate a VM anomaly (rolls 41-49) and enqueue an E_RescueVM event."""
    global logger
    roll = random.randint(0, 99)
    #global scheduler
    sched = whu_sched.scheduler(time.time, time.sleep)
    if 40 < roll < 50:
        victim = random.choice([
            'instance-00000001',
            'instance-00000002',
            'instance-00000003',
            'instance-00000004',
        ])
        logger.warn("VM %s status anomaly detected", victim)
        rescue_event = Event.E_RescueVM([victim]).Gen_Event()
        sched.enter(*rescue_event)
        logger.debug('RescueVM event %s added', rescue_event[4])
    else:
        logger.debug("VM status is Ok,size of queue is %s",
                     len(sched._queue))
    logger.debug('H_PerceiveVM handled')
def handle(self, host_id='host-0000'):
    """Randomly simulate a host anomaly (rolls 41-44) and enqueue an E_RescueHost event."""
    global logger
    # print 'F_HostStatus called\n'
    # print 'F_GetHostStatus called\n'
    roll = random.randint(0, 99)
    sched = whu_sched.scheduler(time.time, time.sleep)
    if 40 < roll < 45:
        victim = random.choice([
            'host-0001',
            'host-0002',
            'host-0003',
            'host-0004',
        ])
        logger.warn("Host %s status anomaly detected", victim)
        rescue_event = Event.E_RescueHost([victim]).Gen_Event()
        sched.enter(*rescue_event)
        logger.debug('RescueHost event %s added', rescue_event[4])
    else:
        logger.debug('Host status is Ok ,size of queue is %s',
                     len(sched._queue))
    logger.debug('H_PerceiveHost handled')
def get_container_for_user():
    # Flask handler: report whether the container for ?username=... is
    # running, including its name and port when it is.
    username = request.args.get("username")
    if username == None:
        return jsonify({"error": "username cannot be None"}), 403
    # NOTE(review): the source below is corrupted/redacted ("******") —
    # presumably it logged the username and resolved the user's container
    # name/port before building the JSON response. Recover the missing
    # code from version control; do not guess it in.
    logger.debug("username: "******"running": True, "container_name": container_name, "port": port }) else: return jsonify({ "running": False })
def compile_terms(self, items):
    """Recursively compile a flat sequence of term/operator nodes into VM code."""
    if len(items) == 0:
        return
    head = items[0]
    logger.debug('解析{}'.format(head))
    # Binary operator symbols (duplicates kept from the original list —
    # membership testing is unaffected).
    if head.name == 'symbol' and head.text in [
            '+', '-', '*', '/', '=', '>', '>', '<', '<', '&', '&', '|'
    ]:
        # Operator: compile its right operand, then emit the op (the
        # left operand was pushed by the previous recursive step).
        self.compile_term(items[1])
        self.vm_code.append('{}'.format(self.get_op(head.text)))
        if len(items) == 2:
            return
        self.compile_terms(items[2:])
    elif head.name == 'term':
        self.compile_term(head)
        if len(items) == 1:
            return
        self.compile_terms(items[1:])
    else:
        raise Exception('未知{}'.format(head))
def handle(self):
    """Fan out one perception cycle (VM, Host, VNet) and re-queue the loop event."""
    global logger
    #global scheduler
    scheduler = whu_sched.scheduler(time.time, time.sleep)
    # One perception event per subsystem, then LoopPerceive itself so
    # the cycle repeats.
    for event_cls in (Event.E_PerceiveVM, Event.E_PerceiveHost,
                      Event.E_PerceiveVNet, Event.E_LoopPerceive):
        scheduler.enter(*event_cls().Gen_Event())
    logger.debug("LoopPerceive handled,size of queue is %s",
                 len(scheduler._queue))
def handle(self):
    """Enqueue PerceiveVM, PerceiveHost, PerceiveVNet, then LoopPerceive again."""
    global logger
    #global scheduler
    sched = whu_sched.scheduler(time.time, time.sleep)
    pending = [
        Event.E_PerceiveVM(),    # VM status perception
        Event.E_PerceiveHost(),  # host status perception
        Event.E_PerceiveVNet(),  # virtual-network perception
        Event.E_LoopPerceive(),  # re-arm the perception loop
    ]
    for ev in pending:
        sched.enter(*ev.Gen_Event())
    logger.debug("LoopPerceive handled,size of queue is %s",
                 len(sched._queue))
def handle_array(self, term):
    """Push the value of array_name[index] onto the stack via the `that` segment."""
    logger.debug('===========get into array========={}'.format(term))
    array_name = term.find().text
    # Resolve the array variable and push its base address.
    var_info = self.symbol_table.check_var_info(self.function_id, array_name)
    if var_info['kind'] == 'field':
        self.vm_code.append('push this {}'.format(var_info['index']))
    else:
        self.vm_code.append('push {} {}'.format(var_info['kind'],
                                                var_info['index']))
    # Evaluate the subscript expression ...
    self.compile_expression(term.find('expression'))
    # ... add it to the base address ...
    self.vm_code.append('add')
    # ... aim `that` (pointer 1) at the resulting cell and read it.
    self.vm_code.append('pop pointer 1')
    self.vm_code.append('push that 0')
def handle(self, host_id='host-0000'):
    """Log that the rescue-host handler ran for *host_id* (stub)."""
    global logger
    logger.debug('H_RescueHost handled with %s', host_id)
def handle(self):
    """Log that virtual-network perception was handled (stub)."""
    global logger
    logger.debug("PerceiveVNet handled")
def handle(self, instance_id='instance-00000000'):
    """Log that the rescue-VM handler ran for *instance_id* (stub)."""
    global logger
    logger.debug("H_RescueVM handled with %s", instance_id)
def handle(self):
    # Rescue-VNet handler stub: logs and prints a fixed message.
    global logger
    msg = 'H_RescueVNet handled'
    logger.debug(msg)
    print msg
def handle(self):
    # Test handler: logs and prints a numbered hello banner using the
    # class-level counter Event.E_Test.idx.
    msg = '$$$$$$====== Hello test %i =====$$$$$$ '
    logger.debug(msg, Event.E_Test.idx)
    print msg % Event.E_Test.idx