class Vrm(object):
    """Driver front-end for creating and managing virtual machine instances."""

    def __init__(self):
        # Active driver object; bound later by init_object().
        self.driver = None
        # True once the driver reports the VM already exists remotely.
        self.is_synced = False
        self.logger = Logger()
        self.settings = Settings()
        # Cache the valid driver names so settings are not re-parsed per call.
        self.valid_driver = self.__list_valid_driver()

    def __list_valid_driver(self):
        """Return the list of supported driver names from the settings file."""
        return self.settings.get_item("vrm_default", "valid_driver").split(",")

    def init_object(self, vm_name, driver, memory=None, hd_size=None, password=None):
        """Load the driver module for *driver* and bind it to *vm_name*.

        :return: True on success, False when the driver is not supported.
        """
        # Use the cached list instead of re-reading settings (original re-parsed).
        if driver in self.valid_driver:
            self.driver = __import__("vrm.%s_driver" % driver,
                                     fromlist=["*"]).Driver(vm_name, memory, hd_size, password)
            # Fixed typos in the original message ("successfuly", "VirtualMacnine").
            self.logger.success("[Vrm] Driver Object successfully initialized for VirtualMachine %s" % vm_name)
            if self.driver.is_synced:
                self.is_synced = True
            return True
        else:
            self.logger.critical("[Vrm] Invalid or not supported Driver, %s" % driver)
            return False

    def register_instance(self):
        """Create the VM through the driver unless it already exists.

        :return: True when created, False when it was already there.
        """
        if self.is_synced:
            self.logger.warning("[Vrm] Instance %s already created" % self.driver.vm_name)
            return False
        else:
            self.driver.make_server()
            # Fixed typo in the original message ("successfuly").
            self.logger.success("[Vrm] Instance %s successfully created" % self.driver.vm_name)
            return True
def pack_down_data(trans_parms, data, rx_window, device):
    """Build the 'txpk' JSON body for a downlink frame and pick its send time.

    :param trans_parms: dict of uplink transmission params (modu/codr/datr/freq/tmst/time)
    :param data: raw payload bytes to send
    :param rx_window: 1 or 2 — which receive window to target (ignored for class B)
    :param device: device object carrying dev_class, dev_eui, app and TX params
    :return: (txpk dict, scheduled unix send time in seconds; 0 when only tmst is used)
    """
    FREQ_PLAN = frequency_plan[device.app.freq_plan]
    tx_params = device.get_tx_params()
    s_time = 0
    txpk = {}
    txpk['rfch'] = FREQ_PLAN.RF_CH
    txpk['powe'] = 14  # fixed TX power — assumes this suits the region; TODO confirm
    txpk['modu'] = trans_parms['modu']
    if txpk['modu'] == 'LORA':
        txpk['codr'] = trans_parms['codr']
        txpk['ipol'] = True  # downlinks use inverted polarity
    # rossi support
    # elif txpk['modu'] == 'FSK':
    #     txpk['fdev'] = 3000
    txpk['prea'] = 8
    txpk['size'] = len(data)
    txpk['data'] = b2a_base64(data).decode().rstrip('\n')
    # txpk['ncrc'] = False
    if device.dev_class == ClassType.b:
        # Class B: schedule on the device's next ping slot.
        b_info = BTiming(ConstDB0.dev + hexlify(device.dev_eui).decode(), time.time())
        s_time = b_info.ping_time()
        txpk['time'] = datetime.utcfromtimestamp(s_time).isoformat() + 'Z'
        txpk['freq'] = FREQ_PLAN.b_freq()
        txpk['datr'] = FREQ_PLAN.DataRate(b_info.datr).name
    elif rx_window == 1:
        # RX1: data rate derived from the uplink datr and the RX1DRoffset setting.
        datr = FREQ_PLAN.DataRate[trans_parms['datr']]
        txpk['datr'] = FREQ_PLAN.rx1_datr(datr.value, tx_params['RX1DRoffset']).name
        txpk['freq'] = FREQ_PLAN.rx1_freq(float(trans_parms['freq']))
        txpk['tmst'] = int(trans_parms['tmst']) + tx_params['RxDelay'] * 1000000  # 1us step upadd;
        try:
            ts = iso_to_utc_ts(trans_parms['time'])
            s_time = ts + tx_params['RxDelay']
        except Exception as error:
            # The gateway may not supply a 'time' field; keep s_time = 0 and rely on tmst.
            Logger.error(action=Action.downlink, type=IDType.device, id=device.dev_eui, msg=str(error))
    elif rx_window == 2:
        # RX2: fixed data rate and frequency taken from the device TX params.
        datr = FREQ_PLAN.DataRate(tx_params['RX2DataRate'])
        txpk['datr'] = datr.name
        txpk['freq'] = tx_params['RX2Frequency']
        if device.dev_class == ClassType.c:
            # Class C listens continuously — send immediately.
            txpk['imme'] = True
            s_time = time.time()
        elif device.dev_class == ClassType.a:
            # RX2 opens one second after RX1.
            txpk['tmst'] = int(trans_parms['tmst']) + (tx_params['RxDelay'] + 1) * 1000000
            try:
                ts = iso_to_utc_ts(trans_parms['time'])
                s_time = ts + tx_params['RxDelay'] + 1
            except Exception as error:
                Logger.error(action=Action.downlink, type=IDType.device, id=device.dev_eui, msg=str(error))
    return txpk, s_time
def __init__(self, name):
    """Create the service, remembering its *name*."""
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Name given by the caller.
    self._name = name
    # Alternatives registered for this service accumulate here.
    self._alternatives = []
def __init__(self):
    """Set up the extractor as a runnable, observable worker."""
    Thread.__init__(self)
    # Observers are notified when extraction finishes so that follow-up
    # actions can be triggered from outside this object.
    Observable.__init__(self)
    # Shared logger instance.
    self._log = Logger.get_instance()
def __init__(self):
    """Build an empty overlay keyed by the concrete class's name."""
    Overlay.__init__(self, self.__class__.__name__)
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Graph storage: vertices by name, edges as a flat list.
    self._vertices = {}
    self._edges = []
def _run(self):
    """Build the join-accept packet for this OTAA request and send it."""
    result = write_join_accept_data(self.dev_eui, self.data)
    if not result:
        Logger.error(action=Action.otaa, msg='No packet, pull_info return!!!')
        return
    packet = result[0]
    pull_info = result[1]
    if pull_info.prot_ver == 2:
        # Protocol v2 gateways get retransmission handling in a dedicated thread.
        resend = ReSender(pull_info, packet, self.server)
        resend.start()
    self.server.sendto(packet, pull_info.ip_addr)
    # NOTE(review): assumes pull_info.ip_addr is an (ip, port) tuple — confirm.
    Logger.info(action=Action.otaa, type=IDType.ip_addr, id='%s:%d' % pull_info.ip_addr,
                msg='SENT JOIN ACCEPT %s' % packet)
def prepare(self):
    """Open the database connection before handling the request."""
    # Connect to the database.
    try:
        blog.connect()
    except Exception as e:
        # Log the failure and send the client to the error page.
        Logger().log(e, True)
        self.redirect('/500')
def post(self):
    """Log a user in: validate credentials and set the session cookie.

    Responds with a JSON body: {'status', 'message', 'data'}.
    """
    ret = {'status': 'false', 'message': '', 'data': ''}
    username = self.get_argument("username", None)
    password = self.get_argument("password", None)
    try:
        # NOTE(review): the password appears to be matched as stored — confirm
        # hashing happens before it reaches the database.
        user_obj = UserInfo.get(UserInfo.username == username, UserInfo.password == password)
        if user_obj:
            self.set_secure_cookie("username", username)
            ret['status'] = 'true'
    except UserInfo.DoesNotExist as e:
        Logger().log(e, True)
        ret['message'] = '用户名或密码错误'  # "wrong username or password"
    except Exception as e:
        Logger().log(e, True)
        ret['message'] = '服务器开小差了'  # "server hiccup"
    return self.write(json.dumps(ret))
def add(self):
    """Record this gateway as a receiver for the device, keyed by score.

    Keeps the member with the freshest score in the redis sorted set.

    :return: True when the member was inserted or its score refreshed,
             False when an older entry with a better score already exists.
    """
    score = self.cal_score()
    key = ConstDB0.dev_gateways + hexlify(self.dev_eui).decode()
    # 'NX' → only add when the member does not exist yet; returns 0 when it does.
    if not db1.execute_command('zadd', key, 'NX', score, self.gateway_mac_addr):
        Logger.debug(action='JoinDevGateway', type=IDType.device, id=self.dev_eui,
                     msg='zadd %s, %s already exists' % (key, hexlify(self.gateway_mac_addr).decode()))
        pre_score = db1.zscore(key, self.gateway_mac_addr)
        if score > pre_score:
            # Refresh the stored score (removed stray debug print() left in the original).
            db1.zadd(key, score, self.gateway_mac_addr, nx=False)
            Logger.debug(action='JoinDevGateway', type=IDType.device, id=self.dev_eui,
                         msg='zadd %s, %s refresh from %s to %s' % (
                             key, hexlify(self.gateway_mac_addr).decode(), pre_score, score))
            return True
        else:
            return False
    # First insertion succeeded.
    return True
def analyze_BeaconTimingReq(device, cmd_payload):
    """Handle a BeaconTimingReq MAC command: queue a BeaconTimingAns reply.

    :param device: the requesting device
    :param cmd_payload: must be empty for this command
    """
    # NOTE(review): assert is stripped under -O; consider raising instead.
    assert len(cmd_payload) == 0, "WRONG MAC CMD PAYLOAD OF BeaconTimingReq"
    Logger.info(action=Action.mac_cmd_get, type=IDType.device,
                id=hexlify(device.dev_eui).decode(),
                msg='BeaconTimingReq: %s' % cmd_payload)
    ##action ---- get the next beacon timing and channel
    delay = BeaconTiming.cal_beacon_time_delay(device)
    channel = 0  # presumably single-channel beaconing — TODO confirm
    ##gen payload
    mac_cmd_payload = BeaconTimingAns(delay, channel).return_data()
    beacon_timing_ans = MACCmd(device.dev_eui)
    beacon_timing_ans.push_into_que(CID.BeaconTimingAns, payload=mac_cmd_payload)
def __init__(self, topology, service, environment, alternative):
    """Prepare a simulation run for one service alternative.

    :param topology: the network topology to simulate on
    :param service: the service under evaluation
    :param environment: the environment the simulation runs in
    :param alternative: the service alternative (carries the metrics)
    """
    Thread.__init__(self)
    """ Utils objects """
    # Get the object for filesystem handling
    self._fs = FileSystem.get_instance()
    # Logger
    self._log = Logger.get_instance()
    # Root simulation path (simulation/)
    self._root_simulation_path = self._fs.get_simulations_folder()
    # Specific simulation path (simulation/service_name/timestamp/)
    self._simulation_path = None
    # The topology
    self._topology = topology
    # Service to evaluate
    self._service = service
    # The environment in which this simulation is running
    self._environment = environment
    # The alternative of the service to evaluate
    self._alternative = alternative
    # The metrics to evaluate during this simulation
    self._metrics = alternative.get_metrics()
    # Extractor count: how many extractors have notified this object, out of
    # one expected notification per metric.
    self._extractor_number = len(self._metrics)
    self._extractor_count = 0
    # Initialize the simulation
    self._init()
def __init__(self, com: ICommunication_Controller, estimate_bandwidth: int = 10,
             logger: IPrinter = None):
    """
    Coordinator
    :param com: Communication Thread
    :param estimate_bandwidth: bandwidth estimation, Bytes per second
    :param logger: IPrinter
    """
    self.__com = com
    # Fall back to a file-backed default logger when none is supplied.
    self.__log = logger if logger is not None else Logger(title_info='Coordinator', log_to_file=True)
    self.__estimate_bandwidth = estimate_bandwidth
    self.__group_allocated = set()
    self.__global_allocated = set()
    self.__log.log_message("Coordinator version: {}.".format(VERSION))
def post(self, *args, **kwargs):
    """Create ('post'), update ('patch') or delete ('delete') a friendly link.

    All four form fields plus 'action' are required; responds with JSON.
    """
    ret = {'status': 'false', 'message': '', 'data': ''}
    flink_id = self.get_argument('flink_id', None)
    flink_name = self.get_argument('name', None)
    flink_link = self.get_argument('link', None)
    flink_weight = self.get_argument('weight', None)
    action = self.get_argument('action', None)
    if flink_id and flink_name and flink_link and flink_weight and action:
        if action == 'post':
            # Create a brand-new link entry.
            try:
                flink_obj = FriendlyLink(name=flink_name)
                flink_obj.link = flink_link
                flink_obj.weight = flink_weight
                flink_obj.save()
                ret['status'] = 'true'
                ret['message'] = '友链保存成功'  # saved successfully
            except Exception as e:
                Logger().log(e, True)
                ret['message'] = '友链保存失败'  # save failed
        elif action == 'patch':
            # Update an existing link in place.
            try:
                flink_obj = FriendlyLink.get(FriendlyLink.id == flink_id)
                flink_obj.link = flink_link
                flink_obj.name = flink_name
                flink_obj.weight = flink_weight
                flink_obj.save()
                ret['status'] = 'true'
                ret['message'] = '友链保存成功'  # saved successfully
            except Exception as e:
                Logger().log(e, True)
                ret['message'] = '友链保存失败'  # save failed
        elif action == 'delete':
            try:
                flink_obj = FriendlyLink.get(FriendlyLink.id == flink_id)
                flink_obj.delete_instance()
                ret['status'] = 'true'
                ret['message'] = '友链删除成功'  # deleted successfully
            except Exception as e:
                Logger().log(e, True)
                ret['message'] = '友链删除失败'  # delete failed
        else:
            ret['message'] = '请求非法'  # illegal request
            Logger().log(ret, True)
    else:
        ret['message'] = '参数非法'  # invalid parameters
        Logger().log(ret, True)
    self.write(json.dumps(ret))
def footer_info(self):
    """Return the blog's copyright line, or the generic error text on failure."""
    try:
        return Blog.get(Blog.id == 1).copyright
    except Exception as e:
        # Missing row or database trouble — log it and degrade gracefully.
        Logger().log(e, True)
        return server_error
def analyze_BeaconFreqAns(device, cmd_payload):
    """Handle a BeaconFreqAns MAC command and log the device's answer.

    :param cmd_payload: single status byte; bit 0 is the frequency ACK.
    """
    assert len(cmd_payload) == 1, "WRONG MAC CMD PAYLOAD OF BeaconFreqAns"
    status = cmd_payload[0]
    freq_ack = status & 0b1
    rfu = status >> 1
    if freq_ack == 1:
        Logger.info(action=Action.mac_cmd_get, type=IDType.device,
                    id=hexlify(device.dev_eui).decode(),
                    msg='BeaconFreqAns: %s, freq_ack: %s' % (cmd_payload, freq_ack))
    else:
        # A cleared ACK means the device rejected the beacon frequency — log it
        # at error level. (The original logged both branches identically at
        # info, unlike the sibling *Ans handlers which use error on failure.)
        Logger.error(action=Action.mac_cmd_get, type=IDType.device,
                     id=hexlify(device.dev_eui).decode(),
                     msg='BeaconFreqAns: %s, freq_ack: %s' % (cmd_payload, freq_ack))
def wrapper(request, *args, **kwargs):
    """Reject the request with a 401 JSON payload unless API auth passes."""
    if monitor_api_auth_method(request):
        # Authenticated — forward to the wrapped view.
        return func(request, *args, **kwargs)
    Logger().log(message='API认证未通过', mode=False)
    response = ApiResponse()
    response.code = 401
    response.message = 'API认证未通过'
    return JsonResponse(data=response.__dict__, json_dumps_params={'ensure_ascii': False})
def __init__(self, request_msg, trans_params, gateway):
    """
    :param request_msg:str
    :param trans_params:dict
    :param gateway_mac_addr:
    :return:
    """
    Greenlet.__init__(self)
    # Join-request fields arrive little-endian on the wire; store them big-endian.
    app_eui = int.to_bytes(int.from_bytes(request_msg[1:9], byteorder='little'), byteorder='big', length=8)
    dev_eui = int.to_bytes(int.from_bytes(request_msg[9:17], byteorder='little'), byteorder='big', length=8)
    dev_nonce = int.to_bytes(int.from_bytes(request_msg[17:19], byteorder='little'), byteorder='big', length=2)
    mic = request_msg[19:23]
    hex_dev_eui = hexlify(dev_eui).decode()
    hex_app_eui = hexlify(app_eui).decode()
    Logger.info(action=Action.otaa, type=IDType.device, id=dev_eui,
                msg="JOIN REQ: APP:%s, dev_nonce:%s" % (hex_app_eui, dev_nonce))
    real_app_eui = db0.hget(ConstDB0.dev + hex_dev_eui, 'app_eui')
    # A known device must join through the app it is registered to.
    if real_app_eui is not None and real_app_eui != app_eui:
        raise Exception('Device %s belong to other app %s, not app %s' % (dev_eui, real_app_eui, app_eui))
    else:
        app = Application.objects.get(app_eui)
        if app is None:
            raise KeyError('APP:%s does not exist' % hex_app_eui)
        elif gateway.public is not True and app.user_id != gateway.user_id:
            # Private gateways only serve apps owned by the same user.
            raise AccessDeny(hexlify(gateway.mac_addr).decode(), ConstDB0.app + hex_app_eui)
        self.app = app
    self.dev_eui = dev_eui
    self.request_msg = request_msg
    self.trans_params = trans_params
    self.gateway = gateway
    self.dev_nonce = dev_nonce
    self.mic = mic
def user_relevance_application(request, *args, **kwargs):
    """Associate applications with a user.

    GET renders the association page; POST replaces the user's application
    set with the submitted list.
    """
    uid = kwargs['uid']
    user_obj = models.UserProfile.objects.get(id=uid)
    # Prefix every application name with its project name for display.
    all_application_list = list(models.Application.objects.values('id', 'name'))
    for item in all_application_list:
        item['name'] = str(models.Application.objects.filter(id=item['id']).first().project.name) + '-' + item['name']
    user_application_list = list(user_obj.app.values('id', 'name'))
    for item in user_application_list:
        item['name'] = str(models.Application.objects.filter(id=item['id']).first().project.name) + '-' + item['name']
    # Applications not yet linked to this user.
    sub_application_list = []
    for item in all_application_list:
        if item not in user_application_list:
            sub_application_list.append(item)
    if request.method == 'POST':
        try:
            application_list = request.POST.getlist('application')
            user_obj.app.set(application_list)
            Logger().log(message='[%s]关联应用%s成功' % (user_obj.name, application_list), mode=True)
            return redirect('/log_web/user.html')
        except Exception as e:
            error = str(e)
            Logger().log(message='[%s]关联应用%s失败,%s' % (user_obj.name, request.POST.getlist('application'), error), mode=False)
            # Re-render with the error so the user can retry.
            return render(request, 'user_relevance_application.html',
                          {'uid': uid,
                           'sub_application_list': sub_application_list,
                           'user_application_list': user_application_list,
                           'error': error})
    return render(request, 'user_relevance_application.html',
                  {'uid': uid,
                   'sub_application_list': sub_application_list,
                   'user_application_list': user_application_list})
def _iterate(*args):
    """Run one backtest configuration and return (final_value, description).

    :param args: (strategy_file_name, coin, timeframe, strategy_kwargs)
    :return: (int final portfolio value, str summary), or (0, error text) on failure.
    """
    # Define logger up front so the except block can never raise NameError
    # (the original created it inside the try, after code that can fail).
    logger = None
    try:
        strat_file_name, coin, tf, kwargs = args
        strat_class = utils.import_file('strategy', strat_file_name + '.py')
        cerebro = bt.Cerebro()
        logger = Logger(logging.INFO, sys.stdout)
        kwargs['is_live'] = False
        cerebro.addstrategy(strat_class, **kwargs)
        # Create the data feeds: the target timeframe plus a 1m feed.
        dump = Dump('binance', coin)
        df = dump.get_data(tf, hist_days)
        data = bt.feeds.PandasData(dataname=df)
        cerebro.adddata(data)
        df = dump.get_data('1m', hist_days)
        data = bt.feeds.PandasData(dataname=df)
        cerebro.adddata(data)
        # Set our desired cash start.
        cerebro.broker.setcash(1000.0)
        # Size positions at 95% of available cash.
        cerebro.addsizer(bt.sizers.PercentSizer, percents=95)
        # Set the commission.
        cerebro.broker.setcommission(commission=0.001)
        # Run over everything.
        strat = cerebro.run(maxcpus=1)
        res = int(strat[0].broker.getvalue())
        ret = (res, '%s, %s, %s' % (res, ', '.join(args[:-1]), str(kwargs)))
        print(ret)
        return ret
    except Exception as e:
        if logger is not None:
            logger.error(e)
        print(e)
        return (0, str(e))
def get(self, article_id=None):
    """Render one article plus the ids of its previous/next neighbours.

    Redirects to /404 when no id is given or the article does not exist.
    """
    if article_id:
        try:
            article_obj = Article.get(Article.id == article_id)
            article_data = {
                'id': article_obj.id,
                'title': article_obj.title,
                'content': article_obj.content,
                'read_count': article_obj.read_count,
                'created_date': article_obj.created_date,
                'update_date': article_obj.update_date,
                'article_type': article_obj.article_type.article_type
            }
            # Count this view.
            Article.update(read_count=Article.read_count + 1).where(Article.id == article_id).execute()
            # Next article id; falls back to the current id at the newest article.
            n_article_obj = Article.select(
                Article.id).where(Article.id > article_id).order_by(
                Article.id.asc()).limit(1)
            n_a_id = []
            for i in n_article_obj:
                n_a_id.append(i.id)
            if len(n_a_id) == 1:
                n_a_id = n_a_id[0]
            else:
                n_a_id = article_id
            article_data['n_a_id'] = n_a_id
            # Previous article id; same fallback at the oldest article.
            p_article_obj = Article.select(
                Article.id).where(Article.id < article_id).order_by(
                Article.id.desc()).limit(1)
            p_a_id = []
            for i in p_article_obj:
                p_a_id.append(i.id)
            if len(p_a_id) == 1:
                p_a_id = p_a_id[0]
            else:
                p_a_id = article_id
            article_data['p_a_id'] = p_a_id
        except Article.DoesNotExist as e:
            Logger().log(e, True)
            return self.redirect('/404')
        except Exception as e:
            Logger().log(e, True)
            return self.render('index/500.html')
        return self.render('index/article.html', article_data=article_data)
    self.redirect('/404')
def analyze_PingSlotFreqAns(device, cmd_payload):
    """Handle a PingSlotFreqAns MAC command and log success or failure.

    :param cmd_payload: single status byte; bit 0 = channel ACK, bit 1 = data-rate ACK.
    """
    assert len(cmd_payload) == 1, "WRONG MAC CMD PAYLOAD OF PingSlotFreqAns"
    status = cmd_payload[0]
    ch_ack = status & 0b1
    dr_ack = status >> 1 & 0b1
    rfu = status >> 2
    if ch_ack & dr_ack == 1:
        Logger.info(action=Action.mac_cmd_get, type=IDType.device,
                    id=hexlify(device.dev_eui).decode(),
                    msg='PingSlotFreqAns Success: %s, ch_ack: %s, dr_ack: %s' % (cmd_payload, ch_ack, dr_ack))
    else:
        # Fixed typo in the original log message ("Fqil" -> "Fail").
        Logger.error(action=Action.mac_cmd_get, type=IDType.device,
                     id=hexlify(device.dev_eui).decode(),
                     msg='PingSlotFreqAns Fail: %s, ch_ack: %s, dr_ack: %s' % (cmd_payload, ch_ack, dr_ack))
def __init__(self, overlay):
    """Build a Mininet network for *overlay* and attach the remote controller."""
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Overlay describing the topology to materialize.
    self._overlay = overlay
    # Mininet network: no default controller, custom switches listening on 6634.
    self._net = Mininet(controller=None,
                        switch=CustomSwitch,
                        listenPort=6634,
                        inNamespace=False)
    # Remote OpenFlow controller expected on localhost:6633.
    self._net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=6633)
def del_chart(request):
    """Delete the charts listed in POST 'chart_list'; responds with JSON."""
    if request.method == 'POST':
        response = WebResponse()
        chart_list = request.POST.getlist('chart_list')
        try:
            # All-or-nothing: any failure rolls back every deletion.
            with transaction.atomic():
                for chart_id in chart_list:
                    chart_id = int(chart_id)
                    # NOTE(review): .first() may return None for a stale id and
                    # raise AttributeError here — caught by the except below.
                    chart_obj = models.Chart.objects.filter(id=chart_id).first()
                    chart_obj.delete()
                    Logger().log(message='删除图表成功,%s' % chart_obj.name, mode=True)
            response.message = '删除图表成功'
        except Exception as e:
            response.status = False
            response.error = str(e)
            Logger().log(message='删除图表失败,%s' % str(e), mode=False)
        return JsonResponse(response.__dict__)
class Reclaimer:
    """Collects log files back from all connected workers."""

    def __init__(self, com: ICommunication_Controller, logger: Logger = None):
        self.__com = com
        # Fall back to a file-backed logger when none is provided.
        if logger is None:
            self.__log = Logger(title_info='Retrieve', log_to_file=True)
        else:
            self.__log = logger

    def require_client_log(self):
        """
        Require client_log file from all workers.
        :return: None
        """
        # send request
        for id in self.__com.available_clients:
            self.__com.send_one(id, RequestWorkingLog())
            self.__log.log_message('Acquire log file from worker({}).'.format(id))
        try:
            nodes_ready = set()
            total_nodes = set(self.__com.available_clients)
            # Wait until every known worker has delivered its log bundle.
            while nodes_ready != total_nodes:
                id_from, log = self.__com.get_one()
                if isinstance(log, DoneType):
                    log.restore()
                    file_format = "\n\t\t--> ".join([filename for filename in log.file_list])
                    self.__log.log_message('Save file for {}.\n\tList:\n\t\t--> {}'.format(id_from, file_format))
                    nodes_ready.add(id_from)
                    self.__log.log_message('Node({}) is done, {} is done.'.format(id_from, nodes_ready))
        except Exception as e:
            # print DEBUG message
            import sys
            import traceback
            exc_type, exc_value, exc_tb = sys.exc_info()
            exc_tb = traceback.format_exception(exc_type, exc_value, exc_tb)
            exc_format = "".join(exc_tb)
            self.__log.log_error('Exception occurred: {}\n\t{}'.format(e, exc_format))
        # print DEBUG message
        self.__log.log_message('Done.')
def del_template(request):
    """Delete templates and purge their related redis monitor/alert state."""
    if request.method == 'POST':
        response = WebResponse()
        template_list = request.POST.getlist('template_list')
        try:
            with transaction.atomic():
                for template_id in template_list:
                    template_id = int(template_id)
                    template_obj = models.Template.objects.filter(
                        id=template_id).first()
                    # Purge related monitor items, triggers and alert counters from redis.
                    alert_counter_redis_key = settings.ALERT_COUNTER_REDIS_KEY
                    application_obj_list = template_obj.applications.all()
                    for application_obj in application_obj_list:
                        key_in_redis = '*_%s_*' % application_obj.name
                        key_list = REDIS_OBJ.keys(key_in_redis)
                        for key in key_list:
                            REDIS_OBJ.delete(
                                key)  # delete the related monitor-item / trigger keys in redis
                    template_trigger_id_list = []
                    template_trigger_obj_list = template_obj.trigger_set.all()
                    for trigger_obj in template_trigger_obj_list:
                        template_trigger_id_list.append(str(trigger_obj.id))
                    alert_counter_data = json.loads(
                        REDIS_OBJ.get(alert_counter_redis_key).decode())
                    # Remove related alert-counter entries.
                    # Shape: {action_id: {hostname: {trigger_id: {"last_alert": ..., "counter": ...}}}}
                    for key1, value1 in alert_counter_data.items():
                        # key1 -> action_id, value1 -> {hostname: {...}}
                        for key2, value2 in value1.items():
                            # key2 -> hostname, value2 -> {trigger_id: {...}}
                            for key3 in list(value2.keys()):
                                if key3 in template_trigger_id_list:
                                    del alert_counter_data[key1][key2][
                                        key3]  # drop this trigger's counter for the host
                    template_obj.delete()
                    Logger().log(message='删除模板成功,%s' % template_obj.name, mode=True)
            response.message = '删除模板成功'
        except Exception as e:
            response.status = False
            response.error = str(e)
            Logger().log(message='删除模板失败,%s' % str(e), mode=False)
        return JsonResponse(response.__dict__)
def __init__(self, controller_path, controller_cmd):
    """Remember where the controller lives and how to start it."""
    # Framework file-system handler.
    self._fs = FileSystem.get_instance()
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Location of the controller and the command used to launch it.
    self._path = controller_path
    self._cmd = controller_cmd
    # Populated once the controller process is spawned.
    self._controller_process = None
def project_del(request):
    """Delete a project and its rsync log directory; responds with JSON."""
    response = BaseResponse()
    if request.method == 'POST':
        pid = request.POST.get('pid')
        try:
            # NOTE(review): .first() may return None for a stale id — caught below.
            project_obj = models.Project.objects.filter(id=pid).first()
            project_name = project_obj.name
            # Remove the project's rsync log folder if it exists on disk.
            if os.path.exists(os.path.join(settings.RSYNC_LOG_DIR, project_obj.path_name)):
                shutil.rmtree(os.path.join(settings.RSYNC_LOG_DIR, project_obj.path_name))
            project_obj.delete()
            response.message = '删除成功'
            Logger().log(message='[%s]删除项目[%s]成功' % (request.user.name, project_name), mode=True)
        except Exception as e:
            response.status = False
            response.message = '删除失败'
            response.error = str(e)
            Logger().log(message='[%s]删除项目失败,%s' % (request.user.name, str(e)), mode=False)
    return JsonResponse(response.__dict__)
def __init__(self, node_id, logger=None):
    """Initialize per-node state.

    :param node_id: id of this node.
    :param logger: optional Logger; a default Logger('None') is created when omitted.
    """
    super().__init__()
    self.Node_Id = node_id
    # The original used a default argument of Logger('None'), which is evaluated
    # once at def time and shared across every call; build it per-instance instead.
    self.Logger = logger if logger is not None else Logger('None')
    self.Global_State = 0
    self.Weights_Last_Received = {}
    # Start with a zero entry for every known node.
    for key in GlobalSettings.get_default().nodes:
        self.Weights_Last_Received[key] = 0
def get(self):
    """Render the profile page for the user identified by the session cookie."""
    username = self.get_secure_cookie('username')
    user_info = {'username': username}
    try:
        user_obj = UserInfo.get(UserInfo.username == username)
        user_info['email'] = user_obj.email
        self.render('admin/profile.html', user_info=user_info)
    except Exception as e:
        # Unknown user or database failure — show the error page.
        Logger().log(e, True)
        self.render('index/500.html')
def analyze_NewChannelAns(device, cmd_payload):
    """Handle a NewChannelAns MAC command and log success or failure.

    :param cmd_payload: single status byte; bit 0 = channel ACK, bit 1 = data-rate ACK.
    """
    assert len(cmd_payload) == 1, 'WRONG MAC CMD PAYLOAD OF NewChannelAns'
    status = cmd_payload[0]
    ch_ack = status & 0b1
    dr_ack = status >> 1 & 0b1
    rfu = status >> 2
    if ch_ack & dr_ack == 1:
        # Fixed copy-paste in the log text: this handler is NewChannelAns,
        # but the original messages said 'DevStatusAns'.
        Logger.info(action=Action.mac_cmd_get, type=IDType.device,
                    id=hexlify(device.dev_eui).decode(),
                    msg='NewChannelAns Success: %s, ch_ack: %d, dr_ack: %d' % (cmd_payload, ch_ack, dr_ack))
    else:
        Logger.error(action=Action.mac_cmd_get, type=IDType.device,
                     id=hexlify(device.dev_eui).decode(),
                     msg='NewChannelAns Fail: %s, ch_ack: %d, dr_ack: %d' % (cmd_payload, ch_ack, dr_ack))
def __init__(self):
    """Create an empty network overlay named after the concrete class."""
    Overlay.__init__(self, self.__class__.__name__)
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Internal stores: switches and hosts by name.
    self._switches = {}
    self._hosts = {}
    # Device connectivity; kept as a list to preserve host-pe ordering.
    self._links = []
def add_user(request):
    """Create a user: GET shows the form, POST validates and saves it."""
    if request.method == 'GET':
        form_obj = user_form.AddUserForm()
        return render(request, 'add_user.html', {'form_obj': form_obj})
    elif request.method == 'POST':
        form_obj = user_form.AddUserForm(request.POST)
        if form_obj.is_valid():
            data = form_obj.cleaned_data
            try:
                with transaction.atomic():
                    user_obj = models.UserProfile.objects.create(**data)
                    Logger().log(message='创建用户成功,%s' % user_obj.email, mode=True)
                    return redirect('/monitor_web/user.html')
            except Exception as e:
                Logger().log(message='创建用户失败,%s' % str(e), mode=False)
                # NOTE(review): raising here propagates out of the view instead
                # of re-rendering the form — confirm this is intended.
                raise ValidationError(_('添加用户失败'), code='invalid')
        else:
            # Invalid form — re-render with field errors.
            return render(request, 'add_user.html', {'form_obj': form_obj})
def __init__(self, node_id, logger=None):
    """Initialize the communication controller for one node.

    :param node_id: id of this node.
    :param logger: optional Logger; a default Logger('None') is created when omitted.
    """
    ICommunication_Ctrl.__init__(self)
    self.Node_ID = node_id
    self.BlockWeights = dict()
    # system diagnosed info
    # The original used a default argument of Logger('None'), evaluated once at
    # def time and shared across every call; build it per-instance instead.
    self.Log = logger if logger is not None else Logger('None')
def __add_schedule_background_tasks(self):
    """Register today's schedule entries as one-shot reminder jobs."""
    sch_date = self.today
    t_schedules = self.sl.get_today_schedulers(
        sch_date.strftime('%Y-%m-%d'))
    if t_schedules and len(t_schedules) > 0:
        for sch in t_schedules:
            tell_time = dt(self.today.year, self.today.month, self.today.day,
                           int(sch.get('hour')), int(sch.get('minute')), 0)
            # Entries whose time has already passed are not added as reminders.
            if tell_time > dt.now():
                self.bs.add_job(
                    self.__show_msg,
                    trigger=DateTrigger(run_date=tell_time),
                    args=[sch.get('title'), sch.get('describ')])
    Logger.info(self.bs.get_jobs(), module='__add_schedule_background_tasks')
def add_item(request):
    """Create a monitor item: GET shows the form, POST validates and saves it."""
    if request.method == 'GET':
        form_obj = item_form.AddItemForm()
        return render(request, 'add_item.html', {'form_obj': form_obj})
    elif request.method == 'POST':
        form_obj = item_form.AddItemForm(request.POST)
        if form_obj.is_valid():
            data = form_obj.cleaned_data
            try:
                with transaction.atomic():
                    item_obj = models.Item.objects.create(**data)
                    Logger().log(message='创建监控项成功,%s' % item_obj.key, mode=True)
                    return redirect('/monitor_web/item.html')
            except Exception as e:
                Logger().log(message='创建监控项失败,%s' % str(e), mode=False)
                # NOTE(review): raising here propagates out of the view instead
                # of re-rendering the form — confirm this is intended.
                raise ValidationError(_('添加监控项失败'), code='invalid')
        else:
            # Invalid form — re-render with field errors.
            return render(request, 'add_item.html', {'form_obj': form_obj})
def del_item(request):
    """Delete the monitor items listed in POST 'item_list'; responds with JSON."""
    if request.method == 'POST':
        response = WebResponse()
        item_list = request.POST.getlist('item_list')
        try:
            # All-or-nothing: any failure rolls back every deletion.
            with transaction.atomic():
                for item_id in item_list:
                    item_id = int(item_id)
                    # NOTE(review): .first() may return None for a stale id and
                    # raise AttributeError here — caught by the except below.
                    item_obj = models.Item.objects.filter(id=item_id).first()
                    item_obj.delete()
                    Logger().log(message='删除监控项成功,%s' % item_obj.key, mode=True)
            response.message = '删除监控项成功'
        except Exception as e:
            response.status = False
            response.error = str(e)
            Logger().log(message='删除监控项失败,%s' % str(e), mode=False)
        return JsonResponse(response.__dict__)
def get(self):
    """Render the paginated article index.

    Query params: p (page number, default 1), pre (articles per page, default 10).
    """
    current_page = self.get_argument("p", 1)
    per_page_count = self.get_argument("pre", 10)
    try:
        current_page = int(current_page)
        per_page_count = int(per_page_count)
    except ValueError as e:
        Logger().log(e, True)
        # Bug fix: the original fell through after redirect() and kept
        # executing with the non-numeric arguments; stop here instead.
        return self.redirect('/index')
    try:
        data_count = Article.select().count()
        page_obj = Page(current_page=current_page, data_count=data_count,
                        per_page_count=per_page_count)
        page_html = page_obj.page_str(base_url="index?")
        at_list = []
        # Articles are stored oldest-first; slice from the end for newest-first pages.
        if current_page == 1:
            article_objs = Article.select()[-page_obj.end:]
        else:
            article_objs = Article.select()[-page_obj.end:-page_obj.start]
        for article_obj in article_objs:
            at_list.append({
                'id': article_obj.id,
                'title': article_obj.title,
                'summary': article_obj.summary,
                'read_count': article_obj.read_count,
                'created_date': article_obj.created_date,
                'article_type': article_obj.article_type.article_type
            })
        at_list.reverse()
    except Exception as e:
        Logger().log(e, True)
        return self.render('index/500.html')
    # Hide pagination when the first page is not even full.
    if current_page == 1 and len(at_list) < per_page_count:
        page_html = ""
    self.render('index/index.html', at_list=at_list, page_html=page_html)
def check_and_alert_recover_notifier(self, host_obj, trigger_obj):
    """Check whether a triggered alert has recovered and send recovery notices."""
    trigger_redis_key = 'Host_%s_trigger_%s' % (host_obj.hostname, trigger_obj.id)
    trigger_data = self.redis_obj.get(trigger_redis_key)
    if trigger_data:
        trigger_data = json.loads(trigger_data.decode())
        alert_counter_dict_key = settings.ALERT_COUNTER_REDIS_KEY
        alert_counter_dict = json.loads(
            self.redis_obj.get(alert_counter_dict_key).decode())
        # Keep a copy so recovery notices can read the pre-cleanup counters.
        old_alert_counter_dict = deepcopy(alert_counter_dict)
        action_set = trigger_obj.action_set.all()  # alert-action policies bound to this trigger
        for action_obj in action_set:  # iterate each alert policy
            if str(action_obj.id
                   ) in alert_counter_dict:  # the policy id is present in the alert counter dict
                for hostname, value in alert_counter_dict[str(
                        action_obj.id)].items():
                    if host_obj.hostname == hostname:  # the host matches too
                        # NOTE(review): the loop below deletes from the dict it
                        # iterates — relies on stopping right after the delete.
                        for trigger_id in value:
                            if trigger_id == str(trigger_obj.id):
                                # Drop this trigger's counter for the host and persist.
                                del alert_counter_dict[str(
                                    action_obj.id)][hostname][str(
                                        trigger_obj.id)]
                                self.redis_obj.set(
                                    alert_counter_dict_key,
                                    json.dumps(alert_counter_dict))
                                # Remove the trigger's alert key from redis.
                                self.redis_obj.delete(trigger_redis_key)
                                # Mark the host back as normal/online.
                                host_obj.status = 1
                                host_obj.save()
                                Logger().log(message='服务器状态改变,%s' % host_obj.hostname, mode=True)
                                # Send a recovery notice when the policy enables it.
                                if action_obj.recover_notice:
                                    action_operation_obj_list = action_obj.action_operations.all(
                                    )
                                    for action_operation_obj in action_operation_obj_list:
                                        if old_alert_counter_dict[str(
                                                action_obj.id
                                        )][hostname][str(
                                                trigger_obj.id
                                        )]['counter'] >= action_operation_obj.step:
                                            # Dispatch via reflection on the action module.
                                            action_func = getattr(
                                                action, '%s' % action_operation_obj.
                                                action_type)
                                            action_func(
                                                action_operation_obj=
                                                action_operation_obj,
                                                hostname=hostname,
                                                trigger_data=trigger_data,
                                                action_obj=action_obj
                                            )  # send the recovery notice via reflection
def __init__(self, topology):
    """Wrap a GraphML topology and seed it with the default Topology overlay."""
    # No name until one is assigned.
    self._name = None
    # Overlays by name, plus the one currently selected.
    self._overlays = {}
    self._current_overlay = None
    # The raw GraphML description of the topology.
    self._topology_as_graphml = topology
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Every Topology object starts with a Topology overlay attached by default.
    self._add_topology_overlay()
def __init__(self):
    """Wire up the parsers, factories and the service registry."""
    # Shared logger instance.
    self._log = Logger.get_instance()
    # ConfigObj reference, created on demand.
    self._parser = None
    # Parser for the system-wide configuration.
    self._system_parser = SystemParser()
    # Factory that builds service-specific parsers, plus the parser itself.
    self._factory_service_parser = FactoryServiceParser.get_instance()
    self._service_parser = None
    # Factories for extractor and collector creation.
    self._factory_extractor = FactoryExtractor.get_instance()
    self._factory_collector = FactoryCollector.get_instance()
    # Services declared in the configuration file; each is a
    # model.service.Service instance.
    self._services = []
def __init__(self):
    """Set up the framework: logger, parser, environment loader and CLI args."""
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Configuration parser.
    self._parser = Parser()
    # The topology is attached later.
    self._topology = None
    # Loads a fresh environment for every alternative under evaluation.
    self._loader = EnvironmentLoader()
    # Command-line interface.
    self._arg = argparse.ArgumentParser(description='Comparison Framework')
    self._arg.add_argument('-c', '--config-file', required=True,
                           help='The framework configuration file.')
    self._arg.add_argument('-t', '--topology', required=True,
                           help='The topology on which framework runs. It must be a GraphML file.')
def __init__(self):
    """Parse conf/system.xml and index services, alternatives, metrics and environments."""
    # Reference to the file to parse
    self._parser = XmlParser.parse('conf/system.xml')
    # Logger
    self._log = Logger.get_instance()
    # All services. This is a map<name, parser_adapter>
    self._services = {}
    # Mapping between services and alternatives. This is a map<service_name, list(alternative_name)>
    self._service_to_alternatives = {}
    # All alternatives. This is a map<name, alternative_adapter>
    self._alternatives = {}
    # All metrics. This is a list
    self._metrics = []
    # Mapping between metric and collector adapter. This is a map<metric_name, collector_adapter_name>
    self._metric_to_collector = {}
    # Mapping between metric and extractor adapter. This is a map<metric_name, extractor_adapter_name>
    self._metric_to_extractor = {}
    # All environments. This is a map<env_name, adapter>
    self._environments = {}
    # Mapping between environments and alternatives. This is a map<env_name, list(alternative_name)>
    self._environment_to_alternatives = {}
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on Feb 23, 2012 @author: 章红兵 """ import os import time import glob import sys from utils.log import Logger sys.path.append("D:/") logger = Logger("==mse scan==") logger.setLevel(1) #windows def getstatusoutput(cmd): """Return (status, output) of executing cmd in a shell.""" import sys mswindows = (sys.platform == "win32") import os if not mswindows: cmd = '{ ' + cmd + '; }' pipe = os.popen(cmd + ' 2>&1', 'r')
import errno
import socket
import select
import multiprocessing
from inspect import currentframe

# Make the project root importable relative to this file.
workdir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, workdir + "/../")
from utils.config import config
from utils.log import Logger

# Read the 'log' section of the nbnet config file.
logconf = config('nbnet', 'log')
# Module-wide logger.
logger = Logger.getLogger()
# Debug switch; when enabled, debug messages are recorded.
debug = logconf['debug']


class DebugLog():
    '''Debug-log helper that reports the running file and line number.'''

    def __init__(self, file_path, debug=True):
        # Path reported in debug lines; flag toggles debug output.
        self.file_path = file_path
        self.debug = debug

    def get_linenumber(self):
        '''Get the line the caller is running on.'''
        cf = currentframe()
        # f_back.f_back: skip this frame and the logging wrapper to reach the caller.
        return "run_line: file %s line %s " % (self.file_path, cf.f_back.f_back.f_lineno)
def __init__(self):
    """Grab the shared filesystem handler and logger."""
    # FileSystem handler.
    self._fs = FileSystem.get_instance()
    # Shared logger instance.
    self._log = Logger.get_instance()
def __init__(self, mininet_topology):
    """Run as a thread watching the given Mininet topology."""
    Thread.__init__(self)
    # Shared logger instance.
    self._log = Logger.get_instance()
    # The topology currently executing inside Mininet.
    self._mininet_topology = mininet_topology
def __init__(self):
    """Set up an un-synced VRM with its logger, settings and valid driver list."""
    # Driver object; bound later.
    self.driver = None
    # Whether the driver reports the VM as already existing.
    self.is_synced = False
    self.logger = Logger()
    self.settings = Settings()
    # Driver names allowed by the settings file.
    self.valid_driver = self.__list_valid_driver()
def __init__(self, service_name):
    """Remember the service name and grab the shared logger."""
    # The service's name.
    self._service_name = service_name
    # Shared logger instance.
    self._log = Logger.get_instance()
def __init__(self):
    """Grab the filesystem handler (for tmp-folder configs) and the logger."""
    # FileSystem object used to write configuration into the tmp folder.
    self._fs = FileSystem.get_instance()
    # Shared logger instance.
    self._log = Logger.get_instance()
def __init__(self, name):
    """Store the alternative's name and grab the shared logger."""
    # Name of this alternative.
    self._name = name
    # Shared logger instance.
    self._log = Logger.get_instance()
from utils.log import Logger
logging = Logger.getLogger(__file__)
from utils.config import ConfigManager
import plivo

# Keys into the plivo section of the configuration.
PLIVO_CONFIG = "plivo_config"
PLIVO_AUTHID = "plivo_authid"
PLIVO_AUTHTOKEN = "plivo_authtoken"
PLIVO_NUMBER = "plivo_number"
PLIVO_URL = "plivo_url"
PLIVO_ANSWERURL = "plivo_answerurl"


class CallerService(object):
    """Places phone calls through the Plivo REST API."""

    def __init__(self):
        config_obj = ConfigManager.get_instance()
        self.config = config_obj.dataMap
        # Plivo-specific settings; empty dict when the section is missing.
        self.plivo_block = self.config.get(PLIVO_CONFIG, {})

    def single_call_parameters(self, dest_number, source_number=None):
        """Build the call params and place one call to *dest_number*."""
        call_params = {}
        call_params['from'] = source_number
        call_params['to'] = dest_number
        call_params['answer_url'] = self.plivo_block[PLIVO_URL]
        call_params['answer_method'] = "POST"
        # Default caller id when none is supplied.
        if source_number == None:
            call_params['from'] = "+919999999999"
        plivo_call = plivo.RestAPI(self.plivo_block[PLIVO_AUTHID],
                                   self.plivo_block[PLIVO_AUTHTOKEN])
        # NOTE(review): this method appears truncated in this chunk — the call
        # result is captured but nothing after it is visible.
        check_call = plivo_call.make_call(call_params)
def __init__(self):
    """Start with no extractor attached."""
    # Shared logger instance.
    self._log = Logger.get_instance()
    # Reference to the extractor; assigned later.
    self._extractor = None
def __init__(self):
    """Initialize the thread and grab the shared logger."""
    Thread.__init__(self)
    # Shared logger instance.
    self._log = Logger.get_instance()
#! /usr/bin/env python from framework import ComparisonFramework from utils.log import Logger if __name__ == '__main__': log = Logger.get_instance() framework = ComparisonFramework() log.info('Runner', 'Framework starts.') framework.run()