def __init__(self):
    """Load the bot token from disk, verify it against the Telegram API
    (``getMe``), and initialise the scheduler state.

    Raises:
        Exception: if the API reports the token as invalid.
    """
    self.__update_id = None
    # Token loading — context manager guarantees the file is closed even on
    # a read error; strip() removes the trailing newline an editor leaves,
    # which would otherwise corrupt every request URL built from the token.
    with open("bot_token.txt", "r") as token_file:
        self.__token = token_file.read().strip()
    logger.debug("Token loading : ", self.__token)
    # Load bot information — a session reuses the underlying HTTP connection
    self.__req = requests.session()
    get_info = self.__request_API("getMe")
    if get_info["ok"] is True:
        self.id = get_info["result"]["id"]
        self.first_name = get_info["result"]["first_name"]
        self.username = get_info["result"]["username"]
    else:
        logger.critical("Incorrect Token")
        raise Exception("Incorrect Token !")
    self.s = sched.scheduler(time.time, time.sleep)
    self.task_count = 0
    # Print bot information
    logger.info("Bot '", self.first_name, "' @", self.username, " | ID: ", self.id, " loaded successfully !")
def log(self, msg, level=1):
    """Internal logger - re-direct all messages to the project logger.

    ``level`` maps 0..4 onto debug/info/warning/error/critical; any other
    value falls back to info.
    """
    self.count_msg += 1
    # Dispatch table instead of an if/elif ladder; unknown levels -> info.
    severity_handlers = {
        0: logger.debug,
        1: logger.info,
        2: logger.warning,
        3: logger.error,
        4: logger.critical,
    }
    severity_handlers.get(level, logger.info)(msg, __name__)
def log(self, msg, level=1):
    """Internal logger - re-direct all messages to the project logger.

    Levels 0..4 select debug/warning/error/critical; level 1 and any
    unrecognised value are logged as info.
    """
    self.count_msg += 1
    if level == 0:
        logger.debug(msg, __name__)
    elif level == 2:
        logger.warning(msg, __name__)
    elif level == 3:
        logger.error(msg, __name__)
    elif level == 4:
        logger.critical(msg, __name__)
    else:
        # level == 1 (the default) and any out-of-range level land here
        logger.info(msg, __name__)
def __init__(self):
    """Open a ``log_time`` record for the current hour.

    If a record truncated to the current hour already exists, log the
    conflict, close the cursor/connection and terminate the process;
    otherwise insert a new row and remember its id.
    """
    # The CASE yields NULL for actual_time when a row for this hour already
    # exists, so duplicate detection and timestamp retrieval are one query.
    time_check_query = """SELECT CASE WHEN (SELECT COUNT(1) FROM log_time WHERE hour_truncate=(SELECT date_trunc('hour',now())::timestamp without time zone)) > 0 THEN NULL ELSE LOCALTIMESTAMP END AS actual_time,date_trunc('hour',LOCALTIMESTAMP) AS hour_truncate"""
    self.cursor.execute(time_check_query)
    time_data = self.cursor.fetchone()
    if not time_data[0]:
        logger.critical('Appropriate record for "{0}" already exists'.format(time_data[1]))
        self.cursor.close()
        database.db_conn.close()
        exit()
    self.actual_time = time_data[0]
    self.hour_truncate = time_data[1]
    # Parameterized query instead of str.format(): lets the driver handle
    # timestamp quoting and removes the SQL-injection-shaped string building.
    self.cursor.execute(
        "INSERT INTO log_time (hour_truncate,actual_time) VALUES (%s,%s) RETURNING id",
        (time_data[1], time_data[0]),
    )
    self.id = self.cursor.fetchone()[0]
def save_log_file(self):
    """Save the accumulated log into the standard log file and report its path."""
    logfname = fnm.log_file()
    # critical level so the file location is always printed, whatever the verbosity
    logger.critical('See details in log-file: %s' % logfname)
    logger.saveLogInFile(logfname)
def save_log_file(self):
    """Write the in-memory log out to the project log file, announcing where."""
    fname = fnm.log_file()
    msg = 'See details in log-file: %s' % fname
    # logged as critical so it is shown at every verbosity level
    logger.critical(msg)
    logger.saveLogInFile(fname)
def to_model(self, input_shape, name="default_for_op", kernel_regularizer_l2=0.01):
    """Materialise this layer-DAG as a Keras ``Model``.

    Walks a copy of the graph in topological order, instantiates one Keras
    layer per node, threads the produced tensors along the outgoing edges
    (stored under the ``'tensor'`` edge attribute), and finally wraps the
    single input and the sink node's output in ``Model(inputs, outputs)``.

    Parameters:
        input_shape: shape tuple (without batch dim) for the single ``Input``.
        name: currently unused — the ``tf.name_scope`` code that consumed it
            is commented out below.
        kernel_regularizer_l2: L2 weight-decay factor for conv kernels.

    Returns:
        An uncompiled ``Model`` built from the graph.
    """
    # with graph.as_default():
    # with tf.name_scope(name) as scope:
    graph_helper = self.copy()
    assert nx.is_directed_acyclic_graph(graph_helper)
    topo_nodes = nx.topological_sort(graph_helper)
    input_tensor = Input(shape=input_shape)
    for node in topo_nodes:
        pre_nodes = graph_helper.predecessors(node)
        suc_nodes = graph_helper.successors(node)
        if node.type not in ['Concatenate', 'Add', 'Multiply']:
            # Single-input node: feed it either the model input (source node)
            # or the tensor stored on its one incoming edge.
            if len(pre_nodes) == 0:
                layer_input_tensor = input_tensor
            else:
                assert len(pre_nodes) == 1
                layer_input_tensor = graph_helper[pre_nodes[0]][node]['tensor']
            if node.type == 'Conv2D':
                kernel_size = node.config.get('kernel_size', 3)  # default 3x3
                filters = node.config['filters']
                layer = Conv2D(kernel_size=kernel_size, filters=filters,
                               name=node.name, padding='same',
                               kernel_regularizer=regularizers.l2(kernel_regularizer_l2)
                               )
            elif node.type == 'Conv2D_Pooling':
                kernel_size = node.config.get('kernel_size', 3)
                filters = node.config['filters']
                layer = self.conv_pooling_layer(name=node.name,
                                                kernel_size=kernel_size,
                                                filters=filters,
                                                kernel_regularizer_l2=kernel_regularizer_l2)
            elif node.type == 'Group':
                layer = self.group_layer(name=node.name,
                                         group_num=node.config['group_num'],
                                         filters=node.config['filters'],
                                         kernel_regularizer_l2=kernel_regularizer_l2)
            elif node.type == 'GlobalMaxPooling2D':
                layer = keras.layers.GlobalMaxPooling2D(name=node.name)
            elif node.type == 'MaxPooling2D':
                layer = keras.layers.MaxPooling2D(name=node.name)
            elif node.type == 'AveragePooling2D':
                layer = keras.layers.AveragePooling2D(name=node.name)
            elif node.type == 'Activation':
                activation_type = node.config['activation_type']
                layer = Activation(activation=activation_type, name=node.name)
            # NOTE(review): an unrecognised node.type reaches this call with
            # ``layer`` left over from the previous iteration (or raises
            # NameError on the first node) — confirm the type set is closed.
            layer_output_tensor = layer(layer_input_tensor)
            if node.type in ['Conv2D', 'Conv2D_Pooling', 'Group']:
                # Tuple expression: both update() calls run, result discarded.
                self.update(), graph_helper.update()
                if node.type == 'Conv2D':
                    layer_output_tensor = PReLU()(layer_output_tensor)
                # Depth-scaled dropout kept for reference:
                # MAX_DP, MIN_DP = .35, .01
                # ratio_dp = - (MAX_DP - MIN_DP) / self.max_depth * node.depth + MAX_DP
                # use fixed drop out ratio
                ratio_dp = 0.30
                layer_output_tensor = keras.layers.Dropout(ratio_dp)(layer_output_tensor)
                # logger.debug('layer {} ratio of dropout {}'.format(node.name, ratio_dp))
                # for test, use batch norm
                # layer_output_tensor = keras.layers.BatchNormalization(axis = 3)(layer_output_tensor)
        else:
            # Merge node (Add / Concatenate / Multiply): gather the tensors
            # on all incoming edges.
            layer_input_tensors = [graph_helper[pre_node][node]['tensor']
                                   for pre_node in pre_nodes]
            if node.type == 'Add':
                # todo also test multiply
                assert K.image_data_format() == 'channels_last'
                # Target spatial shape/channel count = elementwise minimum
                # over all inputs; larger inputs are shrunk to match.
                ori_shapes = [ktf.int_shape(layer_input_tensor)[1:3]
                              for layer_input_tensor in layer_input_tensors]
                ori_shapes = np.array(ori_shapes)
                new_shape = ori_shapes.min(axis=0)
                ori_chnls = [ktf.int_shape(layer_input_tensor)[3]
                             for layer_input_tensor in layer_input_tensors]
                ori_chnls = np.array(ori_chnls)
                new_chnl = ori_chnls.min()
                for ind, layer_input_tensor, ori_shape in \
                        zip(range(len(layer_input_tensors)), layer_input_tensors, ori_shapes):
                    diff_shape = ori_shape - new_shape
                    if diff_shape.any():
                        # A (diff+1) window with stride 1 reduces each spatial
                        # dim by exactly ``diff``.
                        diff_shape += 1
                        layer_input_tensors[ind] = \
                            keras.layers.MaxPool2D(pool_size=diff_shape, strides=1,
                                                   name=node.name + '_maxpool2d')(
                                layer_input_tensor)
                    if ori_chnls[ind] > new_chnl:
                        # 1x1 conv projects down to the smallest channel count.
                        layer_input_tensors[ind] = \
                            Conv2D(filters=new_chnl, kernel_size=1, padding='same',
                                   name=node.name + '_conv2d')(layer_input_tensor)
                layer = keras.layers.Add(name=node.name)
                # logger.debug('In graph to_model add a Add layer with name {}'.format(node.name))
            if node.type == 'Concatenate':
                logger.critical('Concatenate is decrapted!!!')
                if K.image_data_format() == "channels_last":
                    (width_ind, height_ind, chn_ind) = (1, 2, 3)
                else:
                    (width_ind, height_ind, chn_ind) = (2, 3, 1)
                ori_shapes = [
                    ktf.int_shape(layer_input_tensor)[width_ind:height_ind + 1]
                    for layer_input_tensor in layer_input_tensors
                ]
                ori_shapes = np.array(ori_shapes)
                new_shape = ori_shapes.min(axis=0)
                for ind, layer_input_tensor, ori_shape in \
                        zip(range(len(layer_input_tensors)), layer_input_tensors, ori_shapes):
                    diff_shape = ori_shape - new_shape
                    # NOTE(review): uses .all() here but .any() in the Add
                    # branch, so inputs differing in only one spatial dim are
                    # NOT resized — confirm this asymmetry is intended.
                    if diff_shape.all():
                        diff_shape += 1
                        layer_input_tensors[ind] = \
                            keras.layers.MaxPool2D(pool_size=diff_shape,
                                                   strides=1)(layer_input_tensor)
                # todo custom div layer
                # def div2(x):
                #     return x / 2.
                # layer_input_tensors = [keras.layers.Lambda(div2)(tensor) for tensor in layer_input_tensors]
                layer = keras.layers.Concatenate(axis=chn_ind, name=node.name)
            try:
                layer_output_tensor = layer(layer_input_tensors)
            # NOTE(review): bare except swallows every failure and leaves
            # layer_output_tensor unset for the bookkeeping below.
            except:
                print("create intput output layer error!")
                # embed()
        graph_helper.add_node(node, layer=layer)
        if len(suc_nodes) == 0:
            # Sink node: its output becomes the model output.
            output_tensor = layer_output_tensor
        else:
            # Store this node's output on every outgoing edge for successors.
            for suc_node in suc_nodes:
                graph_helper.add_edge(node, suc_node, tensor=layer_output_tensor)
    # assert tf.get_default_graph() == graph, "should be same"
    # tf.train.export_meta_graph('tmp.pbtxt', graph_def=tf.get_default_graph().as_graph_def())
    # Guard: the topological walk must have reached at least one sink node.
    assert 'output_tensor' in locals()
    import time
    tic = time.time()
    model = Model(inputs=input_tensor, outputs=output_tensor)
    logger.info('Consume Time(Just Build model: {}'.format(time.time() - tic))
    return model
#!/usr/bin/python3
import time
import traceback

from requests.exceptions import ConnectionError

from Bot import Bot
from Logger import logger

# Dev app - pooling messages
if __name__ == '__main__':
    bot = Bot()
    try:
        # Poll until the user interrupts with Ctrl-C.
        while True:
            try:
                bot.pool_message()
            except ConnectionError:
                # Network hiccup: silently retry on the next iteration.
                continue
            except Exception as e:
                logger.critical("FATAL ERROR ! Crash handle :\n", traceback.format_exc())
                time.sleep(2)
    except KeyboardInterrupt:
        logger.info("Exiting")