def test_print_in_file_only_error_or_higher_levels(self):
    log = LogHelper()
    log.debug(self.DEBUG_MESSAGE)
    log.info(self.INFO_MESSAGE)
    log.warning(self.WARNING_MESSAGE)
    log.error(self.ERROR_MESSAGE)
    log.fatal(self.FATAL_MESSAGE)

    log_name = 'test_print_in_file_only_error_or_higher_levels.log'

    # All five messages are kept in memory; only ERROR and above should reach the file.
    self.assertEqual(len(log.logs), 5)
    self.assertTrue(log.save_logs(log_name, log_level=LogHelper.ERROR))

    with open(f'./{log_name}') as file:
        data = file.read()

    self.assertNotIn(self.DEBUG_MESSAGE, data)
    self.assertNotIn(self.INFO_MESSAGE, data)
    self.assertNotIn(self.WARNING_MESSAGE, data)
    self.assertIn(self.ERROR_MESSAGE, data)
    self.assertIn(self.FATAL_MESSAGE, data)
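# Minimal cleanup sketch for the test above (not part of the original suite):
# the test writes its log file into the working directory, so a tearDown hook
# that deletes it keeps repeated runs independent. The file name mirrors the
# one used in the test; assumes `os` is imported in the test module and that
# this sits in the same unittest.TestCase class.
def tearDown(self):
    log_name = 'test_print_in_file_only_error_or_higher_levels.log'
    if os.path.exists(f'./{log_name}'):
        os.remove(f'./{log_name}')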
def add_log(self, log):
    """Add a log entry to InfluxDB."""
    mapped_log = LogHelper().prepare_influx_insert_query(log)
    self.influx_client.write_points(mapped_log)
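# Hedged sketch of what prepare_influx_insert_query might return: the influxdb
# Python client's write_points() accepts a list of point dicts with
# "measurement", "tags", "fields" and an optional "time" key. The measurement
# name, tag/field layout, and the assumption that `log` is a dict are all
# illustrative, not the project's actual schema.
def prepare_influx_insert_query(self, log):
    return [{
        "measurement": "logs",                         # assumed measurement name
        "tags": {"level": log.get("level", "INFO")},   # assumed tag layout
        "fields": {"message": log.get("message", "")}, # assumed field layout
        "time": log.get("time"),                       # RFC3339 string or epoch, if present
    }]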
def __init__(self):
    LogHelper.log("created")
def __init__(self, module_dir, user='******'):
    BaseConfig.__init__(self)
    print("module dir: " + module_dir)

    self.test_config = BaseConfig()
    self.test_config.batch_size = 1
    self.wv = self.test_config.wv

    # tf.flags.DEFINE_integer("embedding_dim_cn", 300, "Dimensionality of character embedding (default: 128)")
    # tf.flags.DEFINE_integer("batch_size_classify", 1, "Batch Size (default: 64)")
    #
    # self.FLAGS = tf.flags.FLAGS
    # self.FLAGS._parse_flags()
    # print("\nParameters:")
    # for attr, value in sorted(self.FLAGS.__dict__['__flags'].items()):
    #     print("{}={}".format(attr.upper(), value))
    # print("")

    checkpoint_dir = os.path.join(module_dir, "cn", "checkpoints")
    classes_file = codecs.open(os.path.join(module_dir, "cn", "classes"), "r", "utf-8")
    self.classes = list(line.strip() for line in classes_file.readlines())
    classes_file.close()

    print("\nEvaluating...\n")

    # Evaluation
    # ==================================================
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
    graph = tf.Graph()
    with graph.as_default():
        with tf.device("/cpu:0"):
            session_conf = tf.ConfigProto(
                allow_soft_placement=self.test_config.allow_soft_placement,
                log_device_placement=self.test_config.log_device_placement)
            session_conf.gpu_options.allow_growth = True
            self.sess = tf.Session(config=session_conf)

            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
            saver.restore(self.sess, checkpoint_file)

            # Get the placeholders from the graph by name
            self.embedded_chars = graph.get_operation_by_name("embedded_chars").outputs[0]
            # input_y = graph.get_operation_by_name("input_y").outputs[0]
            self.dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

            # Tensors we want to evaluate
            self.scores = graph.get_operation_by_name("output/scores").outputs[0]
            self.probabilities = graph.get_operation_by_name("output/probabilities").outputs[0]

    this_file = inspect.getfile(inspect.currentframe())
    dir_name = os.path.abspath(os.path.dirname(this_file))
    self.chat_log_path = os.path.join(dir_name, '..', 'log/module/cnn_classify')

    # Create the per-user chat log file on first use
    if not os.path.exists(os.path.join(self.chat_log_path, user)):
        if not os.path.exists(self.chat_log_path):
            os.makedirs(self.chat_log_path)
        f = open(self.chat_log_path + '/' + user, 'w', encoding='utf-8')
        f.close()

    if not self.lh:
        self.lh = LogHelper(user, self.chat_log_path)
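# Hedged sketch of how the restored tensors above could be used at inference
# time. Only self.sess, self.embedded_chars, self.dropout_keep_prob,
# self.probabilities and self.classes come from the constructor; the method
# name classify(), the helper embed_text(), and the use of numpy as np are
# assumptions for illustration.
def classify(self, text):
    # Hypothetical helper: turn the input text into an embedding matrix
    # (e.g. via self.wv), shaped to match the "embedded_chars" placeholder.
    embedded = self.embed_text(text)
    probabilities = self.sess.run(
        self.probabilities,
        feed_dict={
            self.embedded_chars: embedded,
            self.dropout_keep_prob: 1.0,  # disable dropout at inference time
        })
    best = int(np.argmax(probabilities[0]))
    return self.classes[best], float(probabilities[0][best])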