def show_eval(output_key):
    for mode in [TEST]:
        res = json.load(open(data_config.output_path(output_key, mode, EVALUATION), 'r'))
        print(mode)
        print_evaluation(res)
        for col in res[CONFUSION_MATRIX]:
            print(','.join(map(str, col)))
def eval_sub(input_filename):
    dataset = Processor.load_origin(input_filename)
    labels_predict = list(map(lambda _item: _item[-1], dataset))
    labels_gold = load_label_list(data_config.path(FINAL, LABEL))

    res = basic_evaluate(gold=labels_gold, pred=labels_predict)
    print_evaluation(res)
    for col in res[CONFUSION_MATRIX]:
        print(','.join(map(str, col)))
    print()
def main(dataset_key, label_version=None, config_path='config_svm.yaml', kernel='rbf'):
    """
    python algo/svm.py main semeval2018_task3 A

    :param dataset_key: string
    :param label_version: string or None
    :param config_path: string
    :param kernel: string
    :return:
    """
    pos_label = None
    if dataset_key == 'semeval2018_task3' and label_version == 'A':
        pos_label = 1

    config_data = yaml.load(open(config_path))
    train_config = Config(data=config_data)

    data_config = getattr(
        importlib.import_module('dataset.{}.config'.format(dataset_key)), 'config')
    datasets = load_dataset(data_config, train_config, label_version)

    if train_config.use_class_weights:
        class_weight = 'balanced'
    else:
        class_weight = None

    clf = svm.SVC(class_weight=class_weight, kernel=kernel)
    #clf = LogisticRegression(C=1., random_state=0, class_weight='balanced')

    X = datasets[TRAIN][FEATS]
    clf.fit(X=X, y=datasets[TRAIN][LABEL_GOLD])

    if kernel == 'linear':
        coef = sorted(list(enumerate(clf.coef_.ravel())),
                      key=lambda _item: math.fabs(_item[1]))
        coef = list(map(lambda _item: _item[0], coef))
        print(coef)

    for mode in [TRAIN, TEST]:
        X = datasets[mode][FEATS]
        labels_predict = clf.predict(X=X)
        labels_gold = datasets[mode][LABEL_GOLD]
        res = basic_evaluate(gold=labels_gold, pred=labels_predict, pos_label=pos_label)
        print(mode)
        print_evaluation(res)
        print()
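# Hedged aside: a self-contained sketch of what class_weight='balanced' does for
# svm.SVC on an imbalanced toy problem (the data below is invented). With
# 'balanced', each class is weighted by n_samples / (n_classes * n_class_samples),
# so errors on the minority class of this 95-vs-5 split cost roughly 19x more.
import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (95, 2)), rng.normal(2, 1, (5, 2))])
y = np.array([0] * 95 + [1] * 5)

for cw in [None, 'balanced']:
    clf = svm.SVC(kernel='rbf', class_weight=cw)
    clf.fit(X, y)
    print(cw, int((clf.predict(X) == 1).sum()))  # 'balanced' recovers more of class 1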
def m3(config_path='e83.yaml'):
    """
    [Usage]
    python3 -m algo.ensemble93 main -e mv --build-analysis

    :param config_path:
    :return:
    """
    config_data = yaml.load(open(config_path))
    config = Config(data=config_data)

    for mode in [TEST, ]:
        labels_gold = load_label_list(data_config.path(mode, LABEL, 'B'))

        b_result = combine(output_keys=config.components(), mode=mode)
        b_vote = list(map(lambda _item: _item[0], b_result))

        b0_result = dict()
        b0_vote = dict()

        last_vote = b_vote
        res = basic_evaluate(gold=labels_gold, pred=last_vote)
        print('{}'.format(mode))
        print_evaluation(res)
        for col in res[CONFUSION_MATRIX]:
            print(','.join(map(str, col)))

        for i in [1, 2, 3]:
            key = 'b0{}'.format(i)
            thr = config.thr(key)
            b0_result[i] = combine(output_keys=config.components(key), mode=mode)

            new_vote = list()
            for l_v, b0_res in zip(last_vote, b0_result[i]):
                this_vote = 0 if b0_res[0] == 0 else i
                if l_v in {0, i} and b0_res[1] >= thr:
                    new_vote.append(this_vote)
                else:
                    new_vote.append(l_v)
            last_vote = new_vote

            res = basic_evaluate(gold=labels_gold, pred=new_vote)
            print('{} - {}'.format(mode, i))
            print_evaluation(res)
            for col in res[CONFUSION_MATRIX]:
                print(','.join(map(str, col)))

    open('latest_ef83.label', 'w').write('\n'.join(list(map(str, last_vote))))
def show_eval(output_key):
    labels_predict = list()
    labels_gold = list()
    for mode in [TRAIN, TEST]:
        path = data_config.output_path(output_key, mode, LABEL_PREDICT)
        labels_predict += load_label_list(path)

        path = data_config.path(mode, LABEL)
        labels_gold += load_label_list(path)

    res = basic_evaluate(gold=labels_gold, pred=labels_predict)
    print_evaluation(res)
    for col in res[CONFUSION_MATRIX]:
        print(','.join(map(str, col)))
def main(config_path='e83.yaml'):
    """
    [Usage]
    python3 -m algo.ensemble93 main -e mv --build-analysis

    :param config_path:
    :return:
    """
    config_data = yaml.load(open(config_path))
    config = Config(data=config_data)

    for mode in [TRAIN, TEST]:
        b_result = combine(output_keys=config.components('b'), mode=mode)
        b_vote = list(map(lambda _item: _item[0], b_result))

        b2_result = combine(output_keys=config.components('b2'), mode=mode)
        b2_vote = list(map(lambda _item: _item[0], b2_result))

        last_vote = list()
        for b_v, b2_v in zip(b_vote, b2_vote):
            if b_v == 0:
                label = 0
            elif b2_v == 0:
                label = 1
            else:
                label = 2
            last_vote.append(label)

        b3_result = combine(output_keys=config.components('b3'), mode=mode)
        b3_vote = list(map(lambda _item: _item[0], b3_result))

        labels_predict = list()
        for last_v, b3_v in zip(last_vote, b3_vote):
            if last_v != 2:
                label = last_v
            elif b3_v == 0:
                label = 2
            else:
                label = 3
            labels_predict.append(label)

        labels_gold = load_label_list(data_config.path(mode, LABEL, 'B'))
        res = basic_evaluate(gold=labels_gold, pred=labels_predict)
        print(mode)
        print_evaluation(res)
        for col in res[CONFUSION_MATRIX]:
            print(','.join(map(str, col)))
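# The function above decodes three binary ensembles into one 4-way label by a
# cascade: 'b' separates class 0 from the rest, 'b2' then splits off class 1,
# and 'b3' decides between classes 2 and 3. A minimal restatement of that
# cascade (`cascade_decode` is our name, not from the repo):
def cascade_decode(b_vote, b2_vote, b3_vote):
    labels = []
    for b_v, b2_v, b3_v in zip(b_vote, b2_vote, b3_vote):
        if b_v == 0:
            labels.append(0)                      # stage 1: class 0 vs. rest
        elif b2_v == 0:
            labels.append(1)                      # stage 2: class 1 vs. {2, 3}
        else:
            labels.append(2 if b3_v == 0 else 3)  # stage 3: class 2 vs. 3
    return labels

assert cascade_decode([0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1]) == [0, 1, 2, 3]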
def show_eval(output_key): """ [Usage] python algo/main.py eval A_ntua_ek_1542454066 :param output_key: string :return: """ for mode in [TRAIN, VALID, TEST]: res = json.load( open(data_config.output_path(output_key, mode, EVALUATION))) print(mode) print_evaluation(res) for col in res[CONFUSION_MATRIX]: print(','.join(map(str, col))) print()
def show_eval(dataset_key, output_key):
    """
    [Usage]
    python algo/main.py eval semeval2018_task3 -o A_ntua_ek_1542454066

    :param dataset_key: string
    :param output_key: string
    :return:
    """
    data_config = getattr(
        importlib.import_module('dataset.{}.config'.format(dataset_key)), 'config')
    for mode in [TRAIN, TEST]:
        res = json.load(open(data_config.output_path(output_key, mode, EVALUATION)))
        print(mode)
        print_evaluation(res)
        print(res)
def m3a(target=0, thr=1, config_path='e83a.yaml'):
    target = int(target)
    thr = int(thr)

    config_data = yaml.load(open(config_path))
    config = Config(data=config_data)

    for mode in [TEST, ]:
        labels_gold = load_label_list(data_config.path(mode, LABEL, 'A'))

        b_result = combine(output_keys=config.components(), mode=mode)
        new_vote = list()
        for r in b_result:
            if r[0] == target and r[1] >= thr:
                new_vote.append(target)
            else:
                new_vote.append(1 - target)

        res = basic_evaluate(gold=labels_gold, pred=new_vote)
        print('{}'.format(mode))
        print_evaluation(res)
        for col in res[CONFUSION_MATRIX]:
            print(','.join(map(str, col)))

        last_vote = new_vote
        output_keys = config.components('b')
        b_result, counts = combine(output_keys=output_keys, mode=mode, full_output=True)
        new_vote = list()
        for count, l_v in zip(counts, last_vote):
            if count[0] <= 1:
                new_vote.append(0)
            else:
                new_vote.append(l_v)

        res = basic_evaluate(gold=labels_gold, pred=new_vote)
        print('{}'.format(mode))
        print_evaluation(res)
        for col in res[CONFUSION_MATRIX]:
            print(','.join(map(str, col)))
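# m3a's first pass keeps the ensemble's `target` label only when it both wins
# the vote and has at least `thr` supporters; otherwise it falls back to the
# other binary label. A toy restatement (`threshold_vote` is our name, and we
# assume `combine` yields (majority_label, vote_count) pairs as used above):
def threshold_vote(results, target, thr):
    return [target if label == target and count >= thr else 1 - target
            for label, count in results]

assert threshold_vote([(1, 3), (1, 1), (0, 2)], target=1, thr=2) == [1, 0, 0]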
def show_eval(output_key):
    labels_predict_ = dict()
    labels_gold_ = dict()
    for mode in [TRAIN, TEST, FINAL]:
        path = data_config.output_path(output_key, mode, LABEL_PREDICT)
        labels_predict_[mode] = load_label_list(path)

        path = data_config.path(mode, LABEL)
        labels_gold_[mode] = load_label_list(path)

    print('TRAIN + TEST')
    res = basic_evaluate(gold=labels_gold_[TRAIN] + labels_gold_[TEST],
                         pred=labels_predict_[TRAIN] + labels_predict_[TEST])
    print_evaluation(res)
    for col in res[CONFUSION_MATRIX]:
        print(','.join(map(str, col)))
    print()

    print('FINAL')
    res = basic_evaluate(gold=labels_gold_[FINAL], pred=labels_predict_[FINAL])
    print_evaluation(res)
    for col in res[CONFUSION_MATRIX]:
        print(','.join(map(str, col)))
    print()
def test_submit(filename_pred, filename_gold):
    pred = list(map(lambda _item: _item[-1], Processor.load_origin(filename_pred)))
    gold = list(map(lambda _item: _item[-1], Processor.load_origin(filename_gold)))
    # basic_evaluate takes gold first; pass by keyword so the two label lists
    # cannot be swapped by accident
    res = basic_evaluate(gold=gold, pred=pred)
    print_evaluation(res)
def train(dataset_key, text_version, label_version=None, config_path='config.yaml'):
    """
    python algo/main.py train semeval2018_task3 -l A -t ek
    python algo/main.py train semeval2018_task3 -l A -t ek -c config_ntua.yaml
    python algo/main.py train semeval2018_task3 -l A -t raw -c config_ntua_char.yaml
    python algo/main.py train semeval2019_task3_dev -t ek
    python algo/main.py train semeval2018_task1 -l love
    python algo/main.py train semeval2014_task9

    :param dataset_key: string
    :param text_version: string
    :param label_version: string
    :param config_path: string
    :return:
    """
    pos_label = None
    if dataset_key == 'semeval2018_task3' and label_version == 'A':
        pos_label = 1

    config_data = yaml.load(open(config_path))

    data_config = getattr(
        importlib.import_module('dataset.{}.config'.format(dataset_key)), 'config')

    output_key = '{}_{}_{}'.format(config_data['module'].rsplit('.', 1)[1],
                                   text_version, int(time.time()))
    if label_version is not None:
        output_key = '{}_{}'.format(label_version, output_key)
    print('OUTPUT_KEY: {}'.format(output_key))

    # Prepare the output folders
    data_config.prepare_output_folder(output_key=output_key)
    data_config.prepare_model_folder(output_key=output_key)

    shutil.copy(config_path, data_config.output_path(output_key, ALL, CONFIG))

    # Load the model module specified by the config
    module_relative_path = config_data['module']
    NNModel = getattr(importlib.import_module(module_relative_path), 'NNModel')
    NNConfig = getattr(importlib.import_module(module_relative_path), 'NNConfig')

    if config_data['analyzer'] == WORD:
        w2v_key = '{}_{}'.format(config_data['word']['w2v_version'], text_version)
        w2v_model_path = data_config.path(ALL, WORD2VEC, w2v_key)
        vocab_train_path = data_config.path(TRAIN, VOCAB, text_version)

        # Load the vocabulary
        # The model keeps every word vector supported by the embedding model,
        # and randomly initialises vectors for words that occur often enough
        vocab_meta_list = load_vocab_list(vocab_train_path)
        vocab_meta_list += load_vocab_list(
            semeval2018_task3_date_config.path(TRAIN, VOCAB, text_version))
        vocabs = [
            _meta['t'] for _meta in vocab_meta_list
            if _meta['tf'] >= config_data[WORD]['min_tf']
        ]

        # Load the word vectors and related data
        lookup_table, vocab_id_mapping, embedding_dim = load_lookup_table(
            w2v_model_path=w2v_model_path, vocabs=vocabs)
        json.dump(
            vocab_id_mapping,
            open(data_config.output_path(output_key, ALL, VOCAB_ID_MAPPING), 'w'))
        max_seq_len = MAX_WORD_SEQ_LEN
    elif config_data['analyzer'] == CHAR:
        texts = load_text_list(data_config.path(TRAIN, TEXT))
        char_set = set()
        for text in texts:
            char_set |= set(text)
        lookup_table, vocab_id_mapping, embedding_dim = build_random_lookup_table(
            vocabs=char_set, dim=config_data['char']['embedding_dim'])
        max_seq_len = MAX_CHAR_SEQ_LEN
    else:
        raise ValueError('invalid analyzer: {}'.format(config_data['analyzer']))

    # Load the training data
    datasets, output_dim = load_dataset(data_config=data_config,
                                        analyzer=config_data['analyzer'],
                                        vocab_id_mapping=vocab_id_mapping,
                                        seq_len=max_seq_len,
                                        with_label=True,
                                        label_version=label_version,
                                        text_version=text_version)

    # Load the configs
    nn_config = NNConfig(config_data)
    train_config = TrainConfig(config_data['train'])

    # Initialise the index iterators over the datasets
    index_iterators = {
        mode: IndexIterator(datasets[mode][LABEL_GOLD])
        for mode in [TRAIN, TEST]
    }
    # Split the training data into train and validation sets as configured
    index_iterators[TRAIN].split_train_valid(train_config.valid_rate)

    # Compute per-class weights
    if train_config.use_class_weights:
        label_weight = {
            # Follows the formula behind sklearn's class_weight='balanced';
            # experiments show a clear improvement
            _label: float(index_iterators[TRAIN].n_sample()) /
            (index_iterators[TRAIN].dim * len(_index))
            for _label, _index in index_iterators[TRAIN].label_index.items()
        }
    else:
        label_weight = {
            _label: 1.
            for _label in range(index_iterators[TRAIN].dim)
        }

    # Update the config based on the loaded data
    nn_config.set_embedding_dim(embedding_dim)
    nn_config.set_output_dim(output_dim)
    nn_config.set_seq_len(max_seq_len)

    # Build the neural network
    nn = NNModel(config=nn_config)
    nn.build_neural_network(lookup_table=lookup_table)

    batch_size = train_config.batch_size
    fetches = {
        mode: {_key: nn.var(_key) for _key in fetch_key[mode]}
        for mode in [TRAIN, TEST]
    }
    last_eval = {TRAIN: None, VALID: None, TEST: None}

    model_output_prefix = data_config.model_path(key=output_key) + '/model'

    best_res = {mode: None for mode in [TRAIN, VALID]}
    no_update_count = {mode: 0 for mode in [TRAIN, VALID]}
    max_no_update_count = 10

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        dataset = datasets[TRAIN]
        index_iterator = index_iterators[TRAIN]

        # Training starts ######################################################
        for epoch in range(train_config.epoch):
            print('== epoch {} =='.format(epoch))

            # Train on the training set
            print('TRAIN')
            n_sample = index_iterator.n_sample(TRAIN)
            labels_predict = list()
            labels_gold = list()
            for batch_index in index_iterator.iterate(batch_size, mode=TRAIN, shuffle=True):
                feed_dict = {
                    nn.var(_key): dataset[_key][batch_index]
                    for _key in feed_key[TRAIN]
                }
                feed_dict[nn.var(SAMPLE_WEIGHTS)] = list(
                    map(label_weight.get, feed_dict[nn.var(LABEL_GOLD)]))
                feed_dict[nn.var(TEST_MODE)] = 0
                res = sess.run(fetches=fetches[TRAIN], feed_dict=feed_dict)
                labels_predict += res[LABEL_PREDICT].tolist()
                labels_gold += dataset[LABEL_GOLD][batch_index].tolist()
            labels_predict, labels_gold = labels_predict[:n_sample], labels_gold[:n_sample]

            res = basic_evaluate(gold=labels_gold, pred=labels_predict, pos_label=pos_label)
            last_eval[TRAIN] = res
            print_evaluation(res)

            global_step = tf.train.global_step(sess, nn.var(GLOBAL_STEP))

            if train_config.valid_rate == 0.:
                if best_res[TRAIN] is None or res[F1_SCORE] > best_res[TRAIN][F1_SCORE]:
                    best_res[TRAIN] = res
                    no_update_count[TRAIN] = 0
                    saver.save(sess, save_path=model_output_prefix, global_step=global_step)
                else:
                    no_update_count[TRAIN] += 1
            else:
                if best_res[TRAIN] is None or res[F1_SCORE] > best_res[TRAIN][F1_SCORE]:
                    best_res[TRAIN] = res
                    no_update_count[TRAIN] = 0
                else:
                    no_update_count[TRAIN] += 1

                # Evaluate on the validation set without updating the parameters
                print('VALID')
                n_sample = index_iterator.n_sample(VALID)
                labels_predict = list()
                labels_gold = list()
                for batch_index in index_iterator.iterate(batch_size, mode=VALID, shuffle=False):
                    feed_dict = {
                        nn.var(_key): dataset[_key][batch_index]
                        for _key in feed_key[TEST]
                    }
                    feed_dict[nn.var(TEST_MODE)] = 1
                    res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                    labels_predict += res[LABEL_PREDICT].tolist()
                    labels_gold += dataset[LABEL_GOLD][batch_index].tolist()
                labels_predict, labels_gold = labels_predict[:n_sample], labels_gold[:n_sample]

                res = basic_evaluate(gold=labels_gold, pred=labels_predict, pos_label=pos_label)
                last_eval[VALID] = res
                print_evaluation(res)

                # Early stop
                if best_res[VALID] is None or res[F1_SCORE] > best_res[VALID][F1_SCORE]:
                    saver.save(sess, save_path=model_output_prefix, global_step=global_step)
                    best_res[VALID] = res
                    no_update_count[VALID] = 0
                else:
                    no_update_count[VALID] += 1

            if no_update_count[TRAIN] >= max_no_update_count:
                break
        # Training ends ########################################################

    # Make sure the output folder exists
    print('========================= BEST ROUND EVALUATION =========================')

    with tf.Session() as sess:
        prefix_checkpoint = tf.train.latest_checkpoint(data_config.model_path(key=output_key))
        saver = tf.train.import_meta_graph('{}.meta'.format(prefix_checkpoint))
        saver.restore(sess, prefix_checkpoint)

        nn = BaseNNModel(config=None)
        nn.set_graph(tf.get_default_graph())

        for mode in [TRAIN, TEST]:
            dataset = datasets[mode]
            index_iterator = index_iterators[mode]
            n_sample = index_iterator.n_sample()

            prob_predict = list()
            labels_predict = list()
            labels_gold = list()
            hidden_feats = list()

            for batch_index in index_iterator.iterate(batch_size, shuffle=False):
                feed_dict = {
                    nn.var(_key): dataset[_key][batch_index]
                    for _key in feed_key[TEST]
                }
                feed_dict[nn.var(TEST_MODE)] = 1
                res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                prob_predict += res[PROB_PREDICT].tolist()
                labels_predict += res[LABEL_PREDICT].tolist()
                hidden_feats += res[HIDDEN_FEAT].tolist()
                labels_gold += dataset[LABEL_GOLD][batch_index].tolist()

            prob_predict = prob_predict[:n_sample]
            labels_predict = labels_predict[:n_sample]
            labels_gold = labels_gold[:n_sample]
            hidden_feats = hidden_feats[:n_sample]

            if mode == TEST:
                res = basic_evaluate(gold=labels_gold, pred=labels_predict, pos_label=pos_label)
                best_res[TEST] = res

            # Export the hidden features
            with open(data_config.output_path(output_key, mode, HIDDEN_FEAT), 'w') as file_obj:
                for _feat in hidden_feats:
                    file_obj.write('\t'.join(map(str, _feat)) + '\n')

            # Export the predicted labels
            with open(data_config.output_path(output_key, mode, LABEL_PREDICT), 'w') as file_obj:
                for _label in labels_predict:
                    file_obj.write('{}\n'.format(_label))

            with open(data_config.output_path(output_key, mode, PROB_PREDICT), 'w') as file_obj:
                for _prob in prob_predict:
                    file_obj.write('\t'.join(map(str, _prob)) + '\n')

    for mode in [TRAIN, VALID, TEST]:
        if mode == VALID and train_config.valid_rate == 0.:
            continue
        res = best_res[mode]
        print(mode)
        print_evaluation(res)
        json.dump(res, open(data_config.output_path(output_key, mode, EVALUATION), 'w'))
        print()

    print('OUTPUT_KEY: {}'.format(output_key))
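# A quick self-contained check of the per-class weight formula used in train()
# above, which mirrors sklearn's class_weight='balanced':
# weight(label) = n_samples / (n_classes * n_samples_with_label).
# The counts here (90 vs. 10) are made up for illustration.
label_index = {0: list(range(90)), 1: list(range(10))}
n_sample = sum(len(index) for index in label_index.values())   # 100
dim = len(label_index)                                         # 2 classes
label_weight = {
    _label: float(n_sample) / (dim * len(_index))
    for _label, _index in label_index.items()
}
assert label_weight == {0: 100.0 / 180, 1: 100.0 / 20}         # ~0.56 vs. 5.0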
def logistic_regression(dataset_key, text_version, label_version=None, use_class_weights=True):
    """
    python algo/svm.py lr semeval2018_task3 -t ek -l A
    """
    from sklearn.preprocessing import Normalizer
    from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, HashingVectorizer

    data_config = getattr(
        importlib.import_module('dataset.{}.config'.format(dataset_key)), 'config')

    pos_label = None
    if dataset_key == 'semeval2018_task3' and label_version == 'A':
        pos_label = 1

    datasets = dict()
    for mode in [TRAIN, TEST]:
        datasets[mode] = {
            TEXT: load_text_list(data_config.path(mode, TEXT, text_version)),
            LABEL: load_label_list(data_config.path(mode, LABEL, label_version))
        }

    max_features = 10000
    '''vectorizer = TfidfVectorizer(
        ngram_range=(1, 1),
        #tokenizer=lambda x: x.split(' '),
        analyzer='word',
        min_df=5,
        # max_df=0.9,
        lowercase=False,
        use_idf=True,
        smooth_idf=True,
        max_features=max_features,
        sublinear_tf=True
    )'''
    vectorizer = TfidfVectorizer(
        ngram_range=(1, 6),
        analyzer='char',
        lowercase=False,
        smooth_idf=True,
        #sublinear_tf=True,
        max_features=50000)

    clf = LogisticRegression(C=1., random_state=0, class_weight='balanced')
    #clf = svm.SVC(C=0.6, random_state=0, kernel='linear', class_weight='balanced')
    #clf = svm.SVC(C=0.6, random_state=0, kernel='rbf', class_weight='balanced')

    pipeline = Pipeline([
        ('vectorizer', vectorizer),
        #('normalizer', Normalizer(norm='l2')),
        ('clf', clf)
    ])
    pipeline.fit(datasets[TRAIN][TEXT], datasets[TRAIN][LABEL])

    for mode in [TRAIN, TEST]:
        labels_predict = pipeline.predict(datasets[mode][TEXT])
        #print(labels_predict)
        labels_gold = datasets[mode][LABEL]
        res = basic_evaluate(gold=labels_gold, pred=labels_predict, pos_label=pos_label)
        print(mode)
        print_evaluation(res)
        print()
def main(label_version, ensemble_mode='mv', config_path='e83.yaml', build_analysis=False):
    """
    [Usage]
    python algo/ensemble.py main -d semeval2018_task3 -l A -e mv
    python algo/ensemble.py main -d semeval2018_task3 -l A -e mv --build-analysis

    :param label_version:
    :param ensemble_mode:
    :param config_path:
    :param build_analysis: bool
    :return:
    """
    dataset_key = 'semeval2018_task3'
    output_key = 'ensemble_{}_{}'.format(ensemble_mode, int(time.time()))

    pos_label = None
    if dataset_key == 'semeval2018_task3' and label_version == 'A':
        pos_label = 1

    config_data = yaml.load(open(config_path))
    config = Config(data=config_data)

    data_config = getattr(
        importlib.import_module('dataset.{}.config'.format(dataset_key)), 'config')

    labels_predict = dict()
    labels_gold = dict()
    n_sample = dict()

    for mode in [TRAIN, TEST]:
        label_path = data_config.path(mode, LABEL, label_version)
        labels_gold[mode] = load_label_list(label_path)
        n_sample[mode] = len(labels_gold[mode])
    output_dim = max(labels_gold[TEST]) + 1

    if ensemble_mode == SOFT_VOTING:
        # Sum the per-class probabilities of all components, then take argmax
        for mode in [TRAIN, TEST]:
            components = dict()
            for output_key in config.components:
                path = data_config.output_path(output_key, mode, PROB_PREDICT)
                prob_list = list()
                with open(path) as file_obj:
                    for line in file_obj:
                        line = line.strip()
                        if line == '':
                            continue
                        prob = list(map(float, line.split('\t')))
                        prob_list.append(prob)
                components[output_key] = prob_list

            labels = list()
            for i in range(n_sample[mode]):
                prob = np.zeros((output_dim, ))
                for output_key, prob_list in components.items():
                    prob += np.asarray(prob_list[i])
                labels.append(np.argmax(prob))
            labels_predict[mode] = labels
    elif ensemble_mode == MAJORITY_VOTING:
        # Each component casts one vote with its predicted label
        components = dict()
        for mode in [TRAIN, TEST]:
            for output_key in config.components:
                path = data_config.output_path(output_key, mode, LABEL_PREDICT)
                label_list = list()
                with open(path) as file_obj:
                    for line in file_obj:
                        line = line.strip()
                        if line == '':
                            continue
                        label = int(line)
                        label_list.append(label)
                components[output_key] = label_list

            labels = list()
            for i in range(n_sample[mode]):
                prob = np.zeros((output_dim, ))
                for output_key, label_list in components.items():
                    label = label_list[i]
                    prob[label] += 1
                labels.append(np.argmax(prob))
            labels_predict[mode] = labels
    elif ensemble_mode == WEIGHTED_MAJORITY_VOTE:
        raise NotImplementedError
    else:
        raise ValueError('unknown mode: {}'.format(ensemble_mode))

    for mode in [TRAIN, TEST]:
        res = basic_evaluate(gold=labels_gold[mode], pred=labels_predict[mode], pos_label=pos_label)
        print(mode)
        print_evaluation(res)
        print()

        if build_analysis:
            output_path = data_config.path(mode, ANALYSIS, WRONG_PREDICT)
            text_list = load_text_list(data_config.path(mode, TEXT))
            res = generate_wrong_prediction_report(
                labels_gold=labels_gold[mode],
                labels_predict=labels_predict[mode],
                text_list=text_list)
            with open(output_path, 'w') as file_obj:
                file_obj.write('gold\tpredict\ttext\n')
                for l_gold, l_predict, t in res:
                    file_obj.write('{}\t{}\t{}\n'.format(l_gold, l_predict, t))
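# A minimal, self-contained contrast of the two voting modes above, on invented
# predictions: one sample, two classes, three components. The modes can
# disagree when one component is much more confident than the others.
import numpy as np

probs = [np.array([0.9, 0.1]),    # very confident in class 0
         np.array([0.4, 0.6]),    # weakly prefers class 1
         np.array([0.45, 0.55])]  # weakly prefers class 1
soft = int(np.argmax(sum(probs)))                                       # sum = [1.75, 1.25] -> 0
hard = int(np.argmax(np.bincount([int(np.argmax(p)) for p in probs])))  # votes 0,1,1 -> 1
assert (soft, hard) == (0, 1)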
def train(model_name, label_version=None, label_key=None, config_path='c93f.yaml', check=False):
    """
    python -m algo.main93_v2 train
    python3 -m algo.main93_v2 train -c config_ntua93.yaml

    :param model_name: string
    :param label_version: string
    :param label_key: string
    :param config_path: string
    :param check: bool
    :return:
    """
    text_version = 'ek'
    config_data = yaml.load(open(config_path))

    NNModel = getattr(importlib.import_module('algo.m93.{}'.format(model_name)), 'NNModel')

    output_key = '{}_{}_{}'.format(model_name, text_version, int(time.time()))
    if label_key is not None:
        output_key = '{}_{}'.format(label_key, output_key)
    output_key = 'f_{}'.format(output_key)
    print('OUTPUT_KEY: {}'.format(output_key))

    # Prepare the output folders
    data_config.prepare_output_folder(output_key=output_key)
    data_config.prepare_model_folder(output_key=output_key)

    shutil.copy(config_path, data_config.output_path(output_key, ALL, CONFIG))

    w2v_key = '{}_{}'.format(config_data['word']['w2v_version'], text_version)
    w2v_model_path = data_config.path(ALL, WORD2VEC, w2v_key)
    vocab_train_path = data_config.path(TRAIN, VOCAB, text_version)

    # Load the vocabulary
    # The model keeps every word vector supported by the embedding model,
    # and randomly initialises vectors for words that occur often enough
    vocab_meta_list = load_vocab_list(vocab_train_path)
    vocabs = [
        _meta['t'] for _meta in vocab_meta_list
        if _meta['tf'] >= config_data['word']['min_tf']
    ]

    # Load the word vectors and related data
    lookup_table, vocab_id_mapping, embedding_dim = load_lookup_table2(
        w2v_model_path=w2v_model_path, vocabs=vocabs)
    json.dump(
        vocab_id_mapping,
        open(data_config.output_path(output_key, ALL, VOCAB_ID_MAPPING), 'w'))

    # Load the configs
    nn_config = NNConfig(config_data)
    train_config = TrainConfig(config_data['train'])
    early_stop_metric = train_config.early_stop_metric

    # Load the training data
    datasets = dict()
    datasets[TRAIN], output_dim = load_dataset(
        mode=[TRAIN, TEST],
        vocab_id_mapping=vocab_id_mapping,
        max_seq_len=nn_config.seq_len,
        label_version=label_version,
        sampling=train_config.train_sampling,
        label_map=train_config.label_map(label_key))
    datasets[TEST], _ = load_dataset(
        mode=FINAL,
        vocab_id_mapping=vocab_id_mapping,
        max_seq_len=nn_config.seq_len,
        label_version=label_version,
        sampling=False,
        label_map=train_config.label_map(label_key))

    # Initialise the index iterators over the datasets
    index_iterators = {
        TRAIN: IndexIterator.from_dataset(datasets[TRAIN]),
        TEST: IndexIterator.from_dataset(datasets[TEST])
    }
    # Split the training data into train and validation sets as configured
    index_iterators[TRAIN].split_train_valid(train_config.valid_rate)

    # Compute per-class weights
    if train_config.use_class_weights:
        label_weight = {
            # Follows the formula behind sklearn's class_weight='balanced';
            # experiments show a clear improvement
            _label: float(index_iterators[TRAIN].n_sample()) /
            (index_iterators[TRAIN].dim * len(_index))
            for _label, _index in index_iterators[TRAIN].label_index.items()
        }
    else:
        label_weight = {
            _label: 1.
            for _label in range(index_iterators[TRAIN].dim)
        }

    # Update the config based on the loaded data
    nn_config.set_embedding_dim(embedding_dim)
    nn_config.set_output_dim(output_dim)

    # Build the neural network
    nn = NNModel(config=nn_config)
    nn.build_neural_network(lookup_table=lookup_table)

    batch_size = train_config.batch_size
    fetches = {
        mode: {_key: nn.var(_key) for _key in fetch_key[mode]}
        for mode in [TRAIN, TEST]
    }

    model_output_prefix = data_config.model_path(key=output_key) + '/model'

    best_res = {mode: None for mode in [TRAIN, VALID, TEST]}
    no_update_count = {mode: 0 for mode in [TRAIN, VALID]}
    max_no_update_count = 10

    eval_history = {TRAIN: list(), VALID: list(), TEST: list()}
    best_epoch = -1
    best_epoch_test = -1

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        dataset = datasets[TRAIN]
        index_iterator = index_iterators[TRAIN]

        # Training starts ######################################################
        for epoch in range(train_config.epoch):
            print('== epoch {} = {} ='.format(epoch, output_key))

            # Train on the training set
            print('TRAIN')
            n_sample = index_iterator.n_sample(TRAIN)
            labels_predict = list()
            labels_gold = list()
            for batch_index in index_iterator.iterate(batch_size, mode=TRAIN, shuffle=True):
                feed_dict = {
                    nn.var(_key): dataset[_key][batch_index]
                    for _key in feed_key[TRAIN]
                }
                feed_dict[nn.var(SAMPLE_WEIGHTS)] = list(
                    map(label_weight.get, feed_dict[nn.var(LABEL_GOLD)]))
                feed_dict[nn.var(TEST_MODE)] = 0
                res = sess.run(fetches=fetches[TRAIN], feed_dict=feed_dict)
                labels_predict += res[LABEL_PREDICT].tolist()
                labels_gold += dataset[LABEL_GOLD][batch_index].tolist()
            labels_predict, labels_gold = labels_predict[:n_sample], labels_gold[:n_sample]

            res = basic_evaluate(gold=labels_gold, pred=labels_predict)
            print_evaluation(res)
            eval_history[TRAIN].append(res)

            global_step = tf.train.global_step(sess, nn.var(GLOBAL_STEP))

            if train_config.valid_rate == 0.:
                if best_epoch <= 10 or (best_res[TRAIN] is None
                                        or res[early_stop_metric] > best_res[TRAIN][early_stop_metric]):
                    best_epoch = epoch
                    best_res[TRAIN] = res
                    no_update_count[TRAIN] = 0
                    saver.save(sess, save_path=model_output_prefix, global_step=global_step)
                else:
                    no_update_count[TRAIN] += 1
            else:
                if best_res[TRAIN] is None or res[early_stop_metric] > best_res[TRAIN][early_stop_metric]:
                    best_res[TRAIN] = res
                    no_update_count[TRAIN] = 0
                else:
                    no_update_count[TRAIN] += 1

                # Evaluate on the validation set without updating the parameters
                print('VALID')
                n_sample = index_iterator.n_sample(VALID)
                labels_predict = list()
                labels_gold = list()
                for batch_index in index_iterator.iterate(batch_size, mode=VALID, shuffle=False):
                    feed_dict = {
                        nn.var(_key): dataset[_key][batch_index]
                        for _key in feed_key[TEST]
                    }
                    feed_dict[nn.var(TEST_MODE)] = 1
                    res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                    labels_predict += res[LABEL_PREDICT].tolist()
                    labels_gold += dataset[LABEL_GOLD][batch_index].tolist()
                labels_predict, labels_gold = labels_predict[:n_sample], labels_gold[:n_sample]

                res = basic_evaluate(gold=labels_gold, pred=labels_predict)
                eval_history[VALID].append(res)
                print_evaluation(res)

                # Early stop
                if best_epoch <= 10 or (best_res[VALID] is None
                                        or res[early_stop_metric] > best_res[VALID][early_stop_metric]):
                    best_epoch = epoch
                    saver.save(sess, save_path=model_output_prefix, global_step=global_step)
                    best_res[VALID] = res
                    no_update_count[VALID] = 0
                else:
                    no_update_count[VALID] += 1

            # Evaluate on the test set
            _mode = TEST
            _dataset = datasets[_mode]
            _index_iterator = SimpleIndexIterator.from_dataset(_dataset)
            _n_sample = _index_iterator.n_sample()
            labels_predict = list()
            labels_gold = list()
            for batch_index in _index_iterator.iterate(batch_size, shuffle=False):
                feed_dict = {
                    nn.var(_key): _dataset[_key][batch_index]
                    for _key in feed_key[TEST]
                }
                feed_dict[nn.var(TEST_MODE)] = 1
                res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                labels_predict += res[LABEL_PREDICT].tolist()
                labels_gold += _dataset[LABEL_GOLD][batch_index].tolist()
            labels_predict, labels_gold = labels_predict[:_n_sample], labels_gold[:_n_sample]

            res = basic_evaluate(gold=labels_gold, pred=labels_predict)
            eval_history[TEST].append(res)
            print('TEST')
            print_evaluation(res)

            if best_res[TEST] is None or res[F1_SCORE] > best_res[TEST][F1_SCORE]:
                best_res[TEST] = res
                best_epoch_test = epoch

            if no_update_count[TRAIN] >= max_no_update_count:
                break
        # Training ends ########################################################

    # Make sure the output folder exists
    print('========================= BEST ROUND EVALUATION =========================')
    json.dump(eval_history, open(data_config.output_path(output_key, 'eval', 'json'), 'w'))

    labels_predict_final = None
    labels_gold_final = load_label_list(data_config.path(FINAL, LABEL))

    with tf.Session() as sess:
        prefix_checkpoint = tf.train.latest_checkpoint(data_config.model_path(key=output_key))
        saver = tf.train.import_meta_graph('{}.meta'.format(prefix_checkpoint))
        saver.restore(sess, prefix_checkpoint)

        nn = BaseNNModel(config=None)
        nn.set_graph(tf.get_default_graph())

        for mode in [TRAIN, TEST, FINAL]:
            dataset = load_dataset(mode=mode,
                                   vocab_id_mapping=vocab_id_mapping,
                                   max_seq_len=nn_config.seq_len,
                                   with_label=False)
            index_iterator = SimpleIndexIterator.from_dataset(dataset)
            n_sample = index_iterator.n_sample()

            prob_predict = list()
            labels_predict = list()

            for batch_index in index_iterator.iterate(batch_size, shuffle=False):
                feed_dict = {
                    nn.var(_key): dataset[_key][batch_index]
                    for _key in feed_key[TEST]
                }
                feed_dict[nn.var(TEST_MODE)] = 1
                res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                prob_predict += res[PROB_PREDICT].tolist()
                labels_predict += res[LABEL_PREDICT].tolist()

            prob_predict = prob_predict[:n_sample]
            labels_predict = labels_predict[:n_sample]
            if mode == FINAL:
                labels_predict_final = labels_predict

            # Export the predicted labels
            with open(data_config.output_path(output_key, mode, LABEL_PREDICT), 'w') as file_obj:
                for _label in labels_predict:
                    file_obj.write('{}\n'.format(_label))

            with open(data_config.output_path(output_key, mode, PROB_PREDICT), 'w') as file_obj:
                for _prob in prob_predict:
                    file_obj.write('\t'.join(map(str, _prob)) + '\n')

    print('====== best epoch test: {} ======'.format(best_epoch_test))
    for mode in [TRAIN, VALID, TEST]:
        if mode == VALID and train_config.valid_rate == 0.:
            continue
        print(mode)
        res = eval_history[mode][best_epoch_test]
        print_evaluation(res)
        if mode == TEST:
            for col in res[CONFUSION_MATRIX]:
                print(','.join(map(str, col)))
            print(eval_history[TEST][best_epoch_test])
        print()

    print('====== best epoch valid: {} ======'.format(best_epoch))
    for mode in [TRAIN, VALID, TEST]:
        if mode == VALID and train_config.valid_rate == 0.:
            continue
        print(mode)
        res = eval_history[mode][best_epoch]
        print_evaluation(res)
        if mode == TEST:
            for col in res[CONFUSION_MATRIX]:
                print(','.join(map(str, col)))
        json.dump(res, open(data_config.output_path(output_key, mode, EVALUATION), 'w'))
        print()
    print(eval_history[TEST][best_epoch])
    print()

    if check:
        print('====== label_map check ======')
        label_map = train_config.label_map(label_key)
        if label_map is not None:
            new_gold = list()
            new_pred = list()
            for g, p in zip(labels_gold_final, labels_predict_final):
                if g in label_map:
                    new_gold.append(label_map[g])
                    new_pred.append(p)
            labels_gold_final = new_gold
            labels_predict_final = new_pred

        res = basic_evaluate(gold=labels_gold_final, pred=labels_predict_final)
        print_evaluation(res)
        for col in res[CONFUSION_MATRIX]:
            print(','.join(map(str, col)))

    print('OUTPUT_KEY: {}'.format(output_key))
def main(ensemble_mode, config_path='e93.yaml', final_output=None):
    """
    [Usage]
    python3 -m algo.ensemble93 main -e mv --build-analysis

    :param ensemble_mode:
    :param config_path:
    :param final_output: string
    :return:
    """
    config_data = yaml.load(open(config_path))
    config = Config(data=config_data)

    labels_predict = dict()
    labels_predict_last = dict()
    labels_gold = dict()
    n_sample = dict()

    modes = {
        TRAIN: [TRAIN, TEST],
        FINAL: [FINAL, ]
    }
    for mode in [TRAIN, FINAL]:
        labels = list()
        for _mode in modes[mode]:
            label_path = data_config.path(_mode, LABEL, None)
            labels += load_label_list(label_path)
        labels_gold[mode] = labels
        n_sample[mode] = len(labels)
    output_dim = max(labels_gold[TRAIN]) + 1

    if ensemble_mode == SOFT_VOTING:
        for mode in [TRAIN, TEST]:
            components = dict()
            for output_key in config.components:
                path = data_config.output_path(output_key, mode, PROB_PREDICT)
                prob_list = list()
                with open(path) as file_obj:
                    for line in file_obj:
                        line = line.strip()
                        if line == '':
                            continue
                        prob = list(map(float, line.split('\t')))
                        prob_list.append(prob)
                components[output_key] = prob_list

            labels = list()
            for i in range(n_sample[mode]):
                prob = np.zeros((output_dim, ))
                for output_key, prob_list in components.items():
                    prob += np.asarray(prob_list[i])
                labels.append(np.argmax(prob))
            labels_predict[mode] = labels
    elif ensemble_mode == MAJORITY_VOTING:
        for mode in [TRAIN, FINAL]:
            components = list()
            for output_key in config.components:
                label_list = list()
                for _mode in modes[mode]:
                    path = data_config.output_path(output_key, _mode, LABEL_PREDICT)
                    label_list += load_label_list(path)
                components.append(label_list)

            labels = list()
            for i in range(n_sample[mode]):
                prob = np.zeros((output_dim, ))
                for label_list in components:
                    label = label_list[i]
                    prob[label] += 1
                labels.append(np.argmax(prob))
            labels_predict[mode] = labels
    elif ensemble_mode == WEIGHTED_MAJORITY_VOTE:
        raise NotImplementedError
    else:
        raise ValueError('unknown mode: {}'.format(ensemble_mode))

    for mode in [TRAIN, FINAL]:
        if mode == TRAIN:
            continue
        print('=== {} ==='.format(mode))

        res = basic_evaluate(gold=labels_gold[mode], pred=labels_predict[mode])
        print(mode)
        print_evaluation(res)
        for col in res[CONFUSION_MATRIX]:
            print(','.join(map(str, col)))
        print()

        n_sample = len(labels_predict[mode])
        labels_predict_last[mode] = labels_predict[mode]

        # Correct predictions within "HAS" (the non-OTHERS classes)
        if config.tri_enabled:
            n_changed = 0

            votes = [[0 for _ in range(4)] for _ in range(n_sample)]
            for output_key in config.tri:
                labels = list()
                for _mode in modes[mode]:
                    path = data_config.output_path(output_key, _mode, LABEL_PREDICT)
                    labels += load_label_list(path)
                if len(labels) != n_sample:
                    raise Exception('mismatch {}({}) != {}'.format(output_key, len(labels), n_sample))
                for i, label in enumerate(labels):
                    votes[i][label] += 1

            base = list() + labels_predict_last[mode]
            for i, vote in enumerate(votes):
                if base[i] != 0:
                    arg_max = int(np.argmax(vote))
                    if arg_max != 0 and vote[arg_max] >= config.tri_min_vote:
                        if base[i] != arg_max:
                            n_changed += 1
                        base[i] = arg_max

            print('n_exchanged within "HAS": {}'.format(n_changed))
            labels_predict_last[mode] = base

            res = basic_evaluate(gold=labels_gold[mode], pred=base)
            print(mode, '(after TRI)')
            print_evaluation(res)
            for col in res[CONFUSION_MATRIX]:
                print(','.join(map(str, col)))
            print()

        # Re-label samples predicted as "HAS" back to OTHERS
        if config.others_enabled:
            votes = [0 for i in range(n_sample)]
            n_changed = 0

            for output_key in config.others:
                labels = list()
                for _mode in modes[mode]:
                    path = data_config.output_path(output_key, _mode, LABEL_PREDICT)
                    labels += load_label_list(path)
                if len(labels) != n_sample:
                    raise Exception('mismatch {}({}) != {}'.format(output_key, len(labels), n_sample))
                for i, label in enumerate(labels):
                    if label == 0:
                        votes[i] += 1

            if config.others_min_vote == 'all':
                min_vote = len(config.others)
            else:
                min_vote = int(config.others_min_vote)
            base = list() + labels_predict_last[mode]
            for i, vote in enumerate(votes):
                if vote >= min_vote:
                    if base[i] != 0:
                        n_changed += 1
                    base[i] = 0

            print('n_changed to "OTHERS": {}'.format(n_changed))
            labels_predict_last[mode] = base

            res = basic_evaluate(gold=labels_gold[mode], pred=base)
            print(mode, '(after OTHERS)')
            print_evaluation(res)
            for col in res[CONFUSION_MATRIX]:
                print(','.join(map(str, col)))
            print()

        if mode == FINAL and final_output is not None:
            first_line = open(data_config.path_train, 'r').readline()
            with open(final_output, 'w') as o_obj:
                o_obj.write(first_line)

                lines = open(data_config.path_test_no_labels).read().strip().split('\n')
                lines = lines[1:]
                lines = list(map(lambda l: l.strip(), lines))

                labels = labels_predict_last[FINAL]
                labels = list(map(lambda l: label_str[l], labels))

                assert len(labels) == len(lines)
                for line, label in zip(lines, labels):
                    o_obj.write('{}\t{}\n'.format(line, label))
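# A compact, invented illustration of the two correction passes above: the
# 'tri' pass re-votes among the non-OTHERS ("HAS") classes, then the 'others'
# pass flips samples with a strong OTHERS consensus back to label 0.
import numpy as np

base = [1, 2, 0, 3]                        # current ensemble predictions
tri_votes = [[0, 0, 3, 0], [0, 1, 1, 1], [0, 3, 0, 0], [0, 0, 0, 3]]
tri_min_vote = 2
for i, vote in enumerate(tri_votes):
    if base[i] != 0:                       # OTHERS predictions are left alone
        arg_max = int(np.argmax(vote))
        if arg_max != 0 and vote[arg_max] >= tri_min_vote:
            base[i] = arg_max              # enough agreement: re-label within HAS
assert base == [2, 2, 0, 3]                # sample 0 flips 1 -> 2

others_votes = [3, 0, 0, 3]                # per-sample count of OTHERS votes
min_vote = 2
for i, vote in enumerate(others_votes):
    if vote >= min_vote:
        base[i] = 0                        # strong OTHERS consensus wins
assert base == [0, 2, 0, 0]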
def tf_idf(dataset_key, text_version, label_version=None, use_class_weights=True):
    """
    python algo/svm.py tf_idf semeval2018_task3 -t ek -l A
    """
    from sklearn import preprocessing
    from sklearn.preprocessing import Normalizer
    from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, HashingVectorizer

    data_config = getattr(
        importlib.import_module('dataset.{}.config'.format(dataset_key)), 'config')

    pos_label = None
    if dataset_key == 'semeval2018_task3' and label_version == 'A':
        pos_label = 1

    datasets = dict()
    for mode in [TRAIN, TEST]:
        datasets[mode] = {
            TEXT: load_text_list(data_config.path(mode, TEXT, text_version)),
            LABEL: load_label_list(data_config.path(mode, LABEL, label_version))
        }

    vectorizers = {
        #TF_IDF: TfidfVectorizer(ngram_range=(1, 3), min_df=0.01),
        TF: CountVectorizer(
            #tokenizer=lambda x: filter(lambda _t: not _t.startswith('</'), x.split(' ')),
            tokenizer=lambda x: x.split(' '),
            ngram_range=(1, 3),
            min_df=0.02,
            max_features=1000),
        TF_C: TfidfVectorizer(ngram_range=(1, 1),
                              analyzer='char',
                              lowercase=False,
                              smooth_idf=True,
                              sublinear_tf=True)
    }
    for key, vectorizer in vectorizers.items():
        feat = vectorizer.fit_transform(datasets[TRAIN][TEXT])
        datasets[TRAIN][key] = feat
        feat = vectorizer.transform(datasets[TEST][TEXT])
        datasets[TEST][key] = feat

    if use_class_weights:
        class_weight = 'balanced'
    else:
        class_weight = None

    clf = svm.SVC(class_weight=class_weight)
    #clf = LogisticRegression(C=1., random_state=0, class_weight='balanced')

    X = hstack([datasets[TRAIN][k] for k in vectorizers.keys()])
    #scaler = preprocessing.StandardScaler()
    #X = scaler.fit_transform(X=X.todense())
    clf.fit(X=X, y=datasets[TRAIN][LABEL])

    for mode in [TRAIN, TEST]:
        X = hstack([datasets[mode][k] for k in vectorizers.keys()])
        #X = scaler.transform(X=X.todense())
        labels_predict = clf.predict(X=X)
        labels_gold = datasets[mode][LABEL]
        res = basic_evaluate(gold=labels_gold, pred=labels_predict, pos_label=pos_label)
        print(mode)
        print_evaluation(res)
        print()
def train(text_version='ek', label_version=None, config_path='config93_naive.yaml'):
    """
    python -m algo.main93_v2 train
    python3 -m algo.main93_v2 train -c config_ntua93.yaml

    :param text_version: string
    :param label_version: string
    :param config_path: string
    :return:
    """
    config_data = yaml.load(open(config_path))

    output_key = '{}_{}_{}'.format(NNModel.name, text_version, int(time.time()))
    if label_version is not None:
        output_key = '{}_{}'.format(label_version, output_key)
    print('OUTPUT_KEY: {}'.format(output_key))

    # Prepare the output folders
    data_config.prepare_output_folder(output_key=output_key)
    data_config.prepare_model_folder(output_key=output_key)

    shutil.copy(config_path, data_config.output_path(output_key, ALL, CONFIG))

    w2v_key = '{}_{}'.format(config_data['word']['w2v_version'], text_version)
    w2v_model_path = data_config.path(ALL, WORD2VEC, w2v_key)
    vocab_train_path = data_config.path(TRAIN, VOCAB, text_version)

    # Load the vocabulary
    # The model keeps every word vector supported by the embedding model,
    # and randomly initialises vectors for words that occur often enough
    vocab_meta_list = load_vocab_list(vocab_train_path)
    vocabs = [
        _meta['t'] for _meta in vocab_meta_list
        if _meta['tf'] >= config_data['word']['min_tf']
    ]

    # Load the word vectors and related data
    lookup_table, vocab_id_mapping, embedding_dim = load_lookup_table2(
        w2v_model_path=w2v_model_path, vocabs=vocabs)
    json.dump(
        vocab_id_mapping,
        open(data_config.output_path(output_key, ALL, VOCAB_ID_MAPPING), 'w'))

    # Load the configs
    nn_config = NNConfig(config_data)
    train_config = TrainConfig(config_data['train'])
    early_stop_metric = train_config.early_stop_metric

    # Load the training data
    datasets, output_dim = load_dataset(vocab_id_mapping=vocab_id_mapping,
                                        max_seq_len=nn_config.seq_len,
                                        with_label=True,
                                        label_version=label_version)

    # Initialise the index iterators over the datasets
    index_iterators = {
        mode: IndexIterator(datasets[mode][LABEL_GOLD])
        for mode in [TRAIN, TEST]
    }
    # Split the training data into train and validation sets as configured
    index_iterators[TRAIN].split_train_valid(train_config.valid_rate)

    # Compute per-class weights
    if train_config.use_class_weights:
        label_weight = {
            # Follows the formula behind sklearn's class_weight='balanced';
            # experiments show a clear improvement
            _label: float(index_iterators[TRAIN].n_sample()) /
            (index_iterators[TRAIN].dim * len(_index))
            for _label, _index in index_iterators[TRAIN].label_index.items()
        }
    else:
        label_weight = {
            _label: 1.
            for _label in range(index_iterators[TRAIN].dim)
        }

    # Update the config based on the loaded data
    nn_config.set_embedding_dim(embedding_dim)
    nn_config.set_output_dim(output_dim)

    # Build the neural network
    nn = NNModel(config=nn_config)
    nn.build_neural_network(lookup_table=lookup_table)

    batch_size = train_config.batch_size
    fetches = {
        mode: {_key: nn.var(_key) for _key in fetch_key[mode]}
        for mode in [TRAIN, TEST]
    }
    last_eval = {TRAIN: None, VALID: None, TEST: None}

    model_output_prefix = data_config.model_path(key=output_key) + '/model'

    best_res = {mode: None for mode in [TRAIN, VALID]}
    no_update_count = {mode: 0 for mode in [TRAIN, VALID]}
    max_no_update_count = 10

    eval_history = {TRAIN: list(), DEV: list(), TEST: list()}

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        dataset = datasets[TRAIN]
        index_iterator = index_iterators[TRAIN]

        # Training starts ######################################################
        for epoch in range(train_config.epoch):
            print('== epoch {} = {} ='.format(epoch, output_key))

            # Train on the training set
            print('TRAIN')
            n_sample = index_iterator.n_sample(TRAIN)
            labels_predict = list()
            labels_gold = list()
            for batch_index in index_iterator.iterate(batch_size, mode=TRAIN, shuffle=True):
                feed_dict = {
                    nn.var(_key): dataset[_key][batch_index]
                    for _key in feed_key[TRAIN]
                }
                feed_dict[nn.var(SAMPLE_WEIGHTS)] = list(
                    map(label_weight.get, feed_dict[nn.var(LABEL_GOLD)]))
                feed_dict[nn.var(TEST_MODE)] = 0

                if train_config.input_dropout_keep_prob < 1.:
                    for _key in [TID_0, TID_1, TID_2]:
                        var = nn.var(_key)
                        _tids = feed_dict[var]
                        feed_dict[var] = tid_dropout(_tids, train_config.input_dropout_keep_prob)

                res = sess.run(fetches=fetches[TRAIN], feed_dict=feed_dict)
                labels_predict += res[LABEL_PREDICT].tolist()
                labels_gold += dataset[LABEL_GOLD][batch_index].tolist()
            labels_predict, labels_gold = labels_predict[:n_sample], labels_gold[:n_sample]

            res = basic_evaluate(gold=labels_gold, pred=labels_predict)
            print_evaluation(res)
            eval_history[TRAIN].append(res)

            global_step = tf.train.global_step(sess, nn.var(GLOBAL_STEP))

            if train_config.valid_rate == 0.:
                if best_res[TRAIN] is None or res[early_stop_metric] > best_res[TRAIN][early_stop_metric]:
                    best_res[TRAIN] = res
                    no_update_count[TRAIN] = 0
                    saver.save(sess, save_path=model_output_prefix, global_step=global_step)
                else:
                    no_update_count[TRAIN] += 1
            else:
                if best_res[TRAIN] is None or res[early_stop_metric] > best_res[TRAIN][early_stop_metric]:
                    best_res[TRAIN] = res
                    no_update_count[TRAIN] = 0
                else:
                    no_update_count[TRAIN] += 1

                # Evaluate on the validation set without updating the parameters
                print('VALID')
                n_sample = index_iterator.n_sample(VALID)
                labels_predict = list()
                labels_gold = list()
                for batch_index in index_iterator.iterate(batch_size, mode=VALID, shuffle=False):
                    feed_dict = {
                        nn.var(_key): dataset[_key][batch_index]
                        for _key in feed_key[TEST]
                    }
                    feed_dict[nn.var(TEST_MODE)] = 1
                    res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                    labels_predict += res[LABEL_PREDICT].tolist()
                    labels_gold += dataset[LABEL_GOLD][batch_index].tolist()
                labels_predict, labels_gold = labels_predict[:n_sample], labels_gold[:n_sample]

                res = basic_evaluate(gold=labels_gold, pred=labels_predict)
                eval_history[DEV].append(res)
                print_evaluation(res)

                # Early stop
                if best_res[VALID] is None or res[early_stop_metric] > best_res[VALID][early_stop_metric]:
                    saver.save(sess, save_path=model_output_prefix, global_step=global_step)
                    best_res[VALID] = res
                    no_update_count[VALID] = 0
                else:
                    no_update_count[VALID] += 1

            # Evaluate on the test set
            _mode = TEST
            _dataset = datasets[_mode]
            _index_iterator = index_iterators[_mode]
            _n_sample = _index_iterator.n_sample()
            labels_predict = list()
            labels_gold = list()
            for batch_index in _index_iterator.iterate(batch_size, shuffle=False):
                feed_dict = {
                    nn.var(_key): _dataset[_key][batch_index]
                    for _key in feed_key[TEST]
                }
                feed_dict[nn.var(TEST_MODE)] = 1
                res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                labels_predict += res[LABEL_PREDICT].tolist()
                labels_gold += _dataset[LABEL_GOLD][batch_index].tolist()
            labels_predict, labels_gold = labels_predict[:_n_sample], labels_gold[:_n_sample]

            res = basic_evaluate(gold=labels_gold, pred=labels_predict)
            eval_history[TEST].append(res)

            if no_update_count[TRAIN] >= max_no_update_count:
                break
        # Training ends ########################################################

    # Make sure the output folder exists
    print('========================= BEST ROUND EVALUATION =========================')
    json.dump(eval_history, open(data_config.output_path(output_key, 'eval', 'json'), 'w'))

    with tf.Session() as sess:
        prefix_checkpoint = tf.train.latest_checkpoint(data_config.model_path(key=output_key))
        saver = tf.train.import_meta_graph('{}.meta'.format(prefix_checkpoint))
        saver.restore(sess, prefix_checkpoint)

        nn = BaseNNModel(config=None)
        nn.set_graph(tf.get_default_graph())

        for mode in [TRAIN, TEST]:
            dataset = datasets[mode]
            index_iterator = index_iterators[mode]
            n_sample = index_iterator.n_sample()

            prob_predict = list()
            labels_predict = list()
            labels_gold = list()
            hidden_feats = list()

            for batch_index in index_iterator.iterate(batch_size, shuffle=False):
                feed_dict = {
                    nn.var(_key): dataset[_key][batch_index]
                    for _key in feed_key[TEST]
                }
                feed_dict[nn.var(TEST_MODE)] = 1
                res = sess.run(fetches=fetches[TEST], feed_dict=feed_dict)
                prob_predict += res[PROB_PREDICT].tolist()
                labels_predict += res[LABEL_PREDICT].tolist()
                hidden_feats += res[HIDDEN_FEAT].tolist()
                labels_gold += dataset[LABEL_GOLD][batch_index].tolist()

            prob_predict = prob_predict[:n_sample]
            labels_predict = labels_predict[:n_sample]
            labels_gold = labels_gold[:n_sample]
            hidden_feats = hidden_feats[:n_sample]

            if mode == TEST:
                res = basic_evaluate(gold=labels_gold, pred=labels_predict)
                best_res[TEST] = res

            # Export the hidden features
            with open(data_config.output_path(output_key, mode, HIDDEN_FEAT), 'w') as file_obj:
                for _feat in hidden_feats:
                    file_obj.write('\t'.join(map(str, _feat)) + '\n')

            # Export the predicted labels
            with open(data_config.output_path(output_key, mode, LABEL_PREDICT), 'w') as file_obj:
                for _label in labels_predict:
                    file_obj.write('{}\n'.format(_label))

            with open(data_config.output_path(output_key, mode, PROB_PREDICT), 'w') as file_obj:
                for _prob in prob_predict:
                    file_obj.write('\t'.join(map(str, _prob)) + '\n')

    for mode in [TRAIN, VALID, TEST]:
        if mode == VALID and train_config.valid_rate == 0.:
            continue
        res = best_res[mode]
        print(mode)
        print_evaluation(res)
        json.dump(res, open(data_config.output_path(output_key, mode, EVALUATION), 'w'))
        print()

    test_score_list = map(lambda _item: _item['f1'], eval_history[TEST])
    print('best test f1 reached: {}'.format(max(test_score_list)))
    print('OUTPUT_KEY: {}'.format(output_key))
def main(input_filename, config_path='e93.yaml', final_output=None):
    """
    [Usage]
    python3 -m algo.ensemble93 main -e mv --build-analysis
    """
    config_data = yaml.load(open(config_path))
    config = Config(data=config_data)

    labels_gold = dict()
    labels_predict = dict()
    labels_predict_last = dict()

    dataset = Processor.load_origin(input_filename)
    labels_predict[FINAL] = list(map(lambda _item: _item[-1], dataset))

    # NOTE: `modes` was undefined in the original snippet; this mirrors the
    # mapping used by ensemble93.main above (assumption)
    modes = {FINAL: [FINAL, ]}

    for mode in [FINAL, ]:
        if not mode == FINAL:
            res = basic_evaluate(gold=labels_gold[mode], pred=labels_predict[mode])
            print(mode)
            print_evaluation(res)
            for col in res[CONFUSION_MATRIX]:
                print(','.join(map(str, col)))
            print()

        n_sample = len(labels_predict[mode])
        labels_predict_last[mode] = labels_predict[mode]

        # Correct predictions within "HAS" (the non-OTHERS classes)
        if config.tri_enabled:
            n_changed = 0

            votes = [[0 for _ in range(4)] for _ in range(n_sample)]
            for output_key in config.tri:
                labels = list()
                for _mode in modes[mode]:
                    path = data_config.output_path(output_key, _mode, LABEL_PREDICT)
                    labels += load_label_list(path)
                if len(labels) != n_sample:
                    raise Exception('mismatch {}({}) != {}'.format(output_key, len(labels), n_sample))
                for i, label in enumerate(labels):
                    votes[i][label] += 1

            base = list() + labels_predict_last[mode]
            for i, vote in enumerate(votes):
                arg_max = int(np.argmax(vote))
                if arg_max == 0:
                    continue
                if base[i] != 0:
                    if vote[arg_max] >= config.tri_min_vote:
                        if base[i] != arg_max:
                            n_changed += 1
                        base[i] = arg_max
                elif vote[arg_max] >= config.tri_out_vote:
                    base[i] = arg_max
                    n_changed += 1

            print('n_exchanged within "HAS": {}'.format(n_changed))
            labels_predict_last[mode] = base

            if not mode == FINAL:
                res = basic_evaluate(gold=labels_gold[mode], pred=base)
                print(mode, '(after TRI)')
                print_evaluation(res)
                for col in res[CONFUSION_MATRIX]:
                    print(','.join(map(str, col)))
                print()

        # Re-label samples predicted as "HAS" back to OTHERS
        if config.others_enabled:
            votes = [0 for _ in range(n_sample)]
            n_changed = 0

            for output_key in config.others:
                labels = list()
                for _mode in modes[mode]:
                    path = data_config.output_path(output_key, _mode, LABEL_PREDICT)
                    labels += load_label_list(path)
                if len(labels) != n_sample:
                    raise Exception('mismatch {}({}) != {}'.format(output_key, len(labels), n_sample))
                for i, label in enumerate(labels):
                    if label == 0:
                        votes[i] += 1

            if config.others_min_vote == 'all':
                min_vote = len(config.others)
            else:
                min_vote = int(config.others_min_vote)
            base = list() + labels_predict_last[mode]
            for i, vote in enumerate(votes):
                if vote >= min_vote:
                    if base[i] != 0:
                        n_changed += 1
                    base[i] = 0

            print('n_changed to "OTHERS": {}'.format(n_changed))
            labels_predict_last[mode] = base

            if not mode == FINAL:
                res = basic_evaluate(gold=labels_gold[mode], pred=base)
                print(mode, '(after OTHERS)')
                print_evaluation(res)
                for col in res[CONFUSION_MATRIX]:
                    print(','.join(map(str, col)))
                print()

        if mode == FINAL and final_output is not None:
            labels = labels_predict_last[FINAL]
            export_final(final_output, labels)