def main():
    """Train a CRF tagger with L-BFGS, save the learned weights, and report
    precision/recall/F1 on both the training and test splits.

    Side effects: writes the weight vector to ``best_weight_features2.npy``
    and the Viterbi-decoded test output to ``dev.p5.CRF.f3.out`` under the
    data directory, and prints progress/metrics to stdout.
    """
    path = Path('../data/full')
    # get_xy returns parallel index/string encodings of the train and test
    # splits plus the word/tag/POS-tag index maps.
    (train_X, train_X_str, train_Y, train_Y_str,
     test_X, test_X_str, test_Y, test_Y_str,
     word2index, tag2index, postag2index, index2tag) = get_xy(path)
    feature2index = feature2idx(word2index, tag2index, postag2index)

    # Sanity-check the analytic gradient against a finite-difference estimate
    # at one coordinate before starting the (expensive) optimization.
    np.random.seed(1)
    theta = np.random.rand(len(feature2index))
    print('************Test*************')
    epsilon, idx = 1e-8, -2  # finite-difference step; coordinate to probe
    test_gradient(theta, epsilon, loss_gradient, feature2index,
                  feature_activation, train_X, train_Y, tag2index, idx,
                  param=0.1)

    def get_loss_grad(w):
        """Objective for L-BFGS: return (loss, gradient) at weights w."""
        loss, grads = loss_gradient(w, feature2index, feature_activation,
                                    train_X, train_Y, tag2index)
        return loss, grads

    def callbackF(w):
        # NOTE: re-evaluates the objective solely to log progress; this
        # doubles the per-iteration cost but matches the original behavior.
        loss = get_loss_grad(w)[0]
        print('Loss:{0:.4f}'.format(loss))

    print('************Train*************')
    init_w = np.zeros(len(feature2index))
    optimal_weight, final_loss, result_dict = fmin_l_bfgs_b(
        get_loss_grad, init_w, pgtol=0.01, callback=callbackF)
    path_weight = path / 'best_weight_features2.npy'
    np.save(path_weight, optimal_weight)

    path_output = path / 'dev.p5.CRF.f3.out'
    viterbi_output(path_output, test_X, test_X_str, tag2index, feature2index,
                   feature_activation, optimal_weight)

    # `eval` here is the project's evaluation routine (shadows the builtin).
    prec, rec, f1 = eval(train_X, train_Y_str, tag2index, feature2index,
                         feature_activation, optimal_weight)
    print('precision, recall, f1 on training set: {0} {1} {2}'.format(prec, rec, f1))
    prec, rec, f1 = eval(test_X, test_Y_str, tag2index, feature2index,
                         feature_activation, optimal_weight)
    print('precision, recall, f1 on test set: {0} {1} {2}'.format(prec, rec, f1))
def interpret_file(filename, env):
    """Interpret a Lisp source file, returning the value of the last expression.

    Raises IndexError if the file contains no expressions (unchanged from the
    original behavior).
    """
    with open(filename, 'r') as source_file:
        program_text = source_file.read()
    values = []
    for expression in parse_multiple(program_text):
        values.append(eval(expression, env))
    return values[-1]
def execute(line, context = execution_context):
    """Parse one line of Lisp source and evaluate it in *context*.

    Returns a (printed-representation, value) pair.  When the Lisp
    environment defines its own ``*eval``, evaluation is delegated to it;
    otherwise the host interpreter's ``core.eval`` is used.
    """
    import core
    expression = mexpr(line)
    if exists_eval_in_lisp(context):
        value = core.apply('*eval', to_lisp_list([expression, context]), context)
    else:
        value = core.eval(expression, context)
    return (sexpr(value), value)
# NOTE(review): this chunk starts mid-call — the first line below is the tail
# of an `init_model(..., restore=...)` assignment whose head lies outside this
# view.  Layout (this file arrived with its line structure collapsed) was
# reconstructed; verify against the original source.
                                    restore=params.src_classifier_restore)
# Build the target-domain encoder and the domain discriminator ("critic"),
# optionally restoring each from a saved checkpoint.
tgt_encoder = init_model(net=LeNetEncoder(),
                         restore=params.tgt_encoder_restore)
critic = init_model(Discriminator(), restore=params.d_model_restore)

# train source model
print("=== Training classifier for source domain ===")
# Skip training when both source nets were restored and the params flag says
# the source model is already trained.
if not (src_encoder.restored and src_classifier.restored and
        params.src_model_trained):
    src_encoder, src_classifier = train_src(src_encoder, src_classifier,
                                            src_data_loader, params)

# eval source model
# `eval` here is presumably a project-local evaluation helper, not the
# builtin — TODO confirm against the file's imports.
print("=== Evaluating classifier for source domain ===")
eval(src_encoder, src_classifier, src_data_loader)
print("=== Evaluating classifier for target domain ===")
eval(src_encoder, src_classifier, tgt_data_loader)

# train target encoder by GAN
print("=== Training encoder for target domain ===")
# init weights of target encoder with those of source encoder
if not tgt_encoder.restored:
    tgt_encoder.load_state_dict(src_encoder.state_dict())
# Adversarially train the target encoder against the critic unless a trained
# target model was already restored.
if not (tgt_encoder.restored and critic.restored and
        params.tgt_model_trained):
    tgt_encoder = train_tgt(src_encoder, src_classifier, tgt_encoder, critic,
                            src_data_loader, tgt_data_loader, params)
# NOTE(review): fragment from inside an enclosing k-fold loop — `fold`,
# `signals`, `labels`, `train_sequences`, `eval_sequences`, `final_results`
# and `final_confusion_mat` are defined outside this view.  This file arrived
# with its line structure collapsed; the loop nesting below was reconstructed
# and should be verified against the original source.
if opt.k_fold != 1:
    util.writelog(
        '------------------------------ k-fold:' + str(fold + 1) +
        ' ------------------------------', opt, True)
core.fold = fold
core.network_init()  # fresh network weights for this fold
for epoch in range(opt.epochs):
    # Pick the training routine by mode: plain training for the classify /
    # autoencoder modes, domain-adversarial (DANN) training otherwise.
    if opt.mode in ['classify_1d', 'classify_2d', 'autoencoder']:
        core.train(signals, labels, train_sequences[fold])
    elif opt.mode in ['domain', 'domain_1d']:
        core.dann_train(signals, labels, train_sequences[fold],
                        eval_sequences[fold])
    core.eval(signals, labels, eval_sequences[fold])
    core.epoch_save()
    core.check_remain_time()
final_results[fold] = core.results  # save result

if opt.mode != 'autoencoder':
    # Accumulate the confusion matrix of this fold's best epoch into the
    # cross-fold total.
    fold_best_confusion_mat = core.results['confusion_mat'][
        core.results['best_epoch']]
    final_confusion_mat += fold_best_confusion_mat
    if opt.k_fold != 1:
        util.writelog(
            'fold' + str(fold + 1) + ' -> macro-prec,reca,F1,err,kappa: ' +
            str(statistics.report(fold_best_confusion_mat)), opt, True, True)
def interpret(exp, env=None):
    """Interpret a single Lisp expression and return its printed form."""
    environment = [] if env is None else env
    value = eval(parse(exp), environment)
    return unparse(value)