                  logfile=LOG_FILE)
# rank premises for the training and test theorems with the trained model
rankings_train = prs.Rankings(train_theorems, model, params_data_trans,
                              n_jobs=N_JOBS, logfile=LOG_FILE)
rankings_test = prs.Rankings(test_theorems, model, params_data_trans,
                             n_jobs=N_JOBS, logfile=LOG_FILE)
# run the ATP on the rankings and add newly found proofs
params_atp_eval = {}
proofs_train.update(
    prs.atp_evaluation(rankings_train, statements, params_atp_eval,
                       dirpath=ATP_DIR, n_jobs=N_JOBS, logfile=LOG_FILE))
prs.utils.printline("STATS OF TRAINING PROOFS", logfile=LOG_FILE)
proofs_train.print_stats(logfile=LOG_FILE)
proofs_test = prs.atp_evaluation(rankings_test, statements, params_atp_eval,
                                 dirpath=ATP_DIR, n_jobs=N_JOBS, logfile=LOG_FILE)
prs.utils.printline("STATS OF TEST PROOFS", logfile=LOG_FILE)
proofs_test.print_stats(logfile=LOG_FILE)

# switch on negative mining (level 2) for the following iterations
params_data_trans['level_of_negative_mining'] = 2
for i in range(10):
params_data_trans = {
    'features': features,
    'chronology': chronology,
    'only_short_proofs': False
}
# randomly generated rankings
rankings_random = prs.Rankings(theorems, model=None, params=params_data_trans,
                               n_jobs=N_JOBS, logfile=LOG_FILE)
proofs = prs.atp_evaluation(rankings_random, statements, dirpath=ATP_DIR,
                            n_jobs=N_JOBS, logfile=LOG_FILE)
for i in range(40):
    prs.utils.printline("ITERATION: {}".format(i), LOG_FILE)
    # translate the proofs found so far into training examples and retrain
    train_labels, train_array = prs.proofs_to_train(proofs, params_data_trans,
                                                    n_jobs=N_JOBS, logfile=LOG_FILE)
    params_train = {}
    model = prs.train(train_labels, train_array, params=params_train,
                      n_jobs=N_JOBS, logfile=LOG_FILE)
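Both listings rely on objects prepared earlier in the experiment (theorems, statements, features, chronology), and the first one additionally assumes a split of the theorem names into train_theorems and test_theorems. A minimal sketch of how such a split could be produced with the standard library is given below; the split_theorems helper is illustrative only and not part of the prs interface, and theorems is assumed to be a list of theorem names.

import random

def split_theorems(theorems, test_fraction=0.2, seed=0):
    # deterministic shuffle followed by a proportional split
    shuffled = list(theorems)
    random.Random(seed).shuffle(shuffled)
    n_test = int(len(shuffled) * test_fraction)
    return shuffled[n_test:], shuffled[:n_test]

train_theorems, test_theorems = split_theorems(theorems)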