    Precision_negative /= samples_with_negative_judgement
    Precision_positive /= samples_with_positive_judgement

    msg += "samples_with_negative_judgement: {}\n".format(
        samples_with_negative_judgement
    )
    msg += "samples_with_positive_judgement: {}\n".format(
        samples_with_positive_judgement
    )
    msg += "MRR_negative: {}\n".format(MRR_negative)
    msg += "MRR_positive: {}\n".format(MRR_positive)
    msg += "Precision_negative: {}\n".format(Precision_negative)
    msg += "Precision_positive: {}\n".format(Precision_positive)

    logger.info("\n" + msg + "\n")
    print("\n" + msg + "\n")

    # dump a pickle with the results of the experiment
    all_results = dict(
        list_of_results=list_of_results,
        global_MRR=MRR,
        global_P_at_10=Precision,
    )
    with open("{}/result.pkl".format(log_directory), "wb") as f:
        pickle.dump(all_results, f)

    return Precision1


if __name__ == "__main__":
    parser = options.get_eval_KB_completion_parser()
    args = options.parse_args(parser)
    main(args)
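For later analysis, the dumped results can be reloaded from the pickle. A minimal sketch, assuming the run directory is known (the path below is hypothetical; the keys match the all_results dict built above):

import pickle

# Hypothetical path: use the same log_directory as the evaluation run.
log_directory = "output/experiment_run"

with open("{}/result.pkl".format(log_directory), "rb") as f:
    all_results = pickle.load(f)

# Keys match the all_results dict dumped above.
print(all_results["global_MRR"])      # global mean reciprocal rank
print(all_results["global_P_at_10"])  # global precision at 10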
        sentences = [[json_test_official[index]['claim']]]
        print("\n{}:".format(model_name))
        contextual_embeddings, sentence_lengths, tokenized_text_list = \
            model.get_contextual_embeddings(sentences)
        # take the layer-11 embedding of the first token as the claim vector
        x_test_official[index] = contextual_embeddings[11][0][0]
        print(tokenized_text_list)

    return (x_train, json_train, x_test, json_test,
            x_test_official, json_test_official)


if __name__ == '__main__':
    parser = options.get_general_parser()
    args = options.parse_args(parser)
    # We pass the command-line flag --lm bert
    x_train, json_train, x_test, json_test, x_test_official, json_test_official = \
        main(args)

    # Save the datasets.
    # We create a numpy array with the labels of the training dataset.
    y_train = np.zeros((len(json_train), 1))
    for index in range(len(json_train)):
        if json_train[index]['label'] == 'SUPPORTS':
            y_train[index] = 1
        else:
            y_train[index] = 0
    y_train = np.ravel(y_train)  # flatten the (n, 1) array to shape (n,)
    y_train = y_train.astype(int)  # convert the array to int64
    # We do the same for the test dataset.
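Following the last comment, a minimal sketch of the analogous test-label construction, assuming json_test entries carry the same 'label' field as json_train; the element-wise loop above can also be collapsed into a single comprehension:

import numpy as np

# Sketch, assuming json_test entries have the same 'label' field as json_train.
y_test = np.array(
    [1 if entry['label'] == 'SUPPORTS' else 0 for entry in json_test],
    dtype=int,
)

This produces a 1-D int array directly, so the separate np.ravel and astype steps used above are not needed.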