def main(train_path, test_path, results_file_path, sigmorphon_root_dir, input_dim, hidden_dim, epochs, layers,
         optimization, feat_input_dim):
    hyper_params = {'INPUT_DIM': input_dim, 'HIDDEN_DIM': hidden_dim, 'EPOCHS': epochs, 'LAYERS': layers,
                    'MAX_PREDICTION_LEN': MAX_PREDICTION_LEN, 'OPTIMIZATION': optimization}

    print 'train path = ' + str(train_path)
    print 'test path = ' + str(test_path)
    for param in hyper_params:
        print param + '=' + str(hyper_params[param])

    # load data
    (train_target_words, train_source_words, train_target_feat_dicts,
     train_source_feat_dicts) = prepare_sigmorphon_data.load_data(train_path, 2)
    (test_target_words, test_source_words, test_target_feat_dicts,
     test_source_feat_dicts) = prepare_sigmorphon_data.load_data(test_path, 2)
    alphabet, feature_types = prepare_sigmorphon_data.get_alphabet(train_target_words, train_source_words,
                                                                   train_target_feat_dicts, train_source_feat_dicts)

    # used for character dropout
    alphabet.append(NULL)
    alphabet.append(UNK)

    # used during decoding
    alphabet.append(EPSILON)
    alphabet.append(BEGIN_WORD)
    alphabet.append(END_WORD)

    feature_alphabet = common.get_feature_alphabet(train_source_feat_dicts + train_target_feat_dicts)
    feature_alphabet.append(UNK_FEAT)

    # add index markers to the alphabet - used to indicate copying from lemma to word
    for marker in [str(i) for i in xrange(MAX_PREDICTION_LEN)]:
        alphabet.append(marker)

    # feat 2 int
    feat_index = dict(zip(feature_alphabet, range(0, len(feature_alphabet))))

    # char 2 int
    alphabet_index = dict(zip(alphabet, range(0, len(alphabet))))
    inverse_alphabet_index = {index: char for char, index in alphabet_index.items()}

    # cluster the data by POS type (features)
    # TODO: do we need to cluster on both source and target feats?
    # probably enough to cluster on source here because the POS will be the same
    # (no derivational morphology in this task)
    train_cluster_to_data_indices = common.cluster_data_by_pos(train_source_feat_dicts)
    test_cluster_to_data_indices = common.cluster_data_by_pos(test_source_feat_dicts)

    # cluster the data by inflection type (features)
    # train_cluster_to_data_indices = common.cluster_data_by_morph_type(train_feat_dicts, feature_types)
    # test_cluster_to_data_indices = common.cluster_data_by_morph_type(test_feat_dicts, feature_types)

    accuracies = []
    final_results = {}

    # factored model: new model per inflection type
    for cluster_index, cluster_type in enumerate(train_cluster_to_data_indices):

        # get the inflection-specific data
        train_cluster_target_words = [train_target_words[i] for i in train_cluster_to_data_indices[cluster_type]]
        if len(train_cluster_target_words) < 1:
            print 'only ' + str(len(train_cluster_target_words)) + ' samples for this inflection type. skipping'
            continue
        else:
            print 'now evaluating model for cluster ' + str(cluster_index + 1) + '/' + \
                  str(len(train_cluster_to_data_indices)) + ': ' + cluster_type + ' with ' + \
                  str(len(train_cluster_target_words)) + ' examples'

        # test best model
        try:
            test_cluster_source_words = [test_source_words[i] for i in test_cluster_to_data_indices[cluster_type]]
            test_cluster_target_words = [test_target_words[i] for i in test_cluster_to_data_indices[cluster_type]]
            test_cluster_source_feat_dicts = [test_source_feat_dicts[i] for i in
                                              test_cluster_to_data_indices[cluster_type]]
            test_cluster_target_feat_dicts = [test_target_feat_dicts[i] for i in
                                              test_cluster_to_data_indices[cluster_type]]

            # load best model
            best_model, encoder_frnn, encoder_rrnn, decoder_rnn = load_best_model(
                str(cluster_index), alphabet, results_file_path, input_dim, hidden_dim, layers,
                feature_alphabet, feat_input_dim, feature_types)

            predicted_templates = task2_joint_structured_inflection_feedback_fix.predict_templates(
                best_model, decoder_rnn, encoder_frnn, encoder_rrnn, alphabet_index, inverse_alphabet_index,
                test_cluster_source_words, test_cluster_source_feat_dicts, test_cluster_target_feat_dicts,
                feat_index, feature_types)

            accuracy = task2_joint_structured_inflection_feedback_fix.evaluate_model(
                predicted_templates, test_cluster_source_words, test_cluster_source_feat_dicts,
                test_cluster_target_words, test_cluster_target_feat_dicts, feature_types, print_results=False)
            accuracies.append(accuracy)

            # iterate through the predictions in the same order they appeared in the original file and, for each,
            # record the lemma, source features, inflection and target features for later printing in the task format
            for i in test_cluster_to_data_indices[cluster_type]:
                joint_index = test_source_words[i] + ':' + \
                              common.get_morph_string(test_source_feat_dicts[i], feature_types) + ':' + \
                              common.get_morph_string(test_target_feat_dicts[i], feature_types)
                inflection = task2_joint_structured_inflection_feedback_fix.instantiate_template(
                    predicted_templates[joint_index], test_source_words[i])
                final_results[i] = (test_source_words[i], test_source_feat_dicts[i], inflection,
                                    test_target_feat_dicts[i])
        except KeyError:
            print 'could not find relevant examples in test data for cluster: ' + cluster_type

    accuracy_vals = [accuracies[i][1] for i in xrange(len(accuracies))]
    macro_avg_accuracy = sum(accuracy_vals) / len(accuracies)
    print 'macro avg accuracy: ' + str(macro_avg_accuracy)

    micro_numerator = sum([accuracies[i][0] * accuracies[i][1] for i in xrange(len(accuracies))])
    micro_denominator = sum([accuracies[i][0] for i in xrange(len(accuracies))])
    micro_average_accuracy = micro_numerator / micro_denominator
    print 'micro avg accuracy: ' + str(micro_average_accuracy)

    if 'test' in test_path:
        suffix = '.best.test'
    else:
        suffix = '.best'
    task2_joint_structured_inflection.write_results_file(hyper_params, micro_average_accuracy, train_path, test_path,
                                                         results_file_path + suffix, sigmorphon_root_dir,
                                                         final_results)
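# --- illustration only, not part of the original script ---
# A minimal sketch of the macro/micro aggregation performed above, assuming each
# entry in `accuracies` is a (num_examples, accuracy) pair, as implied by how
# the result of evaluate_model is consumed. `aggregate_accuracies` is a
# hypothetical helper, not a function from this project.
def aggregate_accuracies(accuracies):
    # macro average: unweighted mean of the per-cluster accuracies
    macro = sum(acc for (n, acc) in accuracies) / float(len(accuracies))
    # micro average: per-cluster accuracy weighted by the cluster's example count
    micro = sum(n * acc for (n, acc) in accuracies) / float(sum(n for (n, acc) in accuracies))
    return macro, micro

# e.g. for two clusters with 10 and 90 examples, the macro average treats both
# clusters equally while the micro average is dominated by the larger one:
# aggregate_accuracies([(10, 1.0), (90, 0.5)]) returns (0.75, 0.55)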
def main(train_path, test_path, results_file_path, sigmorphon_root_dir, input_dim, hidden_dim, epochs, layers,
         optimization, feat_input_dim, nbest):
    hyper_params = {'INPUT_DIM': input_dim, 'HIDDEN_DIM': hidden_dim, 'EPOCHS': epochs, 'LAYERS': layers,
                    'MAX_PREDICTION_LEN': MAX_PREDICTION_LEN, 'OPTIMIZATION': optimization, 'NBEST': nbest}

    print 'train path = ' + str(train_path)
    print 'test path = ' + str(test_path)
    for param in hyper_params:
        print param + '=' + str(hyper_params[param])

    # load data
    (train_target_words, train_source_words, train_target_feat_dicts,
     train_source_feat_dicts) = prepare_sigmorphon_data.load_data(train_path, 2)
    (test_target_words, test_source_words, test_target_feat_dicts,
     test_source_feat_dicts) = prepare_sigmorphon_data.load_data(test_path, 2)
    alphabet, feature_types = prepare_sigmorphon_data.get_alphabet(train_target_words, train_source_words,
                                                                   train_target_feat_dicts, train_source_feat_dicts)

    # used for character dropout
    alphabet.append(NULL)
    alphabet.append(UNK)

    # used during decoding
    alphabet.append(EPSILON)
    alphabet.append(BEGIN_WORD)
    alphabet.append(END_WORD)

    feature_alphabet = common.get_feature_alphabet(train_source_feat_dicts + train_target_feat_dicts)
    feature_alphabet.append(UNK_FEAT)

    # add index markers to the alphabet - used to indicate copying from lemma to word
    for marker in [str(i) for i in xrange(MAX_PREDICTION_LEN)]:
        alphabet.append(marker)

    # feat 2 int
    feat_index = dict(zip(feature_alphabet, range(0, len(feature_alphabet))))

    # char 2 int
    alphabet_index = dict(zip(alphabet, range(0, len(alphabet))))
    inverse_alphabet_index = {index: char for char, index in alphabet_index.items()}

    # cluster the data by POS type (features)
    # TODO: do we need to cluster on both source and target feats?
    # probably enough to cluster on source here because the POS will be the same
    # (no derivational morphology in this task)
    # train_cluster_to_data_indices = common.cluster_data_by_pos(train_source_feat_dicts)
    # test_cluster_to_data_indices = common.cluster_data_by_pos(test_source_feat_dicts)

    # cluster the data by inflection type (features)
    # train_cluster_to_data_indices = common.cluster_data_by_morph_type(train_feat_dicts, feature_types)
    # test_cluster_to_data_indices = common.cluster_data_by_morph_type(test_feat_dicts, feature_types)

    # no clustering, single model
    train_cluster_to_data_indices = common.get_single_pseudo_cluster(train_source_feat_dicts)
    test_cluster_to_data_indices = common.get_single_pseudo_cluster(test_source_feat_dicts)

    accuracies = []
    final_results = {}

    # factored model: new model per inflection type
    for cluster_index, cluster_type in enumerate(train_cluster_to_data_indices):

        # get the inflection-specific data
        train_cluster_target_words = [train_target_words[i] for i in train_cluster_to_data_indices[cluster_type]]
        if len(train_cluster_target_words) < 1:
            print 'only ' + str(len(train_cluster_target_words)) + ' samples for this inflection type. skipping'
            continue
        else:
            print 'now evaluating model for cluster ' + str(cluster_index + 1) + '/' + \
                  str(len(train_cluster_to_data_indices)) + ': ' + cluster_type + ' with ' + \
                  str(len(train_cluster_target_words)) + ' examples'

        # test best model
        test_cluster_source_words = [test_source_words[i] for i in test_cluster_to_data_indices[cluster_type]]
        test_cluster_target_words = [test_target_words[i] for i in test_cluster_to_data_indices[cluster_type]]
        test_cluster_source_feat_dicts = [test_source_feat_dicts[i] for i in
                                          test_cluster_to_data_indices[cluster_type]]
        test_cluster_target_feat_dicts = [test_target_feat_dicts[i] for i in
                                          test_cluster_to_data_indices[cluster_type]]

        # load best model
        best_model, encoder_frnn, encoder_rrnn, decoder_rnn = load_best_model(
            str(cluster_index), alphabet, results_file_path, input_dim, hidden_dim, layers,
            feature_alphabet, feat_input_dim, feature_types)

        lang = train_path.split('/')[-1].replace('-task{0}-train'.format('1'), '')

        # handle greedy prediction
        if nbest == 1:
            is_nbest = False
            predicted_templates = task2_ms2s.predict_templates(
                best_model, decoder_rnn, encoder_frnn, encoder_rrnn, alphabet_index, inverse_alphabet_index,
                test_cluster_source_words, test_cluster_source_feat_dicts, test_cluster_target_feat_dicts,
                feat_index, feature_types)

            accuracy = task2_ms2s.evaluate_model(
                predicted_templates, test_cluster_source_words, test_cluster_source_feat_dicts,
                test_cluster_target_words, test_cluster_target_feat_dicts, feature_types, print_results=False)
            accuracies.append(accuracy)
            print '{0} {1} accuracy: {2}'.format(lang, cluster_type, accuracy[1])

            # iterate through the predictions in the same order they appeared in the original file and, for each,
            # record the lemma, source features, inflection and target features for later printing in the task format
            for i in test_cluster_to_data_indices[cluster_type]:
                joint_index = test_source_words[i] + ':' + \
                              common.get_morph_string(test_source_feat_dicts[i], feature_types) + ':' + \
                              common.get_morph_string(test_target_feat_dicts[i], feature_types)
                inflection = task2_ms2s.instantiate_template(predicted_templates[joint_index], test_source_words[i])
                final_results[i] = (test_source_words[i], test_source_feat_dicts[i], inflection,
                                    test_target_feat_dicts[i])

            micro_average_accuracy = accuracy[1]

        # handle n-best prediction
        else:
            is_nbest = True
            predicted_nbest_templates = task2_ms2s.predict_nbest_templates(
                best_model, decoder_rnn, encoder_frnn, encoder_rrnn, alphabet_index, inverse_alphabet_index,
                test_cluster_source_words, test_cluster_source_feat_dicts, test_cluster_target_feat_dicts,
                feat_index, feature_types, nbest, test_cluster_target_words)

            # iterate through the predictions in the same order they appeared in the original file and, for each,
            # record the lemma, source features, n-best inflections and target features for the task output format
            for i in test_cluster_to_data_indices[cluster_type]:
                joint_index = test_source_words[i] + ':' + \
                              common.get_morph_string(test_source_feat_dicts[i], feature_types) + ':' + \
                              common.get_morph_string(test_target_feat_dicts[i], feature_types)
                nbest_inflections = []
                templates = [t for (t, p) in predicted_nbest_templates[joint_index]]
                for template in templates:
                    nbest_inflections.append(task2_ms2s.instantiate_template(template, test_source_words[i]))
                final_results[i] = (test_source_words[i], test_source_feat_dicts[i], nbest_inflections,
                                    test_target_feat_dicts[i])

            micro_average_accuracy = -1

    if 'test' in test_path:
        suffix = '.best.test'
    else:
        suffix = '.best'
    task2_joint_structured_inflection.write_results_file(hyper_params, micro_average_accuracy, train_path, test_path,
                                                         results_file_path + suffix, sigmorphon_root_dir,
                                                         final_results, is_nbest)
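# --- illustration only, not part of the original script ---
# A hypothetical sketch of what template instantiation does, based on the index
# markers added to the alphabet above: a predicted template mixes literal
# characters with string indices that copy the character at that position from
# the source word. The real logic lives in task2_ms2s.instantiate_template;
# this toy version only illustrates the copy mechanism.
def instantiate_template_sketch(template, source_word):
    output = []
    for symbol in template:
        if symbol.isdigit() and int(symbol) < len(source_word):
            # index marker: copy the corresponding character from the source word
            output.append(source_word[int(symbol)])
        else:
            # literal character predicted by the decoder
            output.append(symbol)
    return ''.join(output)

# e.g. instantiate_template_sketch(['0', '1', '2', '3', 'e', 'd'], 'walk') returns 'walked'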
def main(train_path, test_path, results_file_path, sigmorphon_root_dir, input_dim, hidden_dim, epochs, layers,
         optimization, feat_input_dim, nbest):
    hyper_params = {'INPUT_DIM': input_dim, 'HIDDEN_DIM': hidden_dim, 'EPOCHS': epochs, 'LAYERS': layers,
                    'MAX_PREDICTION_LEN': MAX_PREDICTION_LEN, 'OPTIMIZATION': optimization, 'NBEST': nbest}

    print 'train path = ' + str(train_path)
    print 'test path = ' + str(test_path)
    for param in hyper_params:
        print param + '=' + str(hyper_params[param])

    # load data
    (train_target_words, train_source_words, train_target_feat_dicts,
     train_source_feat_dicts) = prepare_sigmorphon_data.load_data(train_path, 2)
    (test_target_words, test_source_words, test_target_feat_dicts,
     test_source_feat_dicts) = prepare_sigmorphon_data.load_data(test_path, 2)
    alphabet, feature_types = prepare_sigmorphon_data.get_alphabet(train_target_words, train_source_words,
                                                                   train_target_feat_dicts, train_source_feat_dicts)

    # used for character dropout
    alphabet.append(NULL)
    alphabet.append(UNK)

    # used during decoding
    alphabet.append(EPSILON)
    alphabet.append(BEGIN_WORD)
    alphabet.append(END_WORD)

    feature_alphabet = common.get_feature_alphabet(train_source_feat_dicts + train_target_feat_dicts)
    feature_alphabet.append(UNK_FEAT)

    # add index markers to the alphabet - used to indicate copying from lemma to word
    for marker in [str(i) for i in xrange(MAX_PREDICTION_LEN)]:
        alphabet.append(marker)

    # feat 2 int
    feat_index = dict(zip(feature_alphabet, range(0, len(feature_alphabet))))

    # char 2 int
    alphabet_index = dict(zip(alphabet, range(0, len(alphabet))))
    inverse_alphabet_index = {index: char for char, index in alphabet_index.items()}

    # cluster the data by POS type (features)
    # TODO: do we need to cluster on both source and target feats?
    # probably enough to cluster on source here because the POS will be the same
    # (no derivational morphology in this task)
    train_cluster_to_data_indices = common.cluster_data_by_pos(train_source_feat_dicts)
    test_cluster_to_data_indices = common.cluster_data_by_pos(test_source_feat_dicts)

    # cluster the data by inflection type (features)
    # train_cluster_to_data_indices = common.cluster_data_by_morph_type(train_feat_dicts, feature_types)
    # test_cluster_to_data_indices = common.cluster_data_by_morph_type(test_feat_dicts, feature_types)

    accuracies = []
    final_results = {}

    # factored model: new model per inflection type
    for cluster_index, cluster_type in enumerate(train_cluster_to_data_indices):

        # get the inflection-specific data
        train_cluster_target_words = [train_target_words[i] for i in train_cluster_to_data_indices[cluster_type]]
        if len(train_cluster_target_words) < 1:
            print 'only ' + str(len(train_cluster_target_words)) + ' samples for this inflection type. skipping'
            continue
        else:
            print 'now evaluating model for cluster ' + str(cluster_index + 1) + '/' + \
                  str(len(train_cluster_to_data_indices)) + ': ' + cluster_type + ' with ' + \
                  str(len(train_cluster_target_words)) + ' examples'

        # test best model
        test_cluster_source_words = [test_source_words[i] for i in test_cluster_to_data_indices[cluster_type]]
        test_cluster_target_words = [test_target_words[i] for i in test_cluster_to_data_indices[cluster_type]]
        test_cluster_source_feat_dicts = [test_source_feat_dicts[i] for i in
                                          test_cluster_to_data_indices[cluster_type]]
        test_cluster_target_feat_dicts = [test_target_feat_dicts[i] for i in
                                          test_cluster_to_data_indices[cluster_type]]

        # load best model
        best_model, params = load_best_model(str(cluster_index), alphabet, results_file_path, input_dim, hidden_dim,
                                             layers, feature_alphabet, feat_input_dim, feature_types)

        lang = train_path.split('/')[-1].replace('-task{0}-train'.format('1'), '')

        # handle greedy prediction
        if nbest == 1:
            is_nbest = False
            predicted_templates = task2_ms2s.predict_templates(
                best_model, params, alphabet_index, inverse_alphabet_index, test_cluster_source_words,
                test_cluster_source_feat_dicts, test_cluster_target_feat_dicts, feat_index, feature_types)

            accuracy = task2_ms2s.evaluate_model(
                predicted_templates, test_cluster_source_words, test_cluster_source_feat_dicts,
                test_cluster_target_words, test_cluster_target_feat_dicts, feature_types, print_results=False)
            accuracies.append(accuracy)
            print '{0} {1} accuracy: {2}'.format(lang, cluster_type, accuracy[1])

            # iterate through the predictions in the same order they appeared in the original file and, for each,
            # record the lemma, source features, inflection and target features for later printing in the task format
            for i in test_cluster_to_data_indices[cluster_type]:
                joint_index = test_source_words[i] + ':' + \
                              common.get_morph_string(test_source_feat_dicts[i], feature_types) + ':' + \
                              common.get_morph_string(test_target_feat_dicts[i], feature_types)
                inflection = task2_ms2s.instantiate_template(predicted_templates[joint_index], test_source_words[i])
                final_results[i] = (test_source_words[i], test_source_feat_dicts[i], inflection,
                                    test_target_feat_dicts[i])

            micro_average_accuracy = accuracy[1]

        # handle n-best prediction
        else:
            is_nbest = True
            predicted_nbest_templates = task2_ms2s.predict_nbest_templates(
                best_model, params, alphabet_index, inverse_alphabet_index, test_cluster_source_words,
                test_cluster_source_feat_dicts, test_cluster_target_feat_dicts, feat_index, feature_types,
                nbest, test_cluster_target_words)

            # iterate through the predictions in the same order they appeared in the original file and, for each,
            # record the lemma, source features, n-best inflections and target features for the task output format
            for i in test_cluster_to_data_indices[cluster_type]:
                joint_index = test_source_words[i] + ':' + \
                              common.get_morph_string(test_source_feat_dicts[i], feature_types) + ':' + \
                              common.get_morph_string(test_target_feat_dicts[i], feature_types)
                nbest_inflections = []
                templates = [t for (t, p) in predicted_nbest_templates[joint_index]]
                for template in templates:
                    nbest_inflections.append(task2_ms2s.instantiate_template(template, test_source_words[i]))
                final_results[i] = (test_source_words[i], test_source_feat_dicts[i], nbest_inflections,
                                    test_target_feat_dicts[i])

            micro_average_accuracy = -1

    if 'test' in test_path:
        suffix = '.best.test'
    else:
        suffix = '.best'
    task2_joint_structured_inflection.write_results_file(hyper_params, micro_average_accuracy, train_path, test_path,
                                                         results_file_path + suffix, sigmorphon_root_dir,
                                                         final_results, is_nbest)
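# --- illustration only, not part of the original script ---
# A minimal driver sketch for running a main() like the ones above from the
# command line. The flag names and defaults below are illustrative assumptions;
# the original project may parse its arguments differently (e.g. with docopt).
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='evaluate the best saved task 2 model')
    parser.add_argument('train_path')
    parser.add_argument('test_path')
    parser.add_argument('results_file_path')
    parser.add_argument('sigmorphon_root_dir')
    parser.add_argument('--input-dim', type=int, default=100)
    parser.add_argument('--hidden-dim', type=int, default=100)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--layers', type=int, default=2)
    parser.add_argument('--optimization', default='ADADELTA')
    parser.add_argument('--feat-input-dim', type=int, default=20)
    parser.add_argument('--nbest', type=int, default=1)
    args = parser.parse_args()

    main(args.train_path, args.test_path, args.results_file_path, args.sigmorphon_root_dir,
         args.input_dim, args.hidden_dim, args.epochs, args.layers, args.optimization,
         args.feat_input_dim, args.nbest)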