def test_model_builder_with_class_weights_sgd(self):
    # get ambiguity map
    ambig_fname = config["test_data_dir"] + "/de-en_ambig.tab"
    ambig_map = AmbiguityMap(ambig_fname)

    # get samples
    samp_hdf_fname = config["test_data_dir"] + "/de-en_samples.hdf5_"
    samp_hdfile = h5py.File(samp_hdf_fname, "r")

    # create data generator
    data_gen = DataSetGenerator(ambig_map, samp_hdfile)

    # get lemma counts
    # TODO: reading all counts is slow - file with subset of counts needed
    counts_fname = config["count"]["lemma"]["en"]["pkl_fname"]

    from sklearn.linear_model import SGDClassifier

    # build
    models_hdf_fname = tempfile.NamedTemporaryFile().name
    builder = ModelBuilder(data_gen,
                           models_hdf_fname=models_hdf_fname,
                           classifier=SGDClassifier(),
                           counts_fname=counts_fname)
    builder.run()
def nb_build_model(ns):
    """
    build NB models with class weighting
    """
    ns.models_fname = ns.fname_prefix + "_models.hdf5"
    counts_fname = config["count"]["lemma"][ns.target_lang]["pkl_fname"]
    model_builder = ModelBuilder(ns.get_data_generator(ns),
                                 ns.models_fname,
                                 ns.classifier,
                                 counts_fname=counts_fname)
    model_builder.run()
    # clean up params
    delattr(ns, "ambig_map")
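# A minimal sketch (not part of the module above) of how per-translation
# lemma counts could be turned into class priors for a Naive Bayes model,
# assuming the pickled counts file maps lemmas to corpus frequencies; the
# counts dict, helper name, and toy data below are illustrative assumptions,
# not the actual ModelBuilder internals.
def _class_prior_sketch():
    import numpy as np
    from sklearn.naive_bayes import MultinomialNB

    # hypothetical frequencies for the translation candidates of one
    # ambiguous source lemma
    counts = {"bank/n": 900, "bench/n": 100}
    lemmas = sorted(counts)
    totals = np.array([counts[lemma] for lemma in lemmas], dtype=float)

    # normalized counts become the class priors of the classifier
    classifier = MultinomialNB(class_prior=totals / totals.sum())

    # toy count vectors: 10 samples, 5 features, binary labels
    rng = np.random.RandomState(0)
    X = rng.poisson(1.0, size=(10, 5))
    y = rng.randint(0, 2, size=10)
    return classifier.fit(X, y)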
def test_model_builder(self):
    # get ambiguity map
    ambig_fname = config["test_data_dir"] + "/de-en_ambig.tab"
    ambig_map = AmbiguityMap(ambig_fname)

    # get samples
    samp_hdf_fname = config["test_data_dir"] + "/de-en_samples.hdf5_"
    samp_hdfile = h5py.File(samp_hdf_fname, "r")

    # create data generator
    data_gen = DataSetGenerator(ambig_map, samp_hdfile)

    # build
    models_hdf_fname = tempfile.NamedTemporaryFile().name
    builder = ModelBuilder(data_gen,
                           models_hdf_fname=models_hdf_fname,
                           classifier=MultinomialNB())
    builder.run()
def make_classifiers():
    for lang_pair in ("de-en",):
        ambig_fname = "{}/{}_ambig.tab".format(
            config["test_data_dir"], lang_pair)
        ambig_map = AmbiguityMap(ambig_fname)

        samp_fname = "{}/{}_samples.hdf5_".format(
            config["test_data_dir"], lang_pair)
        samp_hdfile = h5py.File(samp_fname, "r")

        data_gen = DataSetGenerator(ambig_map, samp_hdfile)

        models_fname = "{}/{}_models.hdf5_".format(
            config["test_data_dir"], lang_pair)

        builder = ModelBuilder(data_generator=data_gen,
                               models_hdf_fname=models_fname,
                               classifier=MultinomialNB())
        builder.run()
def nb_exp(data_sets=config["eval"]["data_sets"],
           lang_pairs=(),
           text=False,
           draw=False,
           diff=False,
           trash_models=False):
    n_components = 10
    descriptor = [("data", "S16"),
                  ("lang", "S8"),
                  ("nist", "f"),
                  ("blue", "f"),
                  ("name", "S256")]
    results = np.zeros(9999, dtype=descriptor)
    exp_count = 0
    script_fname = os.path.splitext(os.path.basename(__file__))[0]
    results_fname = "_" + script_fname + "_results.txt"
    results_outf = open(results_fname, "w")

    for data in data_sets:
        for lang in lang_pairs or config["eval"][data].keys():
            ambig_fname = config["sample"][lang]["ambig_fname"]
            try:
                samples_fname = config["sample"][lang]["samples_filt_fname"]
            except KeyError:
                samples_fname = config["sample"][lang]["samples_fname"]
                log.warn("backing off to unfiltered samples from " +
                         samples_fname)
            graphs_fname = config["eval"][data][lang]["graphs_fname"]

            name = "{}_{}_{}".format(script_fname, data, lang)
            exp_dir = "_" + name
            if not os.path.exists(exp_dir):
                os.makedirs(exp_dir)
            models_fname = exp_dir + "/" + name + ".hdf5"

            classifier = Pipeline(
                [("MCF", MinCountFilter(5)),
                 ("MFF", MaxFreqFilter(0.05)),
                 ("CHI2", SelectFpr(chi2, alpha=0.001)),
                 ("NMF", NMF(n_components=n_components)),
                 ("MNB", MultinomialNB()),
                 ])

            # get ambiguity map
            ambig_map = AmbiguityMap(ambig_fname, graphs_fname=graphs_fname)
            #ambig_map = AmbiguityMap(ambig_fname, subset={"klar/adj"})

            # train classifier
            model_builder = ModelBuilder(ambig_map, samples_fname,
                                         models_fname, classifier)
            #,with_vocab_mask=True)
            model_builder.run()

            # apply classifier
            model = TranslationClassifier(models_fname)
            score_attr = "nb_score"
            source_lang = lang.split("-")[0]
            scorer = ClassifierScore(model,
                                     score_attr=score_attr,
                                     filter=filter_functions(source_lang),
                                     vectorizer="mft")
            graph_list = cPickle.load(open(graphs_fname))
            scorer(graph_list)

            best_scorer = BestScorer(["nb_score", "freq_score"])
            best_scorer(graph_list)

            scored_graphs_fname = exp_dir + "/" + name + "_graphs.pkl"
            log.info("saving scored graphs to " + scored_graphs_fname)
            cPickle.dump(graph_list, open(scored_graphs_fname, "w"))
            #graph_list = cPickle.load(open(scored_graphs_fname))

            nist_score, bleu_score = postprocess(
                name,
                data,
                lang,
                graph_list,
                best_score_attr="best_score",
                base_score_attrs=["nb_score", "freq_score"],
                out_dir=exp_dir,
                base_fname=name,
                text=text,
                draw=draw,
                diff=diff)

            results[exp_count] = (data, lang, nist_score, bleu_score, name)
            results_fname = exp_dir + "/" + name + ".npy"
            log.info("saving result to " + results_fname)
            np.save(results_fname, results[exp_count])
            exp_count += 1

            if trash_models:
                log.info("Trashing models file " + models_fname)
                os.remove(models_fname)

            # add to table of results per data set & language pair
            sub_results = results[(results["lang"] == lang) &
                                  (results["data"] == data)]
            sub_results = np.sort(sub_results, axis=0,
                                  order=("lang", "blue"))[::-1]
            text_table(sub_results, results_outf)
            results_outf.write("\n\n")

    results_outf.close()
    results = results[:exp_count]
    results_fname = "_" + script_fname + "_results.npy"
    log.info("saving pickled results to " + results_fname)
    np.save(results_fname, results)
    text_table(results)
    return results
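# A self-contained sketch of the sklearn stages of the Pipeline built in
# nb_exp above; MinCountFilter and MaxFreqFilter are project-specific
# transformers and are omitted here, and the toy data is an illustrative
# assumption, not the experiment's samples.
def _nb_pipeline_sketch():
    import numpy as np
    from sklearn.pipeline import Pipeline
    from sklearn.feature_selection import SelectFpr, chi2
    from sklearn.decomposition import NMF
    from sklearn.naive_bayes import MultinomialNB

    rng = np.random.RandomState(0)
    n_samples, n_features = 200, 30
    y = rng.randint(0, 2, n_samples)
    # count-like features with genuine signal in the first five columns,
    # so the chi2 false-positive-rate filter has something to keep
    X = rng.poisson(1.0, size=(n_samples, n_features)).astype(float)
    X[y == 1, :5] += rng.poisson(3.0, size=((y == 1).sum(), 5))

    pipeline = Pipeline([("CHI2", SelectFpr(chi2, alpha=0.001)),
                         ("NMF", NMF(n_components=3)),
                         ("MNB", MultinomialNB())])
    pipeline.fit(X, y)
    return pipeline.predict(X)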
def centroid_exp(data_sets=config["eval"]["data_sets"],
                 lang_pairs=(),
                 text=False,
                 draw=False,
                 diff=False,
                 trash_models=False,
                 dump_centroids=False):
    descriptor = [("data", "S16"),
                  ("lang", "S8"),
                  ("min_count", "f"),
                  ("max_freq", "f"),
                  ("nist", "f"),
                  ("blue", "f"),
                  ("name", "S256")]
    results = np.zeros(9999, dtype=descriptor)
    exp_count = 0
    script_fname = os.path.splitext(os.path.basename(__file__))[0]
    results_fname = "_" + script_fname + "_results.txt"
    results_outf = open(results_fname, "w")

    for data in data_sets:
        for lang in lang_pairs or config["eval"][data].keys():
            ambig_fname = config["sample"][lang]["ambig_fname"]
            try:
                samples_fname = config["sample"][lang]["samples_filt_fname"]
            except KeyError:
                samples_fname = config["sample"][lang]["samples_fname"]
                log.warn("backing off to unfiltered samples from " +
                         samples_fname)
            graphs_fname = config["eval"][data][lang]["graphs_fname"]

            #for min_count in (1, 5, 10, 25, 50, 100, 250, 1000, 2500, 5000):
            #    for max_freq in (0.0001, 0.001, 0.005, 0.01, 0.05, 0.10, 0.25, 0.5, 1.0):
            for min_count in (5,):
                for max_freq in (0.01,):
                    name = "{}_{}_{}_min_count={:d}_max_freq={:f}".format(
                        script_fname, data, lang, min_count, max_freq)
                    exp_dir = "_" + name
                    if not os.path.exists(exp_dir):
                        os.makedirs(exp_dir)
                    models_fname = exp_dir + "/" + name + ".hdf5"

                    classifier = Pipeline(
                        [("MCF", MinCountFilter(min_count)),
                         ("MFF", MaxFreqFilter(max_freq)),
                         ("CHI2", SelectFpr()),
                         #("TFIDF", TfidfTransformer()),
                         ("CNC", CosNearestCentroid())
                         #("NC", NearestCentroidProb())
                         ])

                    # train classifier
                    model_builder = ModelBuilder(ambig_fname, samples_fname,
                                                 models_fname, classifier,
                                                 graphs_fname,
                                                 with_vocab_mask=True)
                    model_builder.run()

                    # print the centroids to a file, only the 50 best features
                    if dump_centroids:
                        print_fname = exp_dir + "/" + name + "_centroids.txt"
                        print_centroids(models_fname, n=50, outf=print_fname)

                    # apply classifier
                    model = TranslationClassifier(models_fname)
                    score_attr = "centroid_score"
                    source_lang = lang.split("-")[0]
                    scorer = ClassifierScore(
                        model,
                        score_attr=score_attr,
                        filter=filter_functions(source_lang))
                    graph_list = cPickle.load(open(graphs_fname))
                    scorer(graph_list)

                    best_scorer = BestScorer(["centroid_score",
                                              "freq_score"])
                    best_scorer(graph_list)

                    scored_graphs_fname = exp_dir + "/" + name + "_graphs.pkl"
                    log.info("saving scored graphs to " + scored_graphs_fname)
                    cPickle.dump(graph_list, open(scored_graphs_fname, "w"))
                    #graph_list = cPickle.load(open(scored_graphs_fname))

                    nist_score, bleu_score = postprocess(
                        name,
                        data,
                        lang,
                        graph_list,
                        best_score_attr="best_score",
                        base_score_attrs=["centroid_score", "freq_score"],
                        out_dir=exp_dir,
                        base_fname=name,
                        text=text,
                        draw=draw,
                        diff=diff)

                    results[exp_count] = (data, lang, min_count, max_freq,
                                          nist_score, bleu_score, name)
                    results_fname = exp_dir + "/" + name + ".npy"
                    log.info("saving result to " + results_fname)
                    np.save(results_fname, results[exp_count])
                    exp_count += 1

                    if trash_models:
                        log.info("Trashing models file " + models_fname)
                        os.remove(models_fname)

            sub_results = results[(results["lang"] == lang) &
                                  (results["data"] == data)]
            sub_results = np.sort(sub_results, axis=0,
                                  order=("lang", "blue"))[::-1]
            text_table(sub_results, results_outf)
            results_outf.write("\n\n")

    results_outf.close()
    results = results[:exp_count]
    results_fname = "_" + script_fname + "_results.npy"
    log.info("saving pickled results to " + results_fname)
    np.save(results_fname, results)
    text_table(results)
    return results
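# CosNearestCentroid and NearestCentroidProb used in centroid_exp are
# project-specific; sklearn's NearestCentroid is the closest standard
# analogue. A minimal sketch on toy count data (an illustrative assumption):
# normalizing rows to unit length first makes the Euclidean distance to each
# class centroid behave like a cosine-based score.
def _centroid_sketch():
    import numpy as np
    from sklearn.neighbors import NearestCentroid

    rng = np.random.RandomState(0)
    X = rng.poisson(1.0, size=(50, 10)).astype(float) + 1e-8
    X /= np.linalg.norm(X, axis=1, keepdims=True)  # unit-length rows
    y = rng.randint(0, 2, size=50)

    clf = NearestCentroid()  # Euclidean distance to each class centroid
    clf.fit(X, y)
    return clf.predict(X)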