def load_model():
    config = util.initialize_from_env()
    log_dir = config['log_dir']
    model = cm.CorefModel(config)
    with tf.Session() as session:
        # Skip the policy-gradient ('pg_reward') variables when restoring.
        vars_to_restore = [v for v in tf.global_variables() if 'pg_reward' not in v.name]
        saver = tf.train.Saver(vars_to_restore)
        saver.restore(session, os.path.join(log_dir, "model.max.ckpt"))
        all_vars = tf.trainable_variables()
        values = session.run(all_vars)
    return values
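# A minimal usage sketch for load_model() above, assuming the caller wants to
# inspect the restored weights (an assumption about intent; the source only
# returns the raw values):
weights = load_model()
for var, value in zip(tf.trainable_variables(), weights):
    print(var.name, value.shape)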
def __init__(self):
    print("here we go")
    # from util.initialize_from_env()
    gpus = []  # an empty list hides all GPUs, so the model runs on CPU
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(g) for g in gpus)
    name = "final"
    print("Running experiment: {}".format(name))
    config = pyhocon.ConfigFactory.parse_file("experiments.conf")[name]
    config["log_dir"] = util.mkdirs(os.path.join(config["log_root"], name))
    print(pyhocon.HOCONConverter.convert(config, "hocon"))
    self.model = cm.CorefModel(config)
    self.session = tf.Session()
    self.model.restore(self.session)
def _load_model(self, experiment):
    util.set_gpus()
    print("Running experiment: {}.".format(experiment))
    config = util.get_config("experiments.conf")[experiment]
    config["log_dir"] = util.mkdirs(os.path.join(config["log_root"], experiment))
    util.print_config(config)
    model = cm.CorefModel(config)
    saver = tf.train.Saver()
    log_dir = config["log_dir"]
    # Keep the session open after this method returns; a `with` block
    # would close it on exit and leave self.session unusable.
    session = tf.Session()
    checkpoint_path = os.path.join(log_dir, "model.max.ckpt")
    saver.restore(session, checkpoint_path)
    self.model = model
    self.session = session
def run_1model(eval_path):
    # An optional output path may be given as the third CLI argument;
    # trim sys.argv afterwards so util.get_args() only sees the expected arguments.
    outfpath = sys.argv[3] if len(sys.argv) == 4 else None
    sys.argv = sys.argv[:2]
    args = util.get_args()
    config = util.initialize_from_env(args.experiment, args.logdir)
    config['eval_path'] = eval_path
    model = cm.CorefModel(config, eval_mode=True)
    with tf.Session() as session:
        model.restore(session, args.latest_checkpoint)
        model.evaluate(session, official_stdout=True, pprint=False, test=True, outfpath=outfpath)
def run(config, input_filename, output_filename, cluster_key):
    model = cm.CorefModel(config)
    with tf.Session() as session:
        model.restore(session)
        with open(output_filename, "w") as output_file:
            with open(input_filename) as input_file:
                for example_num, line in enumerate(input_file):
                    example = json.loads(line)
                    tensorized_example = model.tensorize_example(example, is_training=False)
                    if tensorized_example is None:
                        # Example could not be tensorized; emit an empty cluster list.
                        example[cluster_key] = []
                    else:
                        feed_dict = {i: t for i, t in zip(model.input_tensors, tensorized_example)}
                        _, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(
                            model.predictions, feed_dict=feed_dict)
                        predicted_antecedents = model.get_predicted_antecedents(
                            top_antecedents, top_antecedent_scores)
                        example[cluster_key], _ = model.get_predicted_clusters(
                            top_span_starts, top_span_ends, predicted_antecedents)
                    # Drop gold clusters when predictions are written under their own key.
                    if cluster_key == "predicted_clusters" and "clusters" in example:
                        del example["clusters"]
                    output_file.write(json.dumps(example))
                    output_file.write("\n")
                    if example_num % 100 == 0:
                        print("Decoded {} examples.".format(example_num + 1))
    print("Predicted {} examples.".format(example_num + 1))
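# A hedged usage sketch for run() above; the input/output paths are
# assumptions chosen for illustration, while "predicted_clusters" is the
# cluster key the function itself checks for:
config = util.initialize_from_env()
run(config,
    input_filename="dev.english.jsonlines",     # assumed input path
    output_filename="dev.predicted.jsonlines",  # assumed output path
    cluster_key="predicted_clusters")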
def __init__(self, experiment: str = 'final', path_context_emb: str = None,
             path_head_emb: str = None, dir_elmo: str = None,
             dir_log_root: str = None, path_char_vocab: str = None):
    '''
    :param experiment: 'final' or 'test'
    :param path_context_emb: absolute path of the context embedding
    :param path_head_emb: absolute path of the head embedding
    :param dir_elmo: absolute path of the ELMo directory
    :param dir_log_root: absolute path of the log root directory
    :param path_char_vocab: absolute path of the char_vocab file
    '''
    super(E2ECoref, self).__init__()
    self.config = util.initialize_experiment(experiment, path_context_emb, path_head_emb,
                                             dir_elmo, dir_log_root, path_char_vocab)
    self.model = coref_model.CorefModel(self.config)
    self.session = tf.Session()  # currently no closing operation
    self.model.restore(self.session)
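# A hedged usage sketch for the E2ECoref constructor above; every path below
# is an assumption chosen only to illustrate the arguments:
coref = E2ECoref(experiment='final',
                 path_context_emb='/data/glove.840B.300d.txt',    # assumed
                 path_head_emb='/data/glove_50_300_2.txt',        # assumed
                 dir_elmo='/data/elmo',                           # assumed
                 dir_log_root='/data/logs',                       # assumed
                 path_char_vocab='/data/char_vocab.english.txt')  # assumed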
import os

import tensorflow as tf
from tensorflow.python import debug as tf_debug
from sklearn import metrics
from tensorboard import summary as summary_lib
from datetime import datetime

import coref_model as cm
import util

if __name__ == "__main__":
    config = util.initialize_from_env()
    report_frequency = config["report_frequency"]
    eval_frequency = config["eval_frequency"]

    # cluster_config = config["cluster"]
    # util.set_gpus(*cluster_config["gpus"])

    model = cm.CorefModel(config)
    saver = tf.train.Saver()

    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    log_dir = util.mkdirs(os.path.join(config["log_dir"], "Parallel_Postulate"))
    writer = tf.summary.FileWriter(log_dir, flush_secs=20)

    max_f1 = 0
    max_precision = 0
    max_recall = 0
    max_accuracy = 0

    coord = tf.train.Coordinator()
    with tf.Session() as session:
        # session = tf_debug.LocalCLIDebugWrapperSession(session)
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

import util
import coref_model as cm

if __name__ == "__main__":
    args = util.get_args()
    config = util.initialize_from_env(args.experiment, args.logdir)
    config["eval_path"] = "test.english.jsonlines"
    config["conll_eval_path"] = "test.english.v4_gold_conll"
    config["context_embeddings"]["path"] = "glove.840B.300d.txt"

    model = cm.CorefModel(config, eval_mode=True)
    with tf.Session() as session:
        model.restore(session, args.latest_checkpoint)
        model.evaluate(session, official_stdout=True, pprint=False, test=True)
import collections
import os
import sys

import tensorflow as tf

import conll
import metrics
import util
import coref_model as cm

if __name__ == "__main__":
    if "GPU" in os.environ:
        util.set_gpus(int(os.environ["GPU"]))
    else:
        util.set_gpus()

    # Each CLI argument names one experiment config to include in the ensemble.
    names = sys.argv[1:]
    print("Ensembling models from {}.".format(names))

    configs = util.get_config("experiments.conf")
    main_config = configs[names[0]]
    model = cm.CorefModel(main_config)
    model.load_eval_data()

    saver = tf.train.Saver()
    with tf.Session() as session:
        all_mention_scores = collections.defaultdict(list)
        for name in names:
            config = configs[name]
            log_dir = os.path.join(config["log_root"], name)
            checkpoint_path = os.path.join(log_dir, "model.max.ckpt")
            print("Computing mention scores for {}".format(checkpoint_path))
            saver.restore(session, checkpoint_path)
            for example_num, (tensorized_example,