import os

# configurations_qa, get_latest_model, Evaluator, and generate_graphs are
# assumed to be imported from the project's own modules.


def generate_graphs_on_latest_model(dataset, config):
    """Evaluate the latest checkpoint for the named QA configuration and
    regenerate its graphs from the test-set results."""
    print("GENERATING GRAPHS FOR EXPERIMENT ON LATEST MODEL")
    # Resolve the configuration name into a concrete config for this dataset.
    config = configurations_qa[config](dataset)
    latest_model = get_latest_model(
        os.path.join(config["training"]["basepath"], config["training"]["exp_dirname"])
    )
    if latest_model is None:
        return
    evaluator = Evaluator(dataset, latest_model)
    # Evaluate on the test split; results are persisted to disk.
    _ = evaluator.evaluate(dataset.test_data, save_results=True, is_embds=False)
    generate_graphs(
        dataset,
        config["training"]["exp_dirname"],
        evaluator.model,
        test_data=dataset.test_data,
    )
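
# For reference, a minimal sketch of what get_latest_model might do, under the
# assumption that checkpoints are files inside the experiment directory and
# "latest" means most recently modified; the project's real implementation may
# select checkpoints differently (e.g. by epoch number in the filename).
def _get_latest_model_sketch(exp_dir):
    """Return the most recently modified file in exp_dir, or None if the
    directory is missing or empty."""
    if not os.path.isdir(exp_dir):
        return None
    candidates = [
        os.path.join(exp_dir, name)
        for name in os.listdir(exp_dir)
        if os.path.isfile(os.path.join(exp_dir, name))
    ]
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)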