def train_dataset(dataset, config='lstm'):
    """Train a model on *dataset* with the named encoder config, then evaluate it.

    Parameters
    ----------
    dataset : project dataset object providing train/dev/test splits,
        ``trainer_type`` and ``save_on_metric``.
    config : str, key into the project-level ``configurations`` registry.

    Returns
    -------
    (trainer, evaluator) on success, or ``None`` if anything fails.
    """
    try:
        config = configurations[config](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data, dataset.dev_data, n_iters=8,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset, trainer.model.dirname,
                              _type=dataset.trainer_type)
        _ = evaluator.evaluate(dataset.test_data, save_results=True)
        return trainer, evaluator
    except Exception:
        # Original used a bare ``except:`` that also swallowed
        # KeyboardInterrupt/SystemExit and hid every failure. Narrow the
        # catch and print the traceback so failures are visible, while
        # keeping the best-effort "return None on failure" contract.
        import traceback
        traceback.print_exc()
        return None
def train_dataset(dataset, config='lstm'):
    """Train a model on *dataset* with the named encoder config, then evaluate it.

    Parameters
    ----------
    dataset : project dataset object providing train/dev/test splits,
        ``trainer_type``, ``save_on_metric``, and optionally ``n_iter``.
    config : str, key into the project-level ``configurations`` registry.

    Returns
    -------
    (trainer, evaluator)
    """
    config = configurations[config](dataset)
    trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
    # Idiomatic replacement for the hasattr/else dance: the dataset may
    # override the default iteration count via an ``n_iter`` attribute.
    n_iters = getattr(dataset, 'n_iter', 8)
    trainer.train(dataset.train_data, dataset.dev_data, n_iters=n_iters,
                  save_on_metric=dataset.save_on_metric)
    evaluator = Evaluator(dataset, trainer.model.dirname,
                          _type=dataset.trainer_type)
    _ = evaluator.evaluate(dataset.test_data, save_results=True)
    return trainer, evaluator
def train_dataset_and_get_lime_explanations(dataset, encoders, num_iters=15):
    """Train one model per encoder config and collect LIME explanations.

    Each entry of *encoders* is trained and evaluated in turn; the
    predictions, attentions, and LIME explanations of the FINAL encoder in
    the list are what get returned (earlier results are overwritten).

    Returns
    -------
    (predictions, attentions, lime_explanations) from the last encoder.
    """
    for encoder_name in encoders:
        encoder_config = configurations[encoder_name](dataset)
        trainer = Trainer(dataset, config=encoder_config,
                          _type=dataset.trainer_type)
        trainer.train(dataset.train_data, dataset.dev_data,
                      n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset, trainer.model.dirname,
                              _type=dataset.trainer_type)
        predictions, attentions = evaluator.evaluate(dataset.test_data,
                                                     save_results=True)
        lime_explanations = trainer.model.get_lime_explanations(
            dataset.test_data.X)
    return predictions, attentions, lime_explanations
def train_dataset_and_get_gradient(dataset, encoders, num_iters=15):
    """Train one model per encoder config and run the gradient experiment.

    Each entry of *encoders* is trained and evaluated in turn; the results
    of the FINAL encoder in the list are what get returned (earlier results
    are overwritten). Gradients are additionally passed through
    ``process_grads`` for plotting.

    Returns
    -------
    (predictions, attentions, grads) from the last encoder.
    """
    for encoder_name in encoders:
        encoder_config = configurations[encoder_name](dataset)
        trainer = Trainer(dataset, config=encoder_config,
                          _type=dataset.trainer_type)
        trainer.train(dataset.train_data, dataset.dev_data,
                      n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset, trainer.model.dirname,
                              _type=dataset.trainer_type)
        predictions, attentions = evaluator.evaluate(dataset.test_data,
                                                     save_results=True)
        grads = evaluator.gradient_experiment_get_grads(dataset.test_data)
        # Imported lazily, exactly as in the original, to avoid a
        # module-level dependency on the plotting code.
        from Trainers.PlottingBC import process_grads
        process_grads(grads)
    return predictions, attentions, grads
def train_dataset(dataset, config="lstm"): print("STARTING TRAINING") config = configurations[config](dataset) trainer = Trainer(dataset, config=config, _type=dataset.trainer_type) if hasattr(dataset, "n_iter"): n_iters = dataset.n_iter else: n_iters = 8 trainer.train( dataset.train_data, dataset.dev_data, n_iters=n_iters, save_on_metric=dataset.save_on_metric, ) evaluator = Evaluator(dataset, trainer.model.dirname, _type=dataset.trainer_type) _ = evaluator.evaluate(dataset.test_data, save_results=True) return trainer, evaluator
def train_dataset_and_get_atn_map(dataset, encoders, num_iters=15):
    """Train one model per encoder config and collect attention maps.

    Each entry of *encoders* is trained and evaluated in turn; only the
    results from the FINAL encoder in the list survive the loop and are
    returned, along with the recorded training losses.

    Returns
    -------
    (predictions, attentions, predictions_lst_epch, attentions_lst_epch,
     train_losses) from the last encoder.
    """
    for e in encoders:
        config = configurations[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data, dataset.dev_data,
                      n_iters=num_iters,
                      save_on_metric=dataset.save_on_metric)
        # Per-epoch training losses recorded by the model during train().
        train_losses = trainer.model.train_losses
        evaluator = Evaluator(dataset, trainer.model.dirname,
                              _type=dataset.trainer_type)
        predictions, attentions = evaluator.evaluate(dataset.test_data,
                                                     save_results=True)
        # evaluator = Evaluator(dataset, trainer.model.last_epch_dirname,
        #                       _type=dataset.trainer_type)
        # NOTE(review): the commented-out re-creation above suggests the
        # "_lst_epch" evaluation was meant to load the last-epoch checkpoint;
        # as written it re-evaluates with the SAME best-model evaluator, so
        # both result pairs come from the same weights — confirm intended.
        predictions_lst_epch, attentions_lst_epch = evaluator.evaluate(
            dataset.test_data)
    return predictions, attentions, predictions_lst_epch, attentions_lst_epch, train_losses
def train_dataset_and_temp_scale(dataset, encoders):
    """Train one model per encoder config, then apply temperature scaling.

    For each encoder: train, evaluate (building the best-model Evaluator),
    wrap the trained model in ``ModelWithTemperature``, and fit the
    temperature on the dev set.

    Parameters
    ----------
    dataset : project dataset object providing train/dev/test splits,
        ``trainer_type`` and ``save_on_metric``.
    encoders : iterable of configuration-registry keys.

    Returns
    -------
    None (side effects only: trained/scaled models).
    """
    for e in encoders:
        config = configurations[e](dataset)
        trainer = Trainer(dataset, config=config, _type=dataset.trainer_type)
        trainer.train(dataset.train_data, dataset.dev_data, n_iters=8,
                      save_on_metric=dataset.save_on_metric)
        evaluator = Evaluator(dataset, trainer.model.dirname,
                              _type=dataset.trainer_type)
        print("Temperature-scaling..")
        orig_model = evaluator.model
        # Build the dev-set batch ONCE. The original constructed three
        # separate BatchHolder(dataset.dev_data.X) objects just to read
        # .seq, .lengths and .masks — redundant work and allocations.
        dev_batch = BatchHolder(dataset.dev_data.X)
        valid_dataset = TensorDataset(
            dev_batch.seq,
            dev_batch.lengths,
            dev_batch.masks,
            torch.from_numpy(np.array(dataset.dev_data.y)))
        valid_loader = DataLoader(valid_dataset, batch_size=1)
        scaled_model = ModelWithTemperature(orig_model)
        scaled_model.set_temperature(valid_loader)
def train_dataset(dataset, args, config='lstm') :
    """Train a model (standard or adversarial per *args*), then evaluate it.

    Parameters
    ----------
    dataset : project dataset object with train/test splits and
        ``save_on_metric``.
    args : parsed arguments; ``args.adversarial`` selects the training mode.
    config : str, passed through to ``generate_config``.

    Returns
    -------
    (trainer, evaluator)
    """
    model_config = generate_config(dataset, args, config)
    trainer = Trainer(dataset, args, config=model_config)
    # Go ahead and save model values up front (metadata only, no weights).
    dirname = trainer.model.save_values(save_model=False)
    print("DIRECTORY:", dirname)
    if not args.adversarial :
        trainer.train_standard(dataset.train_data, dataset.test_data, args,
                               save_on_metric=dataset.save_on_metric)
    else :
        trainer.train_adversarial(dataset.train_data, dataset.test_data, args)
    print('####################################')
    print("TEST RESULTS FROM BEST MODEL")
    evaluator = Evaluator(dataset, trainer.model.dirname, args)
    evaluator.evaluate(dataset.test_data, save_results=True)
    return trainer, evaluator