def main():
    """Create a trained feature set.

    Pipeline:
      1. Extract features from a cropped image set
      2. Evaluate features on labelled training data
      3. Remove features with high false positive rate
      4. Evaluate features again on same data

    Returns:
        None. Side effects: trains the detector and writes the surviving
        feature set to disk via ``trainer.save_features('sim2')``.
    """
    trainer = Trainer()
    # 750 appears to be the detector's feature-count budget — confirm
    # against the Detector constructor.
    detector = Detector(750)
    trainer.set_detector(detector)

    # Labelled gazebo-simulation datasets used for this training run.
    # (Other dataset groups — quadbox, idea2 — have been used in past runs.)
    trainer.load_data('train/gazebo/pos_info_PreyEmptyWorld.dat')
    trainer.load_data('train/gazebo/pos_info_PurplePrey1.dat')
    trainer.load_data('train/gazebo/pos_info_PurplePrey2.dat')
    trainer.load_data('train/gazebo/sim2.dat')

    # Train on 80% of the data, evaluate, then prune features with a high
    # false-positive rate and persist what survives.
    trainer.train_and_test(.8)
    trainer.evaluate(1)
    trainer.feature_selection()
    trainer.save_features('sim2')
    return
def run_evaluation(datasets, model, verbose=True):
    """Evaluate *model* on the train, validation, and test splits.

    For each split, calls ``Trainer.evaluate`` on the corresponding loader
    and collects ``(accuracy, loss)`` in order, then prints a row of all six
    numbers formatted for results.md.

    Args:
        datasets: object exposing ``get_loader(DatasetType.X)`` for each split.
        model: the model passed through to ``Trainer.evaluate``.
        verbose: when True, print per-split headers and the summary row
            (also forwarded to ``Trainer.evaluate``).

    Returns:
        list of six floats:
        ``[train_acc, train_loss, val_acc, val_loss, test_acc, test_loss]``.
    """
    # One loop replaces three copy-pasted train/val/test sections; the
    # printed output and evaluation order are unchanged.
    splits = (
        ("Training", DatasetType.Train),
        ("Validation", DatasetType.Validation),
        ("Test", DatasetType.Test),
    )
    results = []
    for label, split in splits:
        if verbose:
            print("{} Set Results".format(label))
        acc, loss = Trainer.evaluate(
            model,
            datasets.get_loader(split),
            verbose=verbose,
        )
        results.append(acc)
        results.append(loss)
        # Brief pause so any progress-bar output from evaluate() finishes
        # flushing before the next header — presumably tqdm; verify.
        time.sleep(0.1)
        if verbose:
            print("")
    if verbose:
        print("Output for results.md")
        print((" {:.3f} |" * len(results)).format(*results))
    return results
model.config.length_token) dl = DataLoader(args, tokenizer) tr_dataset, val_dataset, test_dataset = dl() trainer = Trainer(model, tokenizer, args) if args.load_training_state: trainer.load_training_state_dict(args.base_dir) trainer.fit(tr_dataset, val_dataset) if args.save_pretrained_path: trainer.model.save_pretrained( os.path.join(args.base_dir, args.save_pretrained_path)) tst_loss = trainer.evaluate(test_dataset) wandb.log({"tst_loss": tst_loss}) for mode, dataset in zip(["tr", "val", "tst"], [tr_dataset, val_dataset, test_dataset]): out = fetch_translations_and_bleu(model, dataset, tokenizer, args.iterations, args.B, args.bleu_num_samples) data = list(zip(out["src"], out["tgt"], out["pred"])) wandb.log({ mode + '_bleu': out["bleu"], mode + '_predictions': wandb.Table(data=data, columns=['src', 'tgt', 'tgt_pred']) })