Example #1
def get_model(model_name, num_classes):
    """Return a freshly constructed model for the given architecture name."""
    if model_name == "create_new_model":
        return create_new_model(num_classes)
    elif model_name == "AlexNet":
        return AlexNet(num_classes)
    elif model_name == "LeNet5":
        return LeNet5(num_classes)
    elif model_name == "VGG16":
        return VGG16(num_classes)
    elif model_name == "ResNet50":
        return ResNet50(num_classes)
    elif model_name == "InceptionV3":
        return InceptionV3(num_classes)
    elif model_name == "DeepFace":
        return DeepFace(num_classes)
    else:
        # Fail loudly instead of silently returning None for unknown names.
        raise ValueError("Unknown model name: " + model_name)
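The if/elif chain above has to grow by one branch per architecture. A sketch of an equivalent dict-based registry with the same behavior (the MODEL_REGISTRY name is illustrative, not from the original code; the constructors are the ones listed above):

# Hypothetical alternative: map names to constructors once, look them up at call time.
MODEL_REGISTRY = {
    "create_new_model": create_new_model,
    "AlexNet": AlexNet,
    "LeNet5": LeNet5,
    "VGG16": VGG16,
    "ResNet50": ResNet50,
    "InceptionV3": InceptionV3,
    "DeepFace": DeepFace,
}

def get_model(model_name, num_classes):
    try:
        return MODEL_REGISTRY[model_name](num_classes)
    except KeyError:
        raise ValueError("Unknown model name: " + model_name)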
Example #2
    def __init__(self, config):
        super(PairFeatureTrainer, self).__init__(config)

        self.config = config
        self.model = create_new_model(self.config)
        self.trainCanopies, self.testCanopies, self.devCanopies = load_canopy_data_splits(self.config)
        self.logger = create_logger(config=config, logFile=config.logFile, currLogger=None)

        # The optimizer is (re)built by resetOptimizer() so it can be recreated later.
        self.optimizer = None
        self.resetOptimizer()

        if self.config.useGPU:
            self.logger.info("Shifting model to cuda because GPUs are available!")
            self.model = self.model.cuda()

        self.logger.info("Successfully initialized model trainer...")
        self.logger.info(str(self))
        self.config.save_config(self.config.resultDir)

        self.evalFunc = eval_model_pair_feat
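resetOptimizer() is called but not shown. A minimal sketch of what such a method might look like, assuming a PyTorch model and hypothetical config fields learningRate and weightDecay (the original snippet confirms none of these internals):

    def resetOptimizer(self):
        # Hypothetical implementation: rebuild the optimizer over the current
        # model parameters. learningRate and weightDecay are assumed config fields.
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=self.config.learningRate,
            weight_decay=self.config.weightDecay,
        )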
Example #3
import models
from test import simulation_test  # assumed location, matching test.simulation_test in Example #4


def main():
    # Smoke test: build a model without a config and run one simulation test on it.
    model = models.create_new_model(None, None)
    num = 0
    simulation_test(None, model, num)
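Every example funnels through create_new_model, whose body never appears. A minimal sketch of the factory shape its call sites imply, assuming cfg carries a model_name field; DefaultModel is a placeholder, and GlobalDense/GlobalCNN are borrowed from Example #5:

def create_new_model(cfg, model_specific_params=None):
    # Hypothetical factory consistent with the call sites in these examples.
    if cfg is None:
        return DefaultModel()  # placeholder fallback for the config-less call above
    params = model_specific_params or {}
    if cfg.model_name == "GlobalDense":
        return GlobalDense(**params)
    elif cfg.model_name == "GlobalCNN":
        return GlobalCNN(**params)
    else:
        raise ValueError("Unknown model name: " + cfg.model_name)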
Example #4
import argparse
import os

import torch

# Project-local modules, assumed importable under these names.
import config
import datasets
import models
import parameter_grid_search
import test
import train


def main():
    #-------------------------------------------------------------------------------------------------------------------
    # Parse arguments.
    parser = argparse.ArgumentParser(description="Set purpose of run.")
    parser.add_argument("--dataset",  default=False, action="store_true", help="Create new datasets from raw data.")
    parser.add_argument("--train",    default=False, action="store_true", help="Train ML model.")
    parser.add_argument("--test",     default=False, action="store_true", help="Test pre-trained ML model.")
    parser.add_argument("--use",      default=False, action="store_true", help="Use pre-trained ML model on new data.")
    parser.add_argument("--grs",      default=False, action="store_true", help="Perform parameter grid search.")
    args = parser.parse_args()

    print("\nEXECUTION INITIATED\n")

    if args.grs:
        print("-----------------------------------------------")
        print("-----------------------------------------------")
        print("Initiating parameter grid search.\n")
        for model_num in range(len(config.model_keys)):
            cfg = config.Config(
                group_name = config.group_name,
                run_name   = config.run_names[model_num][0],
                system     = config.systems[0],
                data_tag   = config.data_tags[0],
                model_key  = config.model_keys[model_num],
                do_train   = False,
                do_test    = False
            )
            print("- - - - - - - - - - - - - - - - - - - - - - - -")
            print("- - - - - - - - - - - - - - - - - - - - - - - -")
            print("Finding optimal parameters for model " + cfg.model_name)
            parameter_grid_search.grid_search(cfg)
            print("")
        print("Initiating parameter grid search.\n\nEXECUTION COMPLETED")
        return

    group_name = config.group_name
    for model_num in range(len(config.model_keys)):
        for sys_num in range(len(config.systems)):
            print("\n********************************************************")
            print("Model  number:", model_num)
            print("System number:", sys_num)
            print("********************************************************\n")

            #-------------------------------------------------------------------------------------------------------------------
            # Configuration setup.
            cfg = config.Config(
                group_name = group_name,
                run_name   = config.run_names[model_num][sys_num],
                system     = config.systems[sys_num],
                data_tag   = config.data_tags[sys_num],
                model_key  = config.model_keys[model_num],
                do_train   = args.train,
                do_test    = args.test
            )

            #-------------------------------------------------------------------------------------------------------------------
            # Ensure the run directory exists; exist_ok=False makes a rerun with the same name fail loudly.
            os.makedirs(cfg.run_dir, exist_ok=False)

            #-------------------------------------------------------------------------------------------------------------------
            # Save configurations.
            config.save_config(cfg)

            #-------------------------------------------------------------------------------------------------------------------
            # Create datasets.
            if model_num == 0 and args.dataset:
                print("----------------------------")
                print("Initiating dataset creation.\n")
                print("Data tag:", cfg.data_tag)
                datasets.main(cfg)
                print("\nCompleted dataset creation.")
                print("----------------------------\n")

            #-------------------------------------------------------------------------------------------------------------------
            # Define network model(s).

            ensemble = []
            print("----------------------------")
            print("Initiating model definition.")
            for i in range(cfg.ensemble_size):
                model = models.create_new_model(cfg, cfg.model_specific_params)
                ensemble.append(model)
                if i == 0 and sys_num == 0:
                    print("\n" + cfg.model_name + "\n")
                    if cfg.model_name.startswith('Ensemble'):
                        print("Ensemble model containing " + str(len(model.nets)) + " networks as shown below.")
                        print(model.nets[0].net)
                    elif cfg.model_name.startswith('Local'):
                        print("Ensemble model containing " + str(len(model.net.nets)) + " networks as shown below.")
                        print(model.net.nets[0])
                    else:
                        print(model.net)
            print("\nCompleted model definition.")
            print("----------------------------\n")


            #-------------------------------------------------------------------------------------------------------------------
            # Train model(s).

            if args.train:
                print("----------------------------")
                print("Initiating training")
                dataset_train, dataset_val, _ = datasets.load_datasets(cfg, True, True, False)

                dataloader_train = torch.utils.data.DataLoader(
                    dataset=dataset_train,
                    batch_size=cfg.batch_size_train,
                    shuffle=True,
                    num_workers=0,
                    pin_memory=True
                )
                dataloader_val = torch.utils.data.DataLoader(
                    dataset=dataset_val,
                    batch_size=cfg.batch_size_val,
                    shuffle=True,
                    num_workers=0,
                    pin_memory=True
                )
                for i, model in enumerate(ensemble):
                    print("\nTraining instance " + str(i))
                    _ = train.train(cfg, model, i, dataloader_train, dataloader_val)
                print("\nCompleted training.")
                print("----------------------------\n")

            #-------------------------------------------------------------------------------------------------------------------
            # Test model(s).

            if args.test:
                print("----------------------------")
                print("Initiating testing.")
                error_dicts = []
                plot_data_dicts = []
                for i, model in enumerate(ensemble):
                    print("\nTesting instance " + str(i))
                    if cfg.do_simulation_test:
                        error_dict, plot_data_dict = test.simulation_test(cfg, model, i)
                    else:
                        error_dict, plot_data_dict = test.single_step_test(cfg, model, i)
                    error_dicts.append(error_dict)
                    plot_data_dicts.append(plot_data_dict)
                print("")
                error_stats_dict, plot_stats_dict = test.save_test_data(cfg, error_dicts, plot_data_dicts)
                test.visualize_test_data(cfg, error_stats_dict, plot_stats_dict)
                print("\nCompleted testing.")
                print("----------------------------\n")

            #-------------------------------------------------------------------------------------------------------------------
            # Use pre-trained network to make predictions.

            if args.use:
                print("Prediction is currently not implemented.") # TODO: Implement prediction in 'predict.py'

    print("EXECUTION COMPLETED\n")
Example #5
import itertools
import os

import numpy as np
import torch

import models
import train
from datasets import load_datasets_from_path  # assumed home of this helper


def grid_search(cfg):
    os.makedirs(cfg.run_dir, exist_ok=False)
    # Load datasets and create dataloaders.
    # Note: all three systems currently point at the same dataset files.
    split_paths = [
        os.path.join(cfg.datasets_dir, 's2B_no-aug_sst_' + split + '.pt')
        for split in ('train', 'val', 'test')
    ]
    dataset_paths = [list(split_paths) for _ in range(3)]
    dataloaders = []
    for train_path, val_path, test_path in dataset_paths:
        train_set, val_set, test_set = load_datasets_from_path(
            train_path, val_path, test_path)
        dataloader_train = torch.utils.data.DataLoader(
            dataset=train_set,
            batch_size=cfg.batch_size_train,
            shuffle=True,
            num_workers=0,
            pin_memory=True)
        dataloader_val = torch.utils.data.DataLoader(
            dataset=val_set,
            batch_size=cfg.batch_size_val,
            shuffle=True,
            num_workers=0,
            pin_memory=True)
        dataloader_test = torch.utils.data.DataLoader(
            dataset=test_set,
            batch_size=cfg.batch_size_test,
            shuffle=True,
            num_workers=0,
            pin_memory=True)
        dataloaders.append([dataloader_train, dataloader_val, dataloader_test])

    search_data = []

    # Hyperparameter axes per model family; unknown model names fail loudly below.
    if cfg.model_name in ("GlobalDense", "LocalDense", "EnsembleLocalDense"):
        learning_rates = [1e-4, 1e-5]
        dropout_probs = [0.0, 0.2]
        if cfg.model_name == "GlobalDense":
            widths = (cfg.N_coarse * np.asarray([2, 3, 4, 5, 7])).astype(int)
            depths = [3, 4, 5, 6, 8, 10]
        else:
            widths = [3, 5, 7, 10, 15]
            depths = [3, 5, 7, 10, 15]
        axes = [learning_rates, dropout_probs, widths, depths]
        labels = "learning rate,\tdropout prob.,\twidth,\tdepth"
    elif cfg.model_name == "GlobalCNN":
        learning_rates = [1e-4, 1e-5]
        conv_nums = [3, 5, 7, 9, 12]
        channel_nums = [10, 15, 20, 25, 30, 40]
        fc_nums = [1, 2]
        axes = [learning_rates, conv_nums, channel_nums, fc_nums]
        labels = "learning rate,\tNo. conv layers,  No. conv channels,  No. fc layers"
    elif cfg.model_name == "EnsembleGlobalCNN":
        learning_rates = [1e-4, 1e-5]
        conv_nums = [3, 5, 7, 9, 12]
        channel_nums = [10, 15, 20, 25, 30, 40]
        axes = [learning_rates, conv_nums, channel_nums]
        labels = "learning rate,\tNo. conv layers,  No. conv channels"
    else:
        raise ValueError("No parameter grid defined for model " + cfg.model_name)

    combos = list(itertools.product(*axes))

    for combo in combos:
        print("\n-------------------------------------------------------")
        print(labels)
        print(combo)
        print("")

        # Apply this combination to the config fields the model family uses.
        if cfg.model_name in ("GlobalDense", "LocalDense", "EnsembleLocalDense"):
            cfg.learning_rate = combo[0]
            cfg.dropout_prob = combo[1]
            cfg.hidden_layer_size = combo[2]
            cfg.num_layers = combo[3]
        elif cfg.model_name in ("GlobalCNN", "EnsembleGlobalCNN"):
            cfg.learning_rate = combo[0]
            cfg.num_conv_layers = combo[1]
            cfg.num_channels = combo[2]
            if cfg.model_name == "GlobalCNN":
                cfg.num_fc_layers = combo[3]

        final_val_losses = np.zeros(len(dataloaders))

        for system_num, (dl_train, dl_val, _) in enumerate(dataloaders):
            print("System num:", system_num)
            model = models.create_new_model(cfg,
                                            cfg.get_model_specific_params())
            data_dict = train.train(cfg, model, 0, dl_train, dl_val)
            final_val_losses[system_num] = data_dict["Validation loss"][1][-1]

        final_val_losses_sum = np.sum(final_val_losses)
        search_data.append({
            "str": labels + "\n" + str(combo),
            "sum": final_val_losses_sum,
            "losses": final_val_losses
        })

    lowest_loss = np.inf
    best_params = ""

    with open(os.path.join(cfg.run_dir, "grid_search_results.txt"), "w") as f:
        print("\n")
        print("Results from parameter grid search for model " +
              cfg.model_name + "\n")
        f.write("Results from parameter grid search for model " +
                cfg.model_name + "\n\n")
        for data_dict in search_data:
            print("Parameters:\t", data_dict["str"])
            print("Losses:\t", data_dict["losses"])
            print("Loss sum:\t", data_dict["sum"])
            print("\n")
            f.write("Parameters:\t" + str(data_dict["str"]) + "\n")
            f.write("Losses:\t" + str(data_dict["losses"]) + "\n")
            f.write("Loss sum:\t" + str(data_dict["sum"]) + "\n")
            f.write("\n")
            if data_dict["sum"] < lowest_loss:
                lowest_loss = data_dict["sum"]
                best_params = data_dict["str"]

        print("BEST PARAMETERS:")
        print(best_params)

        f.write("BEST PARAMETERS:\n")
        f.write(best_params)
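A minimal usage sketch for grid_search, mirroring the --grs branch of Example #4; all Config keyword names come from that example and are otherwise unverified:

import config
import parameter_grid_search

# Hypothetical driver: search the grid for the first configured model.
cfg = config.Config(
    group_name=config.group_name,
    run_name=config.run_names[0][0],
    system=config.systems[0],
    data_tag=config.data_tags[0],
    model_key=config.model_keys[0],
    do_train=False,
    do_test=False,
)
parameter_grid_search.grid_search(cfg)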