Example #1
    all_gBest_metrics = np.zeros((number_runs, 2))
    runs_time = []
    all_gbest_par = []
    best_gBest_acc = 0

    for i in range(number_runs):
        print("Run number: " + str(i))
        start_time = time.time()
        pso = psoCNN(dataset=dataset,
                     n_iter=number_iterations,
                     pop_size=population_size,
                     batch_size=batch_size_pso,
                     epochs=epochs_pso,
                     min_layer=min_layer,
                     max_layer=max_layer,
                     conv_prob=probability_convolution,
                     pool_prob=probability_pooling,
                     fc_prob=probability_fully_connected,
                     max_conv_kernel=max_conv_kernel_size,
                     max_out_ch=max_conv_output_channels,
                     max_fc_neurons=max_fully_connected_neurons,
                     dropout_rate=dropout)

        pso.fit(Cg=Cg, dropout_rate=dropout)

        print(pso.gBest_acc)

        # Plot current gBest
        matplotlib.use('Agg')
        plt.plot(pso.gBest_acc)
        plt.xlabel("Iteration")
Example #2
    def pso_train(self, ui):

        # Custom changes to psoCNN:
        # within this function, cfg.epochs_pso is set to 10, as per the original paper, for optimal performance
        # within the particle.py module, the argument verbose=2 was added to the .fit() calls in model_fit and model_fit_complete

        # This function uses PSO to train on either the fashion-mnist or cifar10 dataset.
        # One-hot encoding with to_categorical is carried out within the PSO library (pso_cnn.py).
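        # For reference, the verbose change mentioned above only sets the Keras fit
        # verbosity flag; a sketch of the call inside particle.py (argument names are
        # assumptions, only verbose=2 is the actual edit):
        #   self.model.fit(x_train, y_train, batch_size=batch_size,
        #                  epochs=epochs, verbose=2)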

        if ui.dataset == "fashion-mnist":
            dataset = "fashion-mnist"
        elif ui.dataset == "cifar10":
            dataset = "cifar10"

        # Algorithm parameters #
        number_runs = ui.pso_runs  # 10 runs is a common default
        number_iterations = ui.pso_iter  # 10 iterations is a common default
        population_size = ui.pso_pop  # a population of 20 particles is a common default

        # Run the algorithm #
        results_path = "./results/" + dataset + "/"

        if not os.path.exists(results_path):
            os.makedirs(results_path)

        all_gBest_metrics = np.zeros((number_runs, 2))
        runs_time = []
        all_gbest_par = []
        best_gBest_acc = 0

        for i in range(number_runs):
            print("Run number: " + str(i))
            start_time = time.time()
            new_run = datetime.now()
            print(new_run)
            pso = psoCNN(dataset=dataset,
                         n_iter=number_iterations,
                         pop_size=population_size,
                         batch_size=cfg.batch_size_pso,
                         epochs=cfg.epochs_pso,
                         min_layer=cfg.min_layer,
                         max_layer=cfg.max_layer,
                         conv_prob=cfg.probability_convolution,
                         pool_prob=cfg.probability_pooling,
                         fc_prob=cfg.probability_fully_connected,
                         max_conv_kernel=cfg.max_conv_kernel_size,
                         max_out_ch=cfg.max_conv_output_channels,
                         max_fc_neurons=cfg.max_fully_connected_neurons,
                         dropout_rate=cfg.dropout)

            pso.fit(Cg=cfg.Cg, dropout_rate=cfg.dropout)

            print(pso.gBest_acc)

            # Plot current gBest
            matplotlib.use('Agg')
            plt.plot(pso.gBest_acc)
            plt.xlabel("Iteration")
            plt.ylabel("gBest acc")
            plt.savefig(results_path + "gBest-iter-" + str(i) + ".png")
            plt.close()

            print('gBest architecture: ')
            print(pso.gBest)

            np.save(
                results_path + "gBest_iter_" + str(i) + "_acc_history.npy",
                pso.gBest_acc)
            np.save(
                results_path + "gBest_iter_" + str(i) +
                "_test_acc_history.npy", pso.gBest_test_acc)

            end_time = time.time()

            running_time = end_time - start_time

            runs_time.append(running_time)

            # Fully train the gBest model found
            n_parameters = pso.fit_gBest(
                batch_size=cfg.batch_size_full_training,
                epochs=cfg.epochs_full_training,
                dropout_rate=cfg.dropout)

            all_gbest_par.append(n_parameters)

            # Evaluate the fully trained gBest model
            gBest_metrics = pso.evaluate_gBest(
                batch_size=cfg.batch_size_full_training)

            if gBest_metrics[1] >= best_gBest_acc:
                best_gBest_acc = gBest_metrics[1]

                # Save best gBest model
                best_gBest_yaml = pso.gBest.model.to_yaml()

                with open(results_path + "best-gBest-model.yaml",
                          "w") as yaml_file:
                    yaml_file.write(best_gBest_yaml)

                # Save best gBest model weights to HDF5 file
                pso.gBest.model.save_weights(results_path +
                                             "best-gBest-weights.h5")

            all_gBest_metrics[i, 0] = gBest_metrics[0]
            all_gBest_metrics[i, 1] = gBest_metrics[1]

            print("This run took: " + str(running_time) + " seconds.")

            # Compute the mean metrics (loss and accuracy) over all runs
            all_gBest_mean_metrics = np.mean(all_gBest_metrics, axis=0)

            np.save(results_path + "/time_to_run.npy", runs_time)

            # Save all gBest metrics
            np.save(results_path + "/all_gBest_metrics.npy", all_gBest_metrics)

            # Save results in a text file
            output_str = "All gBest number of parameters: " + str(
                all_gbest_par) + "\n"
            output_str = output_str + "All gBest test accuracies: " + str(
                all_gBest_metrics[:, 1]) + "\n"
            output_str = output_str + "All running times: " + str(
                runs_time) + "\n"
            output_str = output_str + "Mean loss of all runs: " + str(
                all_gBest_mean_metrics[0]) + "\n"
            output_str = output_str + "Mean accuracy of all runs: " + str(
                all_gBest_mean_metrics[1]) + "\n"

            print(output_str)

            with open(results_path + "/final_results.txt", "w") as f:
                print(output_str, file=f)

        return
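
As written, pso_train reads only four attributes from ui (dataset, pso_runs, pso_iter, pso_pop); every other setting comes from the cfg module the class imports. A hypothetical invocation, with SimpleNamespace standing in for the real UI object (the object names below are placeholders, not part of the original code), could look like:

from types import SimpleNamespace

ui = SimpleNamespace(dataset="fashion-mnist", pso_runs=10, pso_iter=10, pso_pop=20)
trainer.pso_train(ui)  # `trainer` is a placeholder for whatever object defines pso_train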