def test_compare(system_dict):
    forward = True
    test = "compare_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ctf = compare(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "compare_Comparison()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ctf.Comparison("Sample-Comparison-1")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "compare_Add_Experiment()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ctf.Add_Experiment("sample-project-1", "sample-experiment-1")
            ctf.Add_Experiment("sample-project-1", "sample-experiment-2")
            ctf.Add_Experiment("sample-project-1", "sample-experiment-3")
            ctf.Add_Experiment("sample-project-1", "sample-experiment-4")
            ctf.Add_Experiment("sample-project-1", "sample-experiment-5")
            ctf.Add_Experiment("sample-project-1", "sample-experiment-6")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "compare_Generate_Statistics()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ctf.Generate_Statistics()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
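The test above relies on a bookkeeping dict and two print helpers defined elsewhere in the suite. A minimal sketch of those assumed pieces, runnable on its own (the field names are exactly the ones test_compare reads and writes; the initial values and print formats are assumptions, not confirmed by the source):

def build_system_dict():
    # Counters and lists that test_compare increments and appends to.
    return {
        "total_tests": 0,
        "successful_tests": 0,
        "failed_tests_exceptions": [],
        "failed_tests_lists": [],
        "skipped_tests_lists": [],
    }

def print_start(test, test_id):
    # Announce the test about to run, with its running index.
    print("Running test {}: {}".format(test_id, test))

def print_status(status):
    # Report the outcome: "Pass", "Fail", or "Skipped".
    print("Status: {}".format(status))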
Example 2
def test_compare(system_dict):
    forward = True;
    if(not os.path.isdir("datasets")):
        # Download and unzip the sample datasets from Google Drive on first run
        # (the confirm-token dance is required for large files).
        os.system("wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt")
        os.system("unzip -qq datasets.zip")
    
    test = "compare_object_creation";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ctf = compare(verbose=0);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "compare_Comparison()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ctf.Comparison("Sample-Comparison-1");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");



    test = "compare_Add_Experiment()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ctf.Add_Experiment("sample-project-1", "sample-experiment-1");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-2");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-3");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-4");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-5");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-6");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "compare_Generate_Statistics()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ctf.Generate_Statistics();
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    return system_dict;
Example 3
    def Analyse_Optimizers(self, analysis_name, optimizer_list, percent_data, num_epochs=2, state="keep_all"):
        from pytorch_prototype import prototype
        
        project = analysis_name;
        self.custom_print("");
        self.custom_print("Running Optimizer analysis");                                                #Change 1
        self.custom_print("Analysis Name      : {}".format(project));
        self.custom_print("");

        for i in range(len(optimizer_list)):                                                            #Change 2
            ptf_ = prototype(verbose=0);    
            self.custom_print("Running experiment : {}/{}".format(i+1, len(optimizer_list)));             #Change 3        

            experiment = "Optimizer_" + str(optimizer_list[i]);                                          #Change 4, 5
            self.custom_print("Experiment name    : {}".format(experiment))
            
            ptf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);

            ptf_.Dataset_Percent(percent_data);
            dataset_type = ptf_.system_dict["dataset"]["dataset_type"];
            dataset_train_path = ptf_.system_dict["dataset"]["train_path"];
            dataset_val_path = ptf_.system_dict["dataset"]["val_path"];
            csv_train = ptf_.system_dict["dataset"]["csv_train"];
            csv_val = ptf_.system_dict["dataset"]["csv_val"];
            if(dataset_type=="train"):
                ptf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
            elif(dataset_type=="train-val"):
                ptf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path], 
                    path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
            elif(dataset_type=="csv_train"):
                ptf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
            elif(dataset_type=="csv_train-val"):
                ptf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path], 
                    path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);


            # The learning rate carries over from the copied experiment; only the optimizer changes per run.
            lr = ptf_.system_dict["hyper-parameters"]["learning_rate"]
            if(optimizer_list[i] == "adagrad"):                                                 #Change 6 
                ptf_.optimizer_adagrad(lr);
            elif(optimizer_list[i] == "adadelta"):
                ptf_.optimizer_adadelta(lr);
            elif(optimizer_list[i] == "adam"):
                ptf_.optimizer_adam(lr);
            elif(optimizer_list[i] == "adamw"):
                ptf_.optimizer_adamw(lr);
            elif(optimizer_list[i] == "sparseadam"):
                ptf_.optimizer_sparseadam(lr);
            elif(optimizer_list[i] == "adamax"):
                ptf_.optimizer_adamax(lr);
            elif(optimizer_list[i] == "asgd"):
                ptf_.optimizer_asgd(lr);
            elif(optimizer_list[i] == "rmsprop"):
                ptf_.optimizer_rmsprop(lr);
            elif(optimizer_list[i] == "rprop"):
                ptf_.optimizer_rprop(lr);
            elif(optimizer_list[i] == "sgd"):
                ptf_.optimizer_sgd(lr);

                                                      
            ptf_.Reload();                                                                                  #Change 7

            ptf_.update_num_epochs(num_epochs);
            ptf_.update_display_progress_realtime(False)
            ptf_.update_save_intermediate_models(False); 

            total_time_per_epoch = ptf_.get_training_estimate();
            total_time = total_time_per_epoch*num_epochs;
            if(int(total_time//60) == 0):
                self.custom_print("Estimated time     : {} sec".format(total_time));
            else:
                self.custom_print("Estimated time     : {} min".format(int(total_time//60)+1));

            ptf_.Train();
            self.custom_print("Experiment Complete");
            self.custom_print("\n");
            

        self.custom_print("Comparing Experiments");
        from compare_prototype import compare

        ctf_ = compare(verbose=0);
        ctf_.Comparison("Comparison_" + analysis_name);
        self.custom_print("Comparison ID:      {}".format("Comparison_" + analysis_name));


        training_accuracies = [];
        validation_accuracies = [];
        training_losses = [];
        validation_losses = [];

        tabular_data = [];

        for i in range(len(optimizer_list)):                                                                  #Change 8
            project = analysis_name;
            experiment = "Optimizer_" + str(optimizer_list[i]);                                              #Change 9, 10
            ctf_.Add_Experiment(project, experiment)

            tmp = [];
            tmp.append(experiment);
            training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
            tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
            validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
            tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
            training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
            tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
            validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
            tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
            tabular_data.append(tmp)

        
        ctf_.Generate_Statistics();

        self.custom_print("Generated statistics post all epochs");
        self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
        self.custom_print("");


        
        return_dict = {};
        for i in range(len(tabular_data)):
            return_dict[tabular_data[i][0]] = {};
            return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
            return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
            return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
            return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];

            fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
            system_dict = read_json(fname);
            return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];


        
        if(state=="keep_none"):
            shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);

        return return_dict
        
    ###############################################################################################################################################
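A hedged usage sketch for Analyse_Optimizers, assuming the method is exposed on the prototype object (as the self.system_dict["project_name"] / ["experiment_name"] usage above suggests) and that an experiment to pseudo-copy from has already been configured. The project/experiment names, optimizer choices, and data percentage below are illustrative, not from the source:

from pytorch_prototype import prototype

ptf = prototype(verbose=1)
ptf.Prototype("sample-project-1", "sample-experiment-1")
# ... dataset and model setup elided ...

# Train each candidate optimizer on 5% of the data for 2 epochs and
# discard the intermediate sub-experiments afterwards.
analysis = ptf.Analyse_Optimizers("Optimizer-Analysis-1",
                                  ["sgd", "adam", "rmsprop"],
                                  5, num_epochs=2, state="keep_none")
# The returned dict maps each "Optimizer_<name>" experiment to its final
# train/val accuracy and loss plus the recorded training time.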
Example 4
import os
import sys

sys.path.append("../../../monk/")
import psutil

from compare_prototype import compare

ctf = compare(verbose=1)
ctf.Comparison("Sample-Comparison-1")
ctf.Add_Experiment("sample-project-1", "sample-experiment-1")
ctf.Add_Experiment("sample-project-1", "sample-experiment-2")
ctf.Add_Experiment("sample-project-1", "sample-experiment-3")
ctf.Add_Experiment("sample-project-1", "sample-experiment-4")
ctf.Add_Experiment("sample-project-1", "sample-experiment-5")
ctf.Add_Experiment("sample-project-1", "sample-experiment-6")

ctf.Generate_Statistics()
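Generate_Statistics() is the last step; the comparison artifacts can then be inspected on disk. A small follow-up sketch (the workspace/comparison/<comparison-name> location is an assumption about where the library writes its outputs, not confirmed by the code above):

import os

comparison_dir = os.path.join("workspace", "comparison", "Sample-Comparison-1")  # assumed output path
if os.path.isdir(comparison_dir):
    for fname in sorted(os.listdir(comparison_dir)):
        print(fname)  # e.g. the generated metric plots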
Example 5
    def Analyse_Models(self, analysis_name, model_list, percent_data, num_epochs=2, state="keep_all"):
        from gluon_prototype import prototype
        
        project = analysis_name;
        self.custom_print("");
        self.custom_print("Running Model analysis");                                                #Change 1
        self.custom_print("Analysis Name      : {}".format(project));
        self.custom_print("");

        for i in range(len(model_list)):                                                            #Change 2
            gtf_ = prototype(verbose=0);    
            self.custom_print("Running experiment : {}/{}".format(i+1, len(model_list)));             #Change 3        

            if(model_list[i][1]):
                experiment = "Model_" + str(model_list[i][0]) + "_freeze_base";                        #Change 4, 5
            else:
                experiment = "Model_" + str(model_list[i][0]) + "_unfreeze_base";

            if(model_list[i][2]):
                experiment += "_pretrained";
            else:
                experiment += "_uninitialized";

            self.custom_print("Experiment name    : {}".format(experiment))
            
            gtf_.Prototype(project, experiment, pseudo_copy_from=[self.system_dict["project_name"], self.system_dict["experiment_name"]]);

            gtf_.Dataset_Percent(percent_data);
            dataset_type = gtf_.system_dict["dataset"]["dataset_type"];
            dataset_train_path = gtf_.system_dict["dataset"]["train_path"];
            dataset_val_path = gtf_.system_dict["dataset"]["val_path"];
            csv_train = gtf_.system_dict["dataset"]["csv_train"];
            csv_val = gtf_.system_dict["dataset"]["csv_val"];
            if(dataset_type=="train"):
                gtf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
            elif(dataset_type=="train-val"):
                gtf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path], 
                    path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);
            elif(dataset_type=="csv_train"):
                gtf_.update_dataset(dataset_path=dataset_train_path, path_to_csv="sampled_dataset_train.csv");
            elif(dataset_type=="csv_train-val"):
                gtf_.update_dataset(dataset_path=[dataset_train_path, dataset_val_path], 
                    path_to_csv=["sampled_dataset_train.csv", "sampled_dataset_val.csv"]);


            gtf_.update_model_name(model_list[i][0])                                                        #Change 6 
            gtf_.update_freeze_base_network(model_list[i][1])
            gtf_.update_use_pretrained(model_list[i][2])
            gtf_.Reload();                                                                                  #Change 7

            gtf_.update_num_epochs(num_epochs);
            gtf_.update_display_progress_realtime(False)
            gtf_.update_save_intermediate_models(False); 

            total_time_per_epoch = gtf_.get_training_estimate();
            total_time = total_time_per_epoch*num_epochs;
            if(int(total_time//60) == 0):
                self.custom_print("Estimated time     : {} sec".format(total_time));
            else:
                self.custom_print("Estimated time     : {} min".format(int(total_time//60)+1));

            gtf_.Train();
            self.custom_print("Experiment Complete");
            self.custom_print("\n");
            

        self.custom_print("Comparing Experiments");
        from compare_prototype import compare

        ctf_ = compare(verbose=0);
        ctf_.Comparison("Comparison_" + analysis_name);
        self.custom_print("Comparison ID:      {}".format("Comparison_" + analysis_name));


        training_accuracies = [];
        validation_accuracies = [];
        training_losses = [];
        validation_losses = [];

        tabular_data = [];

        for i in range(len(model_list)):                                                                  #Change 8
            project = analysis_name;
            if(model_list[i][1]):
                experiment = "Model_" + str(model_list[i][0]) + "_freeze_base";                        #Change 9, 10
            else:
                experiment = "Model_" + str(model_list[i][0]) + "_unfreeze_base";

            if(model_list[i][2]):
                experiment += "_pretrained";
            else:
                experiment += "_uninitialized";

            ctf_.Add_Experiment(project, experiment)

            tmp = [];
            tmp.append(experiment);
            training_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_acc_history.npy";
            tmp.append(np.load(training_accuracy_file, allow_pickle=True)[-1]);
            validation_accuracy_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_acc_history.npy";
            tmp.append(np.load(validation_accuracy_file, allow_pickle=True)[-1]);
            training_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/train_loss_history.npy";
            tmp.append(np.load(training_loss_file, allow_pickle=True)[-1]);
            validation_loss_file = self.system_dict["master_systems_dir_relative"] + "/" + project + "/" + experiment + "/output/logs/val_loss_history.npy";
            tmp.append(np.load(validation_loss_file, allow_pickle=True)[-1]);
            tabular_data.append(tmp)

        
        ctf_.Generate_Statistics();

        self.custom_print("Generated statistics post all epochs");
        self.custom_print(tabulate(tabular_data, headers=['Experiment Name', 'Train Acc', 'Val Acc', 'Train Loss', 'Val Loss'], tablefmt='orgtbl'));
        self.custom_print("");


        
        return_dict = {};
        for i in range(len(tabular_data)):
            return_dict[tabular_data[i][0]] = {};
            return_dict[tabular_data[i][0]]["training_accuracy"] = tabular_data[i][1];
            return_dict[tabular_data[i][0]]["validation_accuracy"] = tabular_data[i][2];
            return_dict[tabular_data[i][0]]["training_loss"] = tabular_data[i][3];
            return_dict[tabular_data[i][0]]["validation_loss"] = tabular_data[i][4];

            fname = self.system_dict["master_systems_dir_relative"] + analysis_name + "/" + tabular_data[i][0] + "/experiment_state.json";
            system_dict = read_json(fname);
            return_dict[tabular_data[i][0]]["training_time"] = system_dict["training"]["outputs"]["training_time"];


        
        if(state=="keep_none"):
            shutil.rmtree(self.system_dict["master_systems_dir_relative"] + analysis_name);

        return return_dict
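As with the optimizer analysis, a hedged usage sketch for Analyse_Models (the model names and flags below are illustrative; each model_list entry is read above as [model_name, freeze_base, use_pretrained]):

from gluon_prototype import prototype

gtf = prototype(verbose=1)
gtf.Prototype("sample-project-1", "sample-experiment-1")
# ... dataset and training setup elided ...

model_list = [
    ["resnet18_v1", True,  True],   # freeze base network, use pretrained weights
    ["resnet18_v1", False, True],   # train the whole network, use pretrained weights
]
analysis = gtf.Analyse_Models("Model-Analysis-1", model_list, 5,
                              num_epochs=2, state="keep_all")
# Keys of the returned dict are the generated experiment names,
# e.g. "Model_resnet18_v1_freeze_base_pretrained".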