Example 1
def test_block_densenet(system_dict):
    forward = True;

    test = "test_block_densenet";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");


            network = [];
            network.append(gtf.densenet_block(bottleneck_size=4, growth_rate=16, dropout=0.2));
            gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);

            x = torch.randn(1, 1, 64, 64);
            y = gtf.system_dict["local"]["model"](x);     

            system_dict["successful_tests"] += 1;
            print_status("Pass");

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");

    return system_dict
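
These tests share a bookkeeping dictionary and two reporting helpers that are defined elsewhere in the suite. A minimal sketch of what they might look like, inferred only from the keys and calls used in these examples (the real implementations may differ):

def create_system_dict():
    # Counters and lists that every test in this collection reads or appends to.
    return {
        "total_tests": 0,
        "successful_tests": 0,
        "failed_tests_exceptions": [],
        "failed_tests_lists": [],
        "skipped_tests_lists": [],
    }

def print_start(test_name, test_number):
    # Hypothetical stand-in for the helper that announces a test.
    print("Running test {}: {}".format(test_number, test_name))

def print_status(status):
    # status is one of "Pass", "Fail" or "Skipped" in the tests above.
    print("Status: {}".format(status))
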
Example 2
def test_block_inception_a(system_dict):
    forward = True;

    test = "test_block_inception_a";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");


            network = [];
            network.append(gtf.inception_a_block(pooling_branch_channels=32, pool_type="avg"));
            network.append(gtf.inception_a_block(pooling_branch_channels=32, pool_type="max"));
            gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);

            x = torch.randn(1, 1, 64, 64);
            y = gtf.system_dict["local"]["model"](x);         

            system_dict["successful_tests"] += 1;
            print_status("Pass");

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");

    return system_dict
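
Most block, layer and activation tests in this collection repeat the same compile-and-forward pattern. A possible refactoring into one helper; `run_forward_test` and `make_layers` are hypothetical names, while `prototype`, `Prototype`, `Compile_Network` and the `system_dict["local"]["model"]` lookup are taken from the examples themselves:

import torch

def run_forward_test(make_layers, data_shape=(1, 64, 64)):
    # make_layers receives the prototype object and returns the list of
    # custom-network elements to compile.
    gtf = prototype(verbose=0)
    gtf.Prototype("sample-project-1", "sample-experiment-1")

    network = make_layers(gtf)
    gtf.Compile_Network(network, data_shape=data_shape, use_gpu=False)

    # Forward one random batch with the same shape used for compilation.
    x = torch.randn(1, *data_shape)
    return gtf.system_dict["local"]["model"](x)

# The inception-A test above could then be written as:
# y = run_forward_test(lambda gtf: [
#     gtf.inception_a_block(pooling_branch_channels=32, pool_type="avg"),
#     gtf.inception_a_block(pooling_branch_channels=32, pool_type="max"),
# ])
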
Example 3
def test_loss_sigmoid_binary_crossentropy(system_dict):
    forward = True;

    test = "test_loss_sigmoid_binary_crossentropy";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");

            label = torch.empty((1, 5)).random_(2);

            y = torch.randn(1, 5);

            gtf.loss_sigmoid_binary_crossentropy();
            load_loss(gtf.system_dict);
            loss_obj = gtf.system_dict["local"]["criterion"];
            loss_val = loss_obj(y, label);           

            system_dict["successful_tests"] += 1;
            print_status("Pass");

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");

    return system_dict
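
loss_sigmoid_binary_crossentropy presumably maps to PyTorch's combined sigmoid + binary cross-entropy criterion; that mapping is an assumption, but the standalone torch.nn equivalent of the call above is:

import torch
import torch.nn as nn

label = torch.empty((1, 5)).random_(2)   # binary targets in {0, 1}
y = torch.randn(1, 5)                    # raw logits, no sigmoid applied yet

criterion = nn.BCEWithLogitsLoss()       # applies the sigmoid internally
loss_val = criterion(y, label)           # same (input, target) order as the test above
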
Example 4
def test_block_resnet_v2_bottleneck(system_dict):
    forward = True;

    test = "test_block_resnet_v2_bottleneck";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");


            network = [];
            network.append(gtf.resnet_v2_bottleneck_block(output_channels=32, stride=1, downsample=True));
            network.append(gtf.resnet_v2_bottleneck_block(output_channels=32, stride=1, downsample=False));
            gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);

            x = torch.randn(1, 1, 64, 64);
            y = gtf.system_dict["local"]["model"](x);    

            system_dict["successful_tests"] += 1;
            print_status("Pass");

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");

    return system_dict
Example 5
def test_activation_rrelu(system_dict):
    forward = True;

    test = "test_activation_rrelu";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");


            network = [];
            network.append(gtf.rrelu());
            gtf.Compile_Network(network, data_shape=(3, 64, 64), use_gpu=False);

            x = torch.randn(1, 3, 64, 64);
            y = gtf.system_dict["local"]["model"](x);       

            system_dict["successful_tests"] += 1;
            print_status("Pass");

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");

    return system_dict
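
RReLU is stochastic in training mode, so the forward pass above is not deterministic. A self-contained sketch of the behaviour, assuming Monk's rrelu() wraps torch.nn.RReLU with PyTorch's default bounds:

import torch
import torch.nn as nn

act = nn.RReLU(lower=1/8, upper=1/3)   # PyTorch defaults
x = torch.randn(1, 3, 64, 64)

act.train()
y_train = act(x)   # negative slope sampled per element on every call

act.eval()
y_eval = act(x)    # deterministic: slope fixed at (lower + upper) / 2
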
Example 6
def test_loss_kldiv(system_dict):
    forward = True

    test = "test_loss_kldiv"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            gtf = prototype(verbose=0)
            gtf.Prototype("sample-project-1", "sample-experiment-1")

            label = torch.randn(1, 5)

            y = torch.randn(1, 5)

            gtf.loss_kldiv()
            load_loss(gtf.system_dict)
            loss_obj = gtf.system_dict["local"]["criterion"]
            loss_val = loss_obj(y, label)

            system_dict["successful_tests"] += 1
            print_status("Pass")

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
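
If loss_kldiv wraps torch.nn.KLDivLoss (an assumption), the raw torch.randn tensors above exercise the call path but do not form a meaningful divergence: KLDivLoss expects log-probabilities as input and probabilities as target. The conventional usage looks like this:

import torch
import torch.nn.functional as F

criterion = torch.nn.KLDivLoss(reduction="batchmean")

log_probs = F.log_softmax(torch.randn(1, 5), dim=1)   # input: log-probabilities
target_probs = F.softmax(torch.randn(1, 5), dim=1)    # target: probabilities

loss_val = criterion(log_probs, target_probs)         # non-negative KL divergence
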
Example 7
def test_optimizer_momentum_rmsprop(system_dict):
    forward = True;
    if(not os.path.isdir("datasets")):
        # Download and extract the sample datasets from Google Drive if not already present.
        os.system("wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt")
        os.system("unzip -qq datasets.zip")

    test = "test_optimizer_momentum_rmsprop";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            gtf = prototype(verbose=0);
            gtf.Prototype("sample-project-1", "sample-experiment-1");
            gtf.Default(dataset_path="datasets/dataset_cats_dogs_train", 
                model_name="resnet18", freeze_base_network=True, num_epochs=2);
            gtf.optimizer_momentum_rmsprop(0.01, weight_decay=0.0001, decay_rate=0.9);
            gtf.Train();
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");

    return system_dict
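
optimizer_momentum_rmsprop(0.01, weight_decay=0.0001, decay_rate=0.9) presumably configures something close to torch.optim.RMSprop, with decay_rate playing the role of the smoothing constant alpha; that mapping, and the momentum value used by default, are assumptions. The bare PyTorch form would be:

import torch

model = torch.nn.Linear(10, 2)   # placeholder for the network being trained

optimizer = torch.optim.RMSprop(
    model.parameters(),
    lr=0.01,
    alpha=0.9,            # assumed to correspond to decay_rate above
    weight_decay=0.0001,
    momentum=0.9,         # illustrative; the test does not set it explicitly
)
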
Example 8
def test_layer_transposed_convolution2d(system_dict):
    forward = True

    test = "test_layer_transposed_convolution2d"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            gtf = prototype(verbose=0)
            gtf.Prototype("sample-project-1", "sample-experiment-1")

            network = []
            network.append(
                gtf.transposed_convolution2d(output_channels=3, kernel_size=3))
            gtf.Compile_Network(network,
                                data_shape=(3, 128, 128),
                                use_gpu=False)

            x = torch.randn(1, 3, 128, 128)
            y = gtf.system_dict["local"]["model"](x)

            system_dict["successful_tests"] += 1
            print_status("Pass")

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
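
A transposed convolution grows the spatial size: H_out = (H_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + output_padding + 1, so a 3x3 kernel with stride 1 and no padding maps 128x128 to 130x130. Whether Monk's transposed_convolution2d uses exactly those defaults is an assumption; the bare PyTorch check is:

import torch
import torch.nn as nn

layer = nn.ConvTranspose2d(in_channels=3, out_channels=3, kernel_size=3)  # stride=1, padding=0
y = layer(torch.randn(1, 3, 128, 128))
print(y.shape)   # torch.Size([1, 3, 130, 130])
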
Example 9
def test_activation_softmin(system_dict):
    forward = True

    test = "test_activation_softmin"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            gtf = prototype(verbose=0)
            gtf.Prototype("sample-project-1", "sample-experiment-1")

            network = []
            network.append(gtf.softmin())
            gtf.Compile_Network(network, data_shape=(3, 64, 64), use_gpu=False)

            x = torch.randn(1, 3, 64, 64)
            y = gtf.system_dict["local"]["model"](x)

            system_dict["successful_tests"] += 1
            print_status("Pass")

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
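
Softmin is simply softmax applied to the negated input, which makes the forward pass above easy to sanity-check against torch.nn.functional directly:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 64, 64)
assert torch.allclose(F.softmin(x, dim=1), F.softmax(-x, dim=1))
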
Example 10
def test_block_resnext(system_dict):
    forward = True

    test = "test_block_resnext"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            gtf = prototype(verbose=0)
            gtf.Prototype("sample-project-1", "sample-experiment-1")

            network = []
            network.append(
                gtf.resnext_block(output_channels=256,
                                  cardinality=8,
                                  bottleneck_width=4,
                                  stride=1,
                                  downsample=True))
            network.append(
                gtf.resnext_block(output_channels=256,
                                  cardinality=8,
                                  bottleneck_width=4,
                                  stride=1,
                                  downsample=False))
            gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False)

            x = torch.randn(1, 1, 64, 64)
            y = gtf.system_dict["local"]["model"](x)

            system_dict["successful_tests"] += 1
            print_status("Pass")

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
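
In the ResNeXt design, cardinality times bottleneck_width sets the width of the grouped 3x3 convolution inside each block, i.e. 8 * 4 = 32 channels for the parameters above; whether Monk sizes it exactly this way internally is an assumption. The corresponding bare PyTorch layer would be:

import torch
import torch.nn as nn

grouped_conv = nn.Conv2d(32, 32, kernel_size=3, padding=1, groups=8)  # 8 groups of 4 channels
y = grouped_conv(torch.randn(1, 32, 64, 64))   # spatial size preserved: (1, 32, 64, 64)
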
Example 11
def test_analyse(system_dict):
    forward = True;
    if(not os.path.isdir("datasets")):
        os.system("wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt")
        os.system("unzip -qq datasets.zip")
        

    test = "analyse_object_creation";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ptf = prototype(verbose=0);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Prototype()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-1");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Default()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            ptf.Default(dataset_path="datasets/dataset_cats_dogs_train", 
                model_name="resnet18", freeze_base_network=True, num_epochs=2);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Analyse_Learning_Rates()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_learning_rates"
            lrs = [0.1, 0.05];
            epochs=2
            percent_data=40
            analysis = ptf.Analyse_Learning_Rates(analysis_name, lrs, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Analyse_Input_Sizes()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_input_sizes";
            input_sizes = [128, 256];
            epochs=2;
            percent_data=40;
            analysis = ptf.Analyse_Input_Sizes(analysis_name, input_sizes, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");



    test = "analyse_Analyse_Batch_Sizes()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_batch_sizes";
            batch_sizes = [2, 3];
            epochs = 2;
            percent_data = 40;
            analysis = ptf.Analyse_Batch_Sizes(analysis_name, batch_sizes, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");




    test = "analyse_Analyse_Models()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_models";
            models = [["resnet18", True, True], ["resnet34", False, True]];
            epochs = 2;
            percent_data = 40;
            analysis = ptf.Analyse_Models(analysis_name, models, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");



    test = "analyse_Analyse_Optimizers()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_optimizers";
            optimizers = ["sgd", "adam"];
            epochs = 2;
            percent_data = 40;
            analysis = ptf.Analyse_Optimizers(analysis_name, optimizers, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");

    return system_dict
Example 12
def test_default_eval_infer(system_dict):
    forward = True
    if (not os.path.isdir("datasets")):
        # Download and extract the sample datasets from Google Drive if not already present.
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "default_eval_infer_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1",
                          "sample-experiment-1",
                          eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Infer-img()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            img_name = "datasets/dataset_cats_dogs_test/0.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            img_name = "datasets/dataset_cats_dogs_test/84.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Infer-folder()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            inference_dataset = "datasets/dataset_cats_dogs_test"
            output = ptf.Infer(img_dir=inference_dataset, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example 13
def test_update_eval_infer(system_dict):
    forward = True
    if (not os.path.isdir("datasets")):
        # Download and extract the sample datasets from Google Drive if not already present.
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "update_normal_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1",
                          "sample-experiment-3",
                          eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_csv_id/test",
                               path_to_csv="datasets/dataset_csv_id/test.csv",
                               delimiter=",")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_model_path()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_model_path(
                "workspace/sample-project-1/sample-experiment-3/output/models/best_model"
            )
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Reload()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Reload()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example 14
def test_switch_default(system_dict):
    forward = True
    if (not os.path.isdir("datasets")):
        # Download and extract the sample datasets from Google Drive if not already present.
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "switch_default_object_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_object_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-5")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Default()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Default(dataset_path=[
                "datasets/dataset_cats_dogs_train",
                "datasets/dataset_cats_dogs_eval"
            ],
                        model_name="resnet18",
                        freeze_base_network=True,
                        num_epochs=10)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_EDA()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.EDA(check_missing=True, check_corrupt=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Switch_Mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Switch_Mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(train=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example 15
def test_switch_expert(system_dict):
    forward = True
    if (not os.path.isdir("datasets")):
        # Download and extract the sample datasets from Google Drive if not already present.
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "switch_expert_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-6")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_switch_mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Model_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model_Params(
                model_path=
                "workspace/sample-project-1/sample-experiment-5/output/models/intermediate_model_9",
                use_gpu=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Model()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_update_input_size()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_input_size(224)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Infer-Img()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            img_name = "datasets/dataset_cats_dogs_test/0.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            img_name = "datasets/dataset_cats_dogs_test/84.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Infer-Folder()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            inference_dataset = "datasets/dataset_cats_dogs_test/"
            output = ptf.Infer(img_dir=inference_dataset, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval",
                               input_size=224)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_switch_mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(train=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_train",
                               split=0.9,
                               input_size=224,
                               batch_size=16,
                               shuffle_data=True,
                               num_processors=3)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_apply_transforms()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.apply_random_resized_crop(256, train=True, val=True, test=True)
            ptf.apply_random_perspective(train=True, val=True)
            ptf.apply_random_vertical_flip(train=True, val=True)
            ptf.apply_random_horizontal_flip(train=True, val=True)
            ptf.apply_normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225],
                                train=True,
                                val=True,
                                test=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model_Params(model_name="resnet18",
                             freeze_base_network=True,
                             use_gpu=True,
                             use_pretrained=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_lr_multistep_decrease()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.lr_multistep_decrease([1, 3], gamma=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_optimizer_sgd()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.optimizer_sgd(0.001, momentum=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_loss_softmax_crossentropy()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.loss_softmax_crossentropy()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Training_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Training_Params(
                num_epochs=4,
                display_progress=True,
                display_progress_realtime=True,
                save_intermediate_models=True,
                intermediate_model_prefix="intermediate_model_",
                save_training_logs=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
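
The calls in expert_train_apply_transforms() mirror standard torchvision transforms, with the usual ImageNet normalization statistics. A rough torchvision-only equivalent of that training pipeline (the exact defaults Monk applies, such as flip probabilities and interpolation, are assumptions):

from torchvision import transforms

train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(256),
    transforms.RandomPerspective(),
    transforms.RandomVerticalFlip(),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),   # Monk converts to tensors internally; added here for completeness
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
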
Example 16
def test_expert_train(system_dict):
    forward = True
    if (not os.path.isdir("datasets")):
        # Download and extract the sample datasets from Google Drive if not already present.
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "expert_train_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-4")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path=[
                "datasets/dataset_csv_id/train", "datasets/dataset_csv_id/val"
            ],
                               path_to_csv=[
                                   "datasets/dataset_csv_id/train.csv",
                                   "datasets/dataset_csv_id/val.csv"
                               ],
                               split=0.9,
                               input_size=224,
                               batch_size=16,
                               shuffle_data=True,
                               num_processors=3)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model_Params(model_name="resnet18",
                             freeze_base_network=True,
                             use_gpu=True,
                             use_pretrained=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_append_layer()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.append_dropout(probability=0.1)
            ptf.append_linear(final_layer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_lr_step_decrease()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.lr_step_decrease(1, gamma=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_optimizer_sgd()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
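            # SGD optimizer with base learning rate 0.001 and momentum 0.9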
            ptf.optimizer_sgd(0.001, momentum=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_loss_softmax_crossentropy()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.loss_softmax_crossentropy()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Training_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
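            # Train for 3 epochs with realtime progress, intermediate checkpoints, and saved logs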
            ptf.Training_Params(
                num_epochs=3,
                display_progress=True,
                display_progress_realtime=True,
                save_intermediate_models=True,
                intermediate_model_prefix="intermediate_model_",
                save_training_logs=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
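            # Launch training with the expert-mode configuration assembled above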
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example no. 17
def test_layer_concatenate(system_dict):
    forward = True

    test = "test_layer_concatenate"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            gtf = prototype(verbose=0)
            gtf.Prototype("sample-project-1", "sample-experiment-1")

            network = []
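            # Stem: convolution -> batch norm -> ReLU -> max pooling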
            network.append(gtf.convolution(output_channels=16))
            network.append(gtf.batch_normalization())
            network.append(gtf.relu())
            network.append(gtf.max_pooling())

            subnetwork = []
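            # Three parallel branches; the trailing concatenate() merges their outputs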
            branch1 = []
            branch1.append(gtf.convolution(output_channels=16))
            branch1.append(gtf.batch_normalization())
            branch1.append(gtf.convolution(output_channels=16))
            branch1.append(gtf.batch_normalization())

            branch2 = []
            branch2.append(gtf.convolution(output_channels=16))
            branch2.append(gtf.batch_normalization())

            branch3 = []
            branch3.append(gtf.identity())

            subnetwork.append(branch1)
            subnetwork.append(branch2)
            subnetwork.append(branch3)
            subnetwork.append(gtf.concatenate())

            network.append(subnetwork)

            network.append(gtf.convolution(output_channels=16))
            network.append(gtf.batch_normalization())
            network.append(gtf.relu())
            network.append(gtf.max_pooling())

            network.append(gtf.flatten())
            network.append(gtf.fully_connected(units=1024))
            network.append(gtf.dropout(drop_probability=0.2))
            network.append(gtf.fully_connected(units=2))

            gtf.Compile_Network(network, data_shape=(3, 64, 64), use_gpu=False)

            x = torch.randn(1, 3, 64, 64)
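            # Forward a dummy batch through the compiled model to confirm the graph runs end to end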
            y = gtf.system_dict["local"]["model"](x)

            system_dict["successful_tests"] += 1
            print_status("Pass")

        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example no. 18
def test_update_copy_from(system_dict):
    forward = True
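    # Download and unpack the sample datasets if they are not already present locally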
    if (not os.path.isdir("datasets")):
        os.system(
            "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("! unzip -qq datasets.zip")

    test = "update_copy_from_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
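            # Create a new experiment that copies its configuration from sample-experiment-1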
            ptf.Prototype(
                "sample-project-1",
                "sample-experiment-2",
                copy_from=["sample-project-1", "sample-experiment-1"])
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_reset_transforms()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.reset_transforms()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_apply_transforms()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
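            # Re-apply the augmentation and normalization pipeline after the reset above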
            ptf.apply_random_resized_crop(256, train=True, val=True, test=True)
            ptf.apply_random_perspective(train=True, val=True)
            ptf.apply_random_vertical_flip(train=True, val=True)
            ptf.apply_random_horizontal_flip(train=True, val=True)
            ptf.apply_normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225],
                                train=True,
                                val=True,
                                test=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_dataset(dataset_path=[
                "datasets/dataset_cats_dogs_train",
                "datasets/dataset_cats_dogs_eval"
            ])
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_input_size()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_input_size(256)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_batch_size()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_batch_size(6)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_shuffle_data()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_shuffle_data(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_num_processors()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_num_processors(16)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_trainval_split()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_trainval_split(0.6)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_Reload()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
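            # Reload the experiment so the dataset and loader updates above take effect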
            ptf.Reload()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_EDA()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.EDA(check_missing=True, check_corrupt=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example no. 19
def test_update_normal(system_dict):
    forward = True
    if (not os.path.isdir("datasets")):
        os.system(
            "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("! unzip -qq datasets.zip")

    test = "update_normal_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-3")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Default()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
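            # Quick-mode setup: CSV-labelled dataset, resnet18 with the base network frozen, 10 epochs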
            ptf.Default(dataset_path="datasets/dataset_csv_id/train",
                        path_to_csv="datasets/dataset_csv_id/train.csv",
                        delimiter=",",
                        model_name="resnet18",
                        freeze_base_network=True,
                        num_epochs=10)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_model_name()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_model_name("resnet50")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_use_gpu()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_use_gpu(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_use_pretrained()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_use_pretrained(True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_freeze_base_network()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_freeze_base_network(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_freeze_layers()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_freeze_layers(10)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_num_epochs()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_num_epochs(2)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_display_progress_realtime()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_display_progress_realtime(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_display_progress()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_display_progress(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_save_intermediate_models()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_save_intermediate_models(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_save_training_logs()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_save_training_logs(True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_lr_fixed()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.lr_fixed()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Reload()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Reload()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_EDA()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.EDA(check_missing=True, check_corrupt=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Estimate_Train_Time()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
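            # Estimate training time before launching the full run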
            ptf.Estimate_Train_Time()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict