Example #1
def test_update_copy_from(system_dict):
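    """End-to-end check of the copy_from workflow: create an experiment copied
    from sample-experiment-1, reset and re-apply transforms, then update the
    dataset, input size, batch size, shuffling, worker count and train/val
    split, Reload, run EDA and Train. The forward flag gates the chain: once a
    step fails, every remaining step is reported as skipped."""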
    forward = True
    # Fetch and unzip the sample datasets from Google Drive on first run (sed backreference escaped for the shell).
    if (not os.path.isdir("datasets")):
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "update_copy_from_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype(
                "sample-project-1",
                "sample-experiment-2",
                copy_from=["sample-project-1", "sample-experiment-1"])
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_reset_transforms()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.reset_transforms()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_apply_transforms()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.apply_random_resized_crop(256, train=True, val=True, test=True)
            ptf.apply_random_perspective(train=True, val=True)
            ptf.apply_random_vertical_flip(train=True, val=True)
            ptf.apply_random_horizontal_flip(train=True, val=True)
            ptf.apply_normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225],
                                train=True,
                                val=True,
                                test=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_dataset(dataset_path=[
                "datasets/dataset_cats_dogs_train",
                "datasets/dataset_cats_dogs_eval"
            ])
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_input_size()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_input_size(256)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_batch_size()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_batch_size(6)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_shuffle_data()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_shuffle_data(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_num_processors()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_num_processors(16)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_update_trainval_split()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_trainval_split(0.6)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_Reload()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Reload()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_EDA()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.EDA(check_missing=True, check_corrupt=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_copy_from_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example #2
def test_switch_default(system_dict):
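    """Check Switch_Mode on a quick-prototype experiment: train via Default(),
    run EDA, switch to eval_infer mode, point Dataset_Params at the eval split,
    Evaluate, then switch back to train mode and Train again. The forward flag
    gates the chain: once a step fails, the remaining steps are skipped."""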
    forward = True
    # Fetch and unzip the sample datasets from Google Drive on first run (sed backreference escaped for the shell).
    if (not os.path.isdir("datasets")):
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "switch_default_object_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_object_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-5")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Default()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Default(dataset_path=[
                "datasets/dataset_cats_dogs_train",
                "datasets/dataset_cats_dogs_eval"
            ],
                        model_name="resnet18",
                        freeze_base_network=True,
                        num_epochs=10)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_EDA()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.EDA(check_missing=True, check_corrupt=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Switch_Mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Switch_Mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(train=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_default_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example #3
def test_switch_expert(system_dict):
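    """Check Switch_Mode on an expert-mode experiment: load a saved intermediate
    model in eval_infer mode, run single-image and folder inference and
    Evaluate, then switch back to train mode and rebuild the full expert
    pipeline (dataset params, transforms, model, scheduler, optimizer, loss,
    training params) before training. The forward flag gates the chain: once a
    step fails, the remaining steps are skipped."""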
    forward = True
    # Fetch and unzip the sample datasets from Google Drive on first run (sed backreference escaped for the shell).
    if (not os.path.isdir("datasets")):
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "switch_expert_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-6")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_switch_mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Model_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model_Params(
                model_path=
                "workspace/sample-project-1/sample-experiment-5/output/models/intermediate_model_9",
                use_gpu=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Model()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_update_input_size()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_input_size(224)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Infer-Img()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            img_name = "datasets/dataset_cats_dogs_test/0.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            img_name = "datasets/dataset_cats_dogs_test/84.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Infer-Folder()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            inference_dataset = "datasets/dataset_cats_dogs_test/"
            output = ptf.Infer(img_dir=inference_dataset, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval",
                               input_size=224)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "switch_expert_switch_mode()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Switch_Mode(train=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_train",
                               split=0.9,
                               input_size=224,
                               batch_size=16,
                               shuffle_data=True,
                               num_processors=3)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_apply_transforms()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.apply_random_resized_crop(256, train=True, val=True, test=True)
            ptf.apply_random_perspective(train=True, val=True)
            ptf.apply_random_vertical_flip(train=True, val=True)
            ptf.apply_random_horizontal_flip(train=True, val=True)
            ptf.apply_normalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225],
                                train=True,
                                val=True,
                                test=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model_Params(model_name="resnet18",
                             freeze_base_network=True,
                             use_gpu=True,
                             use_pretrained=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_lr_multistep_decrease()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.lr_multistep_decrease([1, 3], gamma=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_optimizer_sgd()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.optimizer_sgd(0.001, momentum=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_loss_softmax_crossentropy()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.loss_softmax_crossentropy()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Training_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Training_Params(
                num_epochs=4,
                display_progress=True,
                display_progress_realtime=True,
                save_intermediate_models=True,
                intermediate_model_prefix="intermediate_model_",
                save_training_logs=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example #4
def test_expert_train(system_dict):
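    """Check the expert-mode training pipeline on a CSV-labelled dataset: set
    Dataset_Params with train/val folders and CSV files, build the dataset and
    a resnet18 model with appended dropout and linear layers, configure the
    scheduler, optimizer, loss and Training_Params, then Train. The forward
    flag gates the chain: once a step fails, the remaining steps are skipped."""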
    forward = True
    # Fetch and unzip the sample datasets from Google Drive on first run (sed backreference escaped for the shell).
    if (not os.path.isdir("datasets")):
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "expert_train_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-4")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path=[
                "datasets/dataset_csv_id/train", "datasets/dataset_csv_id/val"
            ],
                               path_to_csv=[
                                   "datasets/dataset_csv_id/train.csv",
                                   "datasets/dataset_csv_id/val.csv"
                               ],
                               split=0.9,
                               input_size=224,
                               batch_size=16,
                               shuffle_data=True,
                               num_processors=3)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model_Params(model_name="resnet18",
                             freeze_base_network=True,
                             use_gpu=True,
                             use_pretrained=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_append_layer()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.append_dropout(probability=0.1)
            ptf.append_linear(final_layer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Model()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Model()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_lr_step_decrease()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.lr_step_decrease(1, gamma=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_optimizer_sgd()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.optimizer_sgd(0.001, momentum=0.9)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_loss_softmax_crossentropy()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.loss_softmax_crossentropy()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Training_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Training_Params(
                num_epochs=3,
                display_progress=True,
                display_progress_realtime=True,
                save_intermediate_models=True,
                intermediate_model_prefix="intermediate_model_",
                save_training_logs=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "expert_train_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example #5
def test_update_eval_infer(system_dict):
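    """Check the update APIs in eval_infer mode: load an experiment with
    eval_infer=True, point Dataset_Params at a CSV test set, build the dataset,
    update the model path to the saved best model, Reload and Evaluate. The
    forward flag gates the chain: once a step fails, the remaining steps are
    skipped."""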
    forward = True
    # Fetch and unzip the sample datasets from Google Drive on first run (sed backreference escaped for the shell).
    if (not os.path.isdir("datasets")):
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "update_normal_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1",
                          "sample-experiment-3",
                          eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset_Params(dataset_path="datasets/dataset_csv_id/test",
                               path_to_csv="datasets/dataset_csv_id/test.csv",
                               delimiter=",")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_model_path()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_model_path(
                "workspace/sample-project-1/sample-experiment-3/output/models/best_model"
            )
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Reload()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Reload()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example #6
def test_analyse(system_dict):
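    """Check the hyper-parameter analysis APIs on a Default experiment:
    Analyse_Learning_Rates, Analyse_Input_Sizes, Analyse_Batch_Sizes,
    Analyse_Models and Analyse_Optimizers, each run on 40% of the data with
    state="keep_none". The forward flag gates the chain: once a step fails,
    the remaining steps are skipped."""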
    forward = True;
    # Fetch and unzip the sample datasets from Google Drive on first run (sed backreference escaped for the shell).
    if(not os.path.isdir("datasets")):
        os.system("wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt")
        os.system("unzip -qq datasets.zip")

    test = "analyse_object_creation";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ptf = prototype(verbose=0);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Prototype()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-1");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Default()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            ptf.Default(dataset_path="datasets/dataset_cats_dogs_train", 
                model_name="resnet18", freeze_base_network=True, num_epochs=2);
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Analyse_Learning_Rates()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_learning_rates"
            lrs = [0.1, 0.05];
            epochs=2
            percent_data=40
            analysis = ptf.Analyse_Learning_Rates(analysis_name, lrs, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "analyse_Analyse_Input_Sizes()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_input_sizes";
            input_sizes = [128, 256];
            epochs=2;
            percent_data=40;
            analysis = ptf.Analyse_Input_Sizes(analysis_name, input_sizes, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");



    test = "analyse_Analyse_Batch_Sizes()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_batch_sizes";
            batch_sizes = [2, 3];
            epochs = 2;
            percent_data = 40;
            analysis = ptf.Analyse_Batch_Sizes(analysis_name, batch_sizes, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");




    test = "analyse_Analyse_Models()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_models";
            models = [["resnet18", True, True], ["resnet34", False, True]]; 
            percent_data=40;
            analysis = ptf.Analyse_Models(analysis_name, models, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");



    test = "analyse_Analyse_Optimizers()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward): 
        try:
            analysis_name = "analyse_optimizers";
            optimizers = ["sgd", "adam"];
            epochs = 2;
            percent_data = 40;
            analysis = ptf.Analyse_Optimizers(analysis_name, optimizers, percent_data, num_epochs=epochs, state="keep_none");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");







    

    return system_dict
Example #7
def test_update_normal(system_dict):
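    """Check the update APIs in training mode: build a Default experiment on a
    CSV dataset, then update the model name, GPU usage, pretrained flag,
    base-network freezing, frozen-layer count, epoch count, progress display
    options and the intermediate-model / training-log saving flags. The forward
    flag gates the chain: once a step fails, the remaining steps are skipped."""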
    forward = True
    # Fetch and unzip the sample datasets from Google Drive on first run (sed backreference escaped for the shell).
    if (not os.path.isdir("datasets")):
        os.system(
            "wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("unzip -qq datasets.zip")

    test = "update_normal_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Prototype("sample-project-1", "sample-experiment-3")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Default()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.Default(dataset_path="datasets/dataset_csv_id/train",
                        path_to_csv="datasets/dataset_csv_id/train.csv",
                        delimiter=",",
                        model_name="resnet18",
                        freeze_base_network=True,
                        num_epochs=10)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_model_name()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_model_name("resnet50")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_use_gpu()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_use_gpu(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_use_pretrained()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_use_pretrained(True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_freeze_base_network()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_freeze_base_network(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_freeze_layers()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_freeze_layers(10)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_num_epochs()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_num_epochs(2)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_display_progress_realtime()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_display_progress_realtime(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_display_progress()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_display_progress(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_save_intermediate_models()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_save_intermediate_models(False)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_update_save_training_logs()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf.update_save_training_logs(True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_lr_fixed()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
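            # Use a fixed (constant) learning-rate schedule.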
            ptf.lr_fixed()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Reload()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
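            # Reload() rebuilds the pipeline so the updated settings above are applied.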
            ptf.Reload()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_EDA()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
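            # Exploratory data analysis: flag missing and corrupt images in the dataset.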
            ptf.EDA(check_missing=True, check_corrupt=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Estimate_Train_Time()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
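            # Print a rough estimate of how long training will take.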
            ptf.Estimate_Train_Time()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "update_normal_Train()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
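            # Run the full training loop for the configured number of epochs.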
            ptf.Train()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict
Example #8
def test_compare(system_dict):
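    # Workflow: create the compare object, name the comparison, register
    # completed experiments with Add_Experiment(), then Generate_Statistics()
    # produces the comparison report/plots.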
    forward = True
    if (not os.path.isdir("datasets")):
        os.system(
            "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("! unzip -qq datasets.zip")

    test = "compare_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ctf = compare(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")


    test = "compare_Comparison()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ctf.Comparison("Sample-Comparison-1");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");



    test = "compare_Add_Experiment()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ctf.Add_Experiment("sample-project-1", "sample-experiment-1");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-2");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-3");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-4");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-5");
            ctf.Add_Experiment("sample-project-1", "sample-experiment-6");
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    test = "compare_Generate_Statistics()";
    system_dict["total_tests"] += 1;
    print_start(test, system_dict["total_tests"])
    if(forward):
        try:
            ctf.Generate_Statistics();
            system_dict["successful_tests"] += 1;
            print_status("Pass");
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e);
            system_dict["failed_tests_lists"].append(test);
            forward = False;
            print_status("Fail");
    else:
        system_dict["skipped_tests_lists"].append(test);
        print_status("Skipped");


    return system_dict
Example #9
def test_default_eval_infer(system_dict):
    forward = True
    if (not os.path.isdir("datasets")):
        os.system(
            "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("! unzip -qq datasets.zip")

    test = "default_eval_infer_object_creation"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
            ptf = prototype(verbose=0)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Prototype()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
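            # eval_infer=True loads the already-trained experiment for
            # evaluation and inference only (no training setup).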
            ptf.Prototype("sample-project-1",
                          "sample-experiment-1",
                          eval_infer=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Infer-img()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
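            # Single-image inference; return_raw=True also requests the raw class scores.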
            img_name = "datasets/dataset_cats_dogs_test/0.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            img_name = "datasets/dataset_cats_dogs_test/84.jpg"
            predictions = ptf.Infer(img_name=img_name, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Infer-folder()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
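            # Folder inference: img_dir runs prediction over every image in the directory.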
            inference_dataset = "datasets/dataset_cats_dogs_test"
            output = ptf.Infer(img_dir=inference_dataset, return_raw=True)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Dataset_Params()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
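            # Point the loaded experiment at an external evaluation dataset.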
            ptf.Dataset_Params(dataset_path="datasets/dataset_cats_dogs_eval")
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Dataset()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
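            # Build the data loader for the evaluation set configured above.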
            ptf.Dataset()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    test = "default_eval_infer_Evaluate()"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if (forward):
        try:
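            # Evaluate() returns the overall accuracy and a per-class accuracy breakdown.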
            accuracy, class_based_accuracy = ptf.Evaluate()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")

    return system_dict