def main(mydata):
    """Interactive telemetry console.

    Clears the screen, renders a status banner plus nav/GPS/motor readouts
    taken from ``mydata``, then prompts the operator for a task and
    dispatches it.  Loops until the user selects 'exit'.
    """
    f = Figlet(font='slant')
    running = True
    while running:
        os.system('clear')
        print(f.renderText('Stargate XX'))
        print('system date and time')
        print(datetime.datetime.utcnow())
        print('Status and Data')
        print('Status and Data' + Fore.RED + 'some red text')
        print(Back.GREEN + 'and with a green background')
        print(Style.DIM + 'and in dim text')
        print(Style.RESET_ALL)
        common.print_status('nmod nav status', mydata.get_nmod_nav())
        common.print_status('nmod gps status', mydata.get_nmod_gps())
        print('################ nmod gps status ################')
        print('latitude :' + str(mydata.get_gps_latitude()))
        print('longitude :' + str(mydata.get_gps_longitude()))
        print('altitude :' + str(mydata.get_gps_altitude()))
        print('time UTC :' + str(mydata.get_gps_time()))
        print('################ motor diff ################')
        print('motor 1 :' + str(mydata.get_motor1_diff()))
        print('motor 2 :' + str(mydata.get_motor2_diff()))
        questions = [
            {
                'type': 'checkbox',
                'qmark': '?',
                'message': 'Select task',
                'name': 'tasks',
                'choices': [
                    Separator('= star ='),
                    {'name': 'Vega'},
                    {'name': 'Enif'},
                    {'name': 'Boteln'},
                    Separator('= planets ='),
                    {'name': 'mars'},
                    {'name': 'venus'},
                    {'name': 'saturn'},
                    Separator('= sat and moon ='),
                    {'name': 'moon'},
                    {'name': 'iss'},
                    Separator('= options ='),
                    {'name': 'exit'}
                ]
            }
        ]
        answers = prompt(questions, style=custom_style_2)
        pprint(answers)
        print(len(answers["tasks"]))
        # BUG FIX: the original tested `> 5`, which let up to five selections
        # through even though the message (and the single-task dispatch below)
        # only supports one task at a time.
        if len(answers["tasks"]) > 1:
            print('### Please choose only one task at a time ###')
        elif len(answers["tasks"]) < 1:
            print('### Please choose at least one task ###')
        else:
            if 'Vega' in answers["tasks"]:
                observe.main(mydata)
            # NOTE(review): 'nmod', 'gps', 'manuel_correction' and
            # 'star3_verification' are not offered in `choices` above, so the
            # following branches look unreachable from this menu -- confirm
            # against other callers before removing them.
            elif 'nmod' in answers["tasks"]:
                nav.main(mydata)
                print(mydata.get_nmod_nav())
                nmod_nav_ok = mydata.get_nmod_nav()
            elif 'gps' in answers["tasks"]:
                myGPS.main(mydata)
                print(mydata.get_nmod_gps())
                nmod_gps_ok = mydata.get_nmod_gps()
            elif 'manuel_correction' in answers["tasks"]:
                motor_control.mode_manuel(mydata)
            elif 'star3_verification' in answers["tasks"]:
                test2 = nav.testing()
            elif 'exit' in answers["tasks"]:
                running = False
def test_expert_eval_infer(system_dict):
    """Smoke-test the expert eval/infer pipeline (keras backend).

    Runs object creation, Prototype, Dataset_Params, reset_transforms,
    Dataset and Evaluate in order.  Each step runs only if every previous
    step passed; Pass/Fail/Skipped bookkeeping is accumulated in and
    returned via ``system_dict``.
    """
    forward = True
    ktf = None  # prototype instance, created by the first step

    def _step(test, action):
        # Run one pipeline step, recording Pass/Fail/Skipped exactly like
        # the original inline try/except blocks did.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ktf
        ktf = prototype(verbose=0)

    def _evaluate():
        # Keep the tuple unpack: a non-2-tuple return must still count as Fail.
        accuracy, class_based_accuracy = ktf.Evaluate()

    _step("expert_eval_infer_object_creation", _create)
    _step("expert_eval_infer_Prototype()",
          lambda: ktf.Prototype("sample-project-1", "sample-experiment-4",
                                eval_infer=True))
    _step("expert_eval_infer_Dataset_Params()",
          lambda: ktf.Dataset_Params(
              dataset_path="../datasets/dataset_csv_id/test",
              path_to_csv="../datasets/dataset_csv_id/test.csv",
              delimiter=","))
    _step("expert_eval_infer_reset_transforms()",
          lambda: ktf.reset_transforms(test=True))
    _step("expert_eval_infer_Dataset()", lambda: ktf.Dataset())
    _step("expert_eval_infer_Evaluate()", _evaluate)
    return system_dict
def test_default_train(system_dict):
    """Smoke-test default-mode training (keras backend).

    Downloads the sample datasets if missing, then runs object creation,
    Prototype, Default and Train in order, accumulating Pass/Fail/Skipped
    results in and returning ``system_dict``.
    """
    forward = True
    if not os.path.isdir("datasets"):
        # Fetch and unpack the test datasets from Google Drive.
        # FIX: the command was pasted from a notebook -- the leading "! " is a
        # Jupyter shell-escape (meaningless under os.system), and "\1\n" in a
        # plain Python string became chr(1)+newline, so sed never saw the
        # backreference and the confirm token was never extracted.
        os.system(
            'wget --load-cookies /tmp/cookies.txt '
            '"https://docs.google.com/uc?export=download&confirm=$('
            'wget --save-cookies /tmp/cookies.txt --keep-session-cookies '
            "--no-check-certificate 'https://docs.google.com/uc?export=download"
            "&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | "
            r"sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')"
            '&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2" '
            '-O datasets.zip && rm -rf /tmp/cookies.txt'
        )
        os.system("unzip -qq datasets.zip")

    ktf = None  # prototype instance, created by the first step

    def _step(test, action):
        # Run one pipeline step, recording Pass/Fail/Skipped exactly like
        # the original inline try/except blocks did.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ktf
        print("In here")
        ktf = prototype(verbose=0)

    _step("default_train_object_creation", _create)
    _step("default_train_Prototype()",
          lambda: ktf.Prototype("sample-project-1", "sample-experiment-1"))
    _step("default_train_Default()",
          lambda: ktf.Default(dataset_path="datasets/dataset_cats_dogs_train",
                              model_name="resnet50",
                              freeze_base_network=True,
                              num_epochs=2))
    _step("default_train_Train()", lambda: ktf.Train())
    return system_dict
def test_layer_add(system_dict):
    """Assemble a branched (add-merge) custom network, compile it, and push a
    single dummy tensor through the resulting torch model, recording
    Pass/Fail/Skipped bookkeeping in ``system_dict``.
    """
    forward = True
    test = "test_layer_add"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if forward:
        try:
            gtf = prototype(verbose=0)
            gtf.Prototype("sample-project-1", "sample-experiment-1")

            def conv_bn(channels):
                # Convolution followed by batch-norm, as a 2-element list.
                return [gtf.convolution(output_channels=channels),
                        gtf.batch_normalization()]

            layers = conv_bn(16) + [gtf.relu(), gtf.max_pooling()]
            # Three parallel branches merged with an elementwise add.
            branch_a = conv_bn(16) + conv_bn(16)
            branch_b = conv_bn(16)
            branch_c = [gtf.identity()]
            layers.append([branch_a, branch_b, branch_c, gtf.add()])
            layers += conv_bn(16) + [gtf.relu(), gtf.max_pooling()]
            layers.append(gtf.flatten())
            layers.append(gtf.fully_connected(units=1024))
            layers.append(gtf.dropout(drop_probability=0.2))
            layers.append(gtf.fully_connected(units=2))
            gtf.Compile_Network(layers, data_shape=(3, 64, 64), use_gpu=False)
            sample = torch.randn(1, 3, 64, 64)
            _ = gtf.system_dict["local"]["model"](sample)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")
    return system_dict
def test_switch_expert(system_dict):
    """End-to-end Switch_Mode test in expert mode (gluon backend).

    Loads a saved model for eval/infer (single image, folder, dataset
    evaluation), then switches back to training mode and retrains with
    expert-level dataset/model/optimizer settings.  Each step runs only if
    every previous one passed; results accumulate in ``system_dict``.
    """
    forward = True
    gtf = None  # prototype instance, created by the first step

    def _step(test, action):
        # Run one pipeline step, recording Pass/Fail/Skipped exactly like
        # the original inline try/except blocks did.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal gtf
        gtf = prototype(verbose=0)

    def _infer_images():
        predictions = gtf.Infer(
            img_name="../datasets/dataset_cats_dogs_test/0.jpg",
            return_raw=True)
        predictions = gtf.Infer(
            img_name="../datasets/dataset_cats_dogs_test/84.jpg",
            return_raw=True)

    def _evaluate():
        # Keep the tuple unpack: a non-2-tuple return must still count as Fail.
        accuracy, class_based_accuracy = gtf.Evaluate()

    def _apply_transforms():
        gtf.apply_random_vertical_flip(train=True, val=True)
        gtf.apply_random_horizontal_flip(train=True, val=True)
        gtf.apply_random_lighting(train=True, val=True)
        gtf.apply_normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225],
                            train=True, val=True, test=True)

    _step("switch_expert_object_creation", _create)
    _step("switch_expert_Prototype()",
          lambda: gtf.Prototype("sample-project-1", "sample-experiment-6"))
    _step("switch_expert_switch_mode()",
          lambda: gtf.Switch_Mode(eval_infer=True))
    _step("switch_expert_Model_Params()",
          lambda: gtf.Model_Params(model_path=[
              "workspace/sample-project-1/sample-experiment-5/output/models/intermediate_model_-symbol.json",
              "workspace/sample-project-1/sample-experiment-5/output/models/intermediate_model_-0000.params"
          ], use_gpu=True))
    _step("switch_expert_Model()", lambda: gtf.Model())
    _step("switch_expert_update_input_size()",
          lambda: gtf.update_input_size(224))
    _step("switch_expert_Infer-Img()", _infer_images)
    _step("switch_expert_Infer-Folder()",
          lambda: gtf.Infer(img_dir="../datasets/dataset_cats_dogs_test/",
                            return_raw=True))
    _step("switch_expert_Dataset_Params()",
          lambda: gtf.Dataset_Params(
              dataset_path="../datasets/dataset_cats_dogs_eval",
              input_size=224))
    _step("switch_expert_Dataset()", lambda: gtf.Dataset())
    _step("switch_expert_Evaluate()", _evaluate)
    # Flip back to training mode and run the expert training flow.
    _step("switch_expert_switch_mode()",
          lambda: gtf.Switch_Mode(train=True))
    _step("expert_train_Dataset_Params()",
          lambda: gtf.Dataset_Params(
              dataset_path="../datasets/dataset_cats_dogs_train",
              split=0.9,
              input_size=224,
              batch_size=16,
              shuffle_data=True,
              num_processors=3))
    _step("expert_train_apply_transforms()", _apply_transforms)
    _step("expert_train_Dataset()", lambda: gtf.Dataset())
    _step("expert_train_Model_Params()",
          lambda: gtf.Model_Params(model_name="resnet18_v1",
                                   freeze_base_network=True,
                                   use_gpu=True,
                                   use_pretrained=True))
    _step("expert_train_Model()", lambda: gtf.Model())
    _step("expert_train_lr_multistep_decrease()",
          lambda: gtf.lr_multistep_decrease([1, 3], gamma=0.9))
    _step("expert_train_optimizer_sgd()",
          lambda: gtf.optimizer_sgd(0.001, momentum=0.9))
    _step("expert_train_loss_softmax_crossentropy()",
          lambda: gtf.loss_softmax_crossentropy())
    _step("expert_train_Training_Params()",
          lambda: gtf.Training_Params(
              num_epochs=4,
              display_progress=True,
              display_progress_realtime=True,
              save_intermediate_models=True,
              intermediate_model_prefix="intermediate_model_",
              save_training_logs=True))
    _step("expert_train_Train()", lambda: gtf.Train())
    return system_dict
def test_update_eval_infer(system_dict):
    """Smoke-test updating an eval/infer experiment (keras backend, .h5 model).

    Downloads the sample datasets if missing, then runs object creation,
    Prototype, Dataset_Params, Dataset, update_model_path, Reload and
    Evaluate in order, accumulating results in ``system_dict``.
    """
    forward = True
    if not os.path.isdir("datasets"):
        # Fetch and unpack the test datasets from Google Drive.
        # FIX: the command was pasted from a notebook -- the leading "! " is a
        # Jupyter shell-escape (meaningless under os.system), and "\1\n" in a
        # plain Python string became chr(1)+newline, so sed never saw the
        # backreference and the confirm token was never extracted.
        os.system(
            'wget --load-cookies /tmp/cookies.txt '
            '"https://docs.google.com/uc?export=download&confirm=$('
            'wget --save-cookies /tmp/cookies.txt --keep-session-cookies '
            "--no-check-certificate 'https://docs.google.com/uc?export=download"
            "&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | "
            r"sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')"
            '&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2" '
            '-O datasets.zip && rm -rf /tmp/cookies.txt'
        )
        os.system("unzip -qq datasets.zip")

    ktf = None  # prototype instance, created by the first step

    def _step(test, action):
        # Run one pipeline step, recording Pass/Fail/Skipped exactly like
        # the original inline try/except blocks did.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ktf
        ktf = prototype(verbose=0)

    def _evaluate():
        # Keep the tuple unpack: a non-2-tuple return must still count as Fail.
        accuracy, class_based_accuracy = ktf.Evaluate()

    _step("update_normal_object_creation", _create)
    _step("update_normal_Prototype()",
          lambda: ktf.Prototype("sample-project-1", "sample-experiment-3",
                                eval_infer=True))
    _step("update_normal_Dataset_Params()",
          lambda: ktf.Dataset_Params(
              dataset_path="datasets/dataset_csv_id/test",
              path_to_csv="datasets/dataset_csv_id/test.csv",
              delimiter=","))
    _step("default_eval_infer_Dataset()", lambda: ktf.Dataset())
    _step("update_normal_update_model_path()",
          lambda: ktf.update_model_path(
              "workspace/sample-project-1/sample-experiment-3/output/models/best_model.h5"
          ))
    _step("update_normal_Reload()", lambda: ktf.Reload())
    _step("default_eval_infer_Evaluate()", _evaluate)
    return system_dict
def test_initializer_xavier_uniform(system_dict):
    """Build a small custom CNN with xavier_uniform initialization, compile
    it, and run one dummy forward pass through a tf placeholder, recording
    Pass/Fail/Skipped bookkeeping in ``system_dict``.
    """
    forward = True
    test = "test_initializer_xavier_uniform"
    system_dict["total_tests"] += 1
    print_start(test, system_dict["total_tests"])
    if forward:
        try:
            gtf = prototype(verbose=0)
            gtf.Prototype("sample-project-1", "sample-experiment-1")
            layers = []
            # Three conv stages, each: (conv -> bn -> relu) x2, then a 2x2
            # average pool.  Channel widths double across the first stage.
            for first, second in ((16, 32), (64, 64), (128, 128)):
                for channels in (first, second):
                    layers.append(gtf.convolution(output_channels=channels))
                    layers.append(gtf.batch_normalization())
                    layers.append(gtf.relu())
                layers.append(gtf.average_pooling(kernel_size=2))
            # Classifier head.
            layers.append(gtf.flatten())
            layers.append(gtf.dropout(drop_probability=0.2))
            layers.append(gtf.fully_connected(units=1024))
            layers.append(gtf.dropout(drop_probability=0.2))
            layers.append(gtf.fully_connected(units=2))
            layers.append(gtf.softmax())
            gtf.Compile_Network(layers,
                                data_shape=(3, 32, 32),
                                network_initializer="xavier_uniform")
            x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))
            y = gtf.system_dict["local"]["model"](x)
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")
    else:
        system_dict["skipped_tests_lists"].append(test)
        print_status("Skipped")
    return system_dict
def test_default_eval_infer(system_dict):
    """Smoke-test the default eval/infer pipeline (gluon backend).

    Downloads the sample datasets if missing, then runs object creation,
    Prototype, single-image Infer, folder Infer, Dataset_Params, Dataset and
    Evaluate in order, accumulating results in ``system_dict``.
    """
    forward = True
    if not os.path.isdir("datasets"):
        # Fetch and unpack the test datasets from Google Drive.
        # FIX: the command was pasted from a notebook -- the leading "! " is a
        # Jupyter shell-escape (meaningless under os.system), and "\1\n" in a
        # plain Python string became chr(1)+newline, so sed never saw the
        # backreference and the confirm token was never extracted.
        os.system(
            'wget --load-cookies /tmp/cookies.txt '
            '"https://docs.google.com/uc?export=download&confirm=$('
            'wget --save-cookies /tmp/cookies.txt --keep-session-cookies '
            "--no-check-certificate 'https://docs.google.com/uc?export=download"
            "&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | "
            r"sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')"
            '&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2" '
            '-O datasets.zip && rm -rf /tmp/cookies.txt'
        )
        os.system("unzip -qq datasets.zip")

    gtf = None  # prototype instance, created by the first step

    def _step(test, action):
        # Run one pipeline step, recording Pass/Fail/Skipped exactly like
        # the original inline try/except blocks did.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal gtf
        gtf = prototype(verbose=0)

    def _infer_images():
        predictions = gtf.Infer(
            img_name="datasets/dataset_cats_dogs_test/0.jpg",
            return_raw=True)
        predictions = gtf.Infer(
            img_name="datasets/dataset_cats_dogs_test/84.jpg",
            return_raw=True)

    def _evaluate():
        # Keep the tuple unpack: a non-2-tuple return must still count as Fail.
        accuracy, class_based_accuracy = gtf.Evaluate()

    _step("default_eval_infer_object_creation", _create)
    _step("default_eval_infer_Prototype()",
          lambda: gtf.Prototype("sample-project-1", "sample-experiment-1",
                                eval_infer=True))
    _step("default_eval_infer_Infer-img()", _infer_images)
    _step("default_eval_infer_Infer-folder()",
          lambda: gtf.Infer(img_dir="datasets/dataset_cats_dogs_test",
                            return_raw=True))
    _step("default_eval_infer_Dataset_Params()",
          lambda: gtf.Dataset_Params(
              dataset_path="datasets/dataset_cats_dogs_eval"))
    _step("default_eval_infer_Dataset()", lambda: gtf.Dataset())
    _step("default_eval_infer_Evaluate()", _evaluate)
    return system_dict
def test_update_eval_infer(system_dict):
    """Run the update-then-eval/infer workflow as an ordered chain of steps.

    Steps run in order; after the first failure the remaining steps are
    recorded as skipped. Pass/fail/skip counters and captured exceptions are
    accumulated in ``system_dict``, which is returned.
    """
    forward = True
    ptf = None  # prototype instance shared by all steps below

    def _step(name, action):
        # Execute one named step, recording Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def _create():
        # Plain def (not a lambda) so the shared instance can be rebound.
        nonlocal ptf
        ptf = prototype(verbose=0)

    _step("update_normal_object_creation", _create)
    _step("update_normal_Prototype()",
          lambda: ptf.Prototype("sample-project-1", "sample-experiment-3",
                                eval_infer=True))
    _step("update_normal_Dataset_Params()",
          lambda: ptf.Dataset_Params(
              dataset_path="../datasets/dataset_csv_id/test",
              path_to_csv="../datasets/dataset_csv_id/test.csv",
              delimiter=","))
    # Step labels below kept byte-identical to the historical log output,
    # even where they carry the "default_eval_infer" prefix.
    _step("default_eval_infer_Dataset()", lambda: ptf.Dataset())
    _step("update_normal_update_model_path()",
          lambda: ptf.update_model_path(
              "workspace/sample-project-1/sample-experiment-3/output/models/best_model"
          ))
    _step("update_normal_Reload()", lambda: ptf.Reload())
    _step("default_eval_infer_Evaluate()", lambda: ptf.Evaluate())
    return system_dict
def test_analyse(system_dict):
    """Exercise the hyper-parameter analysis APIs (LR, input size, batch
    size, model and optimizer sweeps) as an ordered chain of steps.

    Downloads and unzips the sample datasets on first use. Steps run in
    order; after the first failure the remaining steps are recorded as
    skipped. Returns the updated ``system_dict``.
    """
    forward = True
    if not os.path.isdir("datasets"):
        # BUGFIX: the sed backreference was written as "\1\n" in a non-raw
        # string, so sed received control characters; "\\1\\n" passes the
        # literal \1 and \n through to sed as intended.
        # NOTE(review): the leading "! " is notebook syntax carried into
        # os.system; under sh it merely negates the exit status — confirm.
        os.system(
            "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("! unzip -qq datasets.zip")

    ktf = None  # prototype instance shared by all steps below

    def _step(name, action):
        # Execute one named step, recording Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ktf
        ktf = prototype(verbose=0)

    # Every sweep uses 40% of the data for 2 epochs and keeps no sub-experiments.
    percent_data = 40
    epochs = 2

    _step("analyse_object_creation", _create)
    _step("analyse_Prototype()",
          lambda: ktf.Prototype("sample-project-1", "sample-experiment-1"))
    _step("analyse_Default()",
          lambda: ktf.Default(dataset_path="datasets/dataset_cats_dogs_train",
                              model_name="mobilenet",
                              freeze_base_network=True,
                              num_epochs=2))
    _step("analyse_Analyse_Learning_Rates()",
          lambda: ktf.Analyse_Learning_Rates("analyse_learning_rates",
                                             [0.1, 0.05],
                                             percent_data,
                                             num_epochs=epochs,
                                             state="keep_none"))
    _step("analyse_Analyse_Input_Sizes()",
          lambda: ktf.Analyse_Input_Sizes("analyse_input_sizes",
                                          [128, 256],
                                          percent_data,
                                          num_epochs=epochs,
                                          state="keep_none"))
    _step("analyse_Analyse_Batch_Sizes()",
          lambda: ktf.Analyse_Batch_Sizes("analyse_batch_sizes",
                                          [2, 3],
                                          percent_data,
                                          num_epochs=epochs,
                                          state="keep_none"))
    # BUGFIX: the original Analyse_Models step referenced `epochs` without
    # assigning it, relying on the value leaking from the previous step's
    # try block; it is now defined once above for all sweep steps.
    _step("analyse_Analyse_Models()",
          lambda: ktf.Analyse_Models("analyse_models",
                                     [["mobilenet", True, True],
                                      ["mobilenet_v2", False, True]],
                                     percent_data,
                                     num_epochs=epochs,
                                     state="keep_none"))
    _step("analyse_Analyse_Optimizers()",
          lambda: ktf.Analyse_Optimizers("analyse_optimizers",
                                         ["sgd", "adam"],
                                         percent_data,
                                         num_epochs=epochs,
                                         state="keep_none"))
    return system_dict
def test_default_eval_infer(system_dict):
    """Run the default eval/infer workflow (single-image and folder
    inference, then dataset evaluation) as an ordered chain of steps.

    Steps run in order; after the first failure the remaining steps are
    recorded as skipped. Returns the updated ``system_dict``.
    """
    forward = True
    ptf = None  # prototype instance shared by all steps below

    def _step(name, action):
        # Execute one named step, recording Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ptf
        ptf = prototype(verbose=0)

    def _infer_images():
        # Two representative images: one from each class of the test set.
        ptf.Infer(img_name="../datasets/dataset_cats_dogs_test/0.jpg",
                  return_raw=True)
        ptf.Infer(img_name="../datasets/dataset_cats_dogs_test/84.jpg",
                  return_raw=True)

    _step("default_eval_infer_object_creation", _create)
    _step("default_eval_infer_Prototype()",
          lambda: ptf.Prototype("sample-project-1", "sample-experiment-1",
                                eval_infer=True))
    _step("default_eval_infer_Infer-img()", _infer_images)
    _step("default_eval_infer_Infer-folder()",
          lambda: ptf.Infer(img_dir="../datasets/dataset_cats_dogs_test",
                            return_raw=True))
    _step("default_eval_infer_Dataset_Params()",
          lambda: ptf.Dataset_Params(
              dataset_path="../datasets/dataset_cats_dogs_eval"))
    _step("default_eval_infer_Dataset()", lambda: ptf.Dataset())
    _step("default_eval_infer_Evaluate()", lambda: ptf.Evaluate())
    return system_dict
def test_switch_default(system_dict):
    """Train with defaults, switch to eval/infer mode, evaluate, then
    switch back to train mode and train again — as an ordered step chain.

    Steps run in order; after the first failure the remaining steps are
    recorded as skipped. Returns the updated ``system_dict``.
    """
    forward = True
    gtf = None  # prototype instance shared by all steps below

    def _step(name, action):
        # Execute one named step, recording Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal gtf
        gtf = prototype(verbose=0)

    _step("switch_default_object_object_creation", _create)
    _step("switch_default_object_Prototype()",
          lambda: gtf.Prototype("sample-project-1", "sample-experiment-5"))
    _step("switch_default_Default()",
          lambda: gtf.Default(dataset_path=[
              "../datasets/dataset_cats_dogs_train",
              "../datasets/dataset_cats_dogs_eval"
          ],
                              model_name="resnet18_v1",
                              freeze_base_network=True,
                              num_epochs=10))
    _step("switch_default_EDA()",
          lambda: gtf.EDA(check_missing=True, check_corrupt=True))
    _step("switch_default_Switch_Mode()",
          lambda: gtf.Switch_Mode(eval_infer=True))
    _step("switch_default_Dataset_Params()",
          lambda: gtf.Dataset_Params(
              dataset_path="../datasets/dataset_cats_dogs_eval"))
    _step("switch_default_Dataset()", lambda: gtf.Dataset())
    _step("switch_default_Evaluate()", lambda: gtf.Evaluate())
    # The label below intentionally repeats: the mode is switched twice
    # (eval -> train) and both events share the same historical step name.
    _step("switch_default_Switch_Mode()",
          lambda: gtf.Switch_Mode(train=True))
    _step("switch_default_Train()", lambda: gtf.Train())
    return system_dict
def test_update_copy_from(system_dict):
    """Create an experiment by copying another, then exercise every
    update_* hyper-parameter setter before reloading and training.

    Downloads and unzips the sample datasets on first use. Steps run in
    order; after the first failure the remaining steps are recorded as
    skipped. Returns the updated ``system_dict``.
    """
    forward = True
    if not os.path.isdir("datasets"):
        # BUGFIX: the sed backreference was written as "\1\n" in a non-raw
        # string, so sed received control characters; "\\1\\n" passes the
        # literal \1 and \n through to sed as intended.
        # NOTE(review): the leading "! " is notebook syntax carried into
        # os.system; under sh it merely negates the exit status — confirm.
        os.system(
            "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("! unzip -qq datasets.zip")

    ktf = None  # prototype instance shared by all steps below

    def _step(name, action):
        # Execute one named step, recording Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ktf
        ktf = prototype(verbose=0)

    def _apply_transforms():
        # Two transforms are applied in one step, matching the original.
        ktf.apply_random_horizontal_flip(train=True, val=True)
        ktf.apply_mean_subtraction(mean=[0.485, 0.456, 0.406],
                                   train=True,
                                   val=True,
                                   test=True)

    _step("update_copy_from_object_creation", _create)
    _step("update_copy_from_Prototype()",
          lambda: ktf.Prototype(
              "sample-project-1",
              "sample-experiment-2",
              copy_from=["sample-project-1", "sample-experiment-1"]))
    _step("update_copy_from_reset_transforms()",
          lambda: ktf.reset_transforms())
    _step("update_copy_from_apply_transforms()", _apply_transforms)
    _step("update_copy_from_update_dataset()",
          lambda: ktf.update_dataset(dataset_path=[
              "datasets/dataset_cats_dogs_train",
              "datasets/dataset_cats_dogs_eval"
          ]))
    _step("update_copy_from_update_input_size()",
          lambda: ktf.update_input_size(224))
    _step("update_copy_from_update_batch_size()",
          lambda: ktf.update_batch_size(6))
    _step("update_copy_from_update_shuffle_data()",
          lambda: ktf.update_shuffle_data(False))
    _step("update_copy_from_update_num_processors()",
          lambda: ktf.update_num_processors(16))
    _step("update_copy_from_update_trainval_split()",
          lambda: ktf.update_trainval_split(0.6))
    _step("update_copy_from_Reload()", lambda: ktf.Reload())
    _step("update_copy_from_EDA()",
          lambda: ktf.EDA(check_missing=True, check_corrupt=True))
    _step("update_copy_from_Train()", lambda: ktf.Train())
    return system_dict
def test_compare(system_dict):
    """Build a comparison across the six sample experiments and generate
    its statistics, as an ordered chain of test steps.

    Downloads and unzips the sample datasets on first use. Steps run in
    order; after the first failure the remaining steps are recorded as
    skipped. Returns the updated ``system_dict``.
    """
    forward = True
    if not os.path.isdir("datasets"):
        # BUGFIX: the sed backreference was written as "\1\n" in a non-raw
        # string, so sed received control characters; "\\1\\n" passes the
        # literal \1 and \n through to sed as intended.
        # NOTE(review): the leading "! " is notebook syntax carried into
        # os.system; under sh it merely negates the exit status — confirm.
        os.system(
            "! wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip && rm -rf /tmp/cookies.txt"
        )
        os.system("! unzip -qq datasets.zip")

    ctf = None  # compare instance shared by all steps below

    def _step(name, action):
        # Execute one named step, recording Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ctf
        ctf = compare(verbose=0)

    def _add_experiments():
        # All six sample experiments take part in the comparison.
        for i in range(1, 7):
            ctf.Add_Experiment("sample-project-1", "sample-experiment-%d" % i)

    _step("compare_object_creation", _create)
    _step("compare_Comparison()", lambda: ctf.Comparison("Sample-Comparison-1"))
    _step("compare_Add_Experiment()", _add_experiments)
    _step("compare_Generate_Statistics()", lambda: ctf.Generate_Statistics())
    return system_dict
def test_update_normal(system_dict):
    """Exercise every update_* setter on a normally-created experiment,
    then reload, analyse and train — as an ordered chain of test steps.

    Steps run in order; after the first failure the remaining steps are
    recorded as skipped. Returns the updated ``system_dict``.
    """
    forward = True
    ktf = None  # prototype instance shared by all steps below

    def _step(name, action):
        # Execute one named step, recording Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def _create():
        nonlocal ktf
        ktf = prototype(verbose=0)

    _step("update_normal_object_creation", _create)
    _step("update_normal_Prototype()",
          lambda: ktf.Prototype("sample-project-1", "sample-experiment-3"))
    _step("update_normal_Default()",
          lambda: ktf.Default(dataset_path="../datasets/dataset_csv_id/train",
                              path_to_csv="../datasets/dataset_csv_id/train.csv",
                              delimiter=",",
                              model_name="resnet50",
                              freeze_base_network=True,
                              num_epochs=10))
    _step("update_normal_update_model_name()",
          lambda: ktf.update_model_name("resnet50"))
    _step("update_normal_update_use_gpu()",
          lambda: ktf.update_use_gpu(False))
    _step("update_normal_update_use_pretrained()",
          lambda: ktf.update_use_pretrained(True))
    _step("update_normal_update_freeze_base_network()",
          lambda: ktf.update_freeze_base_network(False))
    _step("update_normal_update_freeze_layers()",
          lambda: ktf.update_freeze_layers(50))
    _step("update_normal_update_num_epochs()",
          lambda: ktf.update_num_epochs(2))
    _step("update_normal_update_display_progress_realtime()",
          lambda: ktf.update_display_progress_realtime(False))
    _step("update_normal_update_display_progress()",
          lambda: ktf.update_display_progress(False))
    _step("update_normal_update_save_intermediate_models()",
          lambda: ktf.update_save_intermediate_models(False))
    _step("update_normal_update_save_training_logs()",
          lambda: ktf.update_save_training_logs(True))
    _step("update_normal_lr_fixed()", lambda: ktf.lr_fixed())
    _step("update_normal_Reload()", lambda: ktf.Reload())
    _step("update_normal_EDA()",
          lambda: ktf.EDA(check_missing=True, check_corrupt=True))
    _step("update_normal_Estimate_Train_Time()",
          lambda: ktf.Estimate_Train_Time())
    _step("update_normal_Train()", lambda: ktf.Train())
    return system_dict
def test_update_normal(system_dict):
    """Exercise the ``update_*`` API of a freshly prototyped experiment.

    Runs a fixed sequence of sub-tests against a ``prototype`` object and
    records the outcome of each in ``system_dict``.  The chain is
    fail-fast: once any sub-test fails, every following sub-test is
    counted and reported as "Skipped".

    Args:
        system_dict (dict): mutable bookkeeping dict; keys read/written:
            ``total_tests``, ``successful_tests``,
            ``failed_tests_exceptions``, ``failed_tests_lists``,
            ``skipped_tests_lists``.

    Returns:
        dict: the same ``system_dict``, updated in place.
    """
    forward = True
    ptf = None  # created by the first sub-test; later steps close over it

    def run_step(name, action):
        # One sub-test: count it, run `action` unless an earlier step
        # failed, and record Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    if not os.path.isdir("datasets"):
        # Download the shared test datasets from Google Drive.
        # BUG FIX: the sed back-reference must reach the shell as \1\n,
        # hence the doubled backslashes — the original single "\1"/"\n"
        # were consumed by Python (becoming \x01 and a raw newline),
        # which broke the confirm-token extraction.
        # NOTE(review): the leading "! " is a copied-over Jupyter prefix;
        # under `sh -c` it merely negates the exit status — confirm intent.
        download_cmd = (
            "! wget --load-cookies /tmp/cookies.txt "
            "\"https://docs.google.com/uc?export=download&confirm=$(wget "
            "--save-cookies /tmp/cookies.txt --keep-session-cookies "
            "--no-check-certificate "
            "'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' "
            "-O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')"
            "&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip "
            "&& rm -rf /tmp/cookies.txt"
        )
        os.system(download_cmd)
        os.system("! unzip -qq datasets.zip")

    def create_object():
        nonlocal ptf
        ptf = prototype(verbose=0)

    run_step("update_normal_object_creation", create_object)
    run_step("update_normal_Prototype()",
             lambda: ptf.Prototype("sample-project-1", "sample-experiment-3"))
    run_step("update_normal_Default()",
             lambda: ptf.Default(dataset_path="datasets/dataset_csv_id/train",
                                 path_to_csv="datasets/dataset_csv_id/train.csv",
                                 delimiter=",",
                                 model_name="resnet18",
                                 freeze_base_network=True,
                                 num_epochs=10))
    run_step("update_normal_update_model_name()",
             lambda: ptf.update_model_name("resnet50"))
    run_step("update_normal_update_use_gpu()",
             lambda: ptf.update_use_gpu(False))
    run_step("update_normal_update_use_pretrained()",
             lambda: ptf.update_use_pretrained(True))
    run_step("update_normal_update_freeze_base_network()",
             lambda: ptf.update_freeze_base_network(False))
    run_step("update_normal_update_freeze_layers()",
             lambda: ptf.update_freeze_layers(10))
    run_step("update_normal_update_num_epochs()",
             lambda: ptf.update_num_epochs(2))
    run_step("update_normal_update_display_progress_realtime()",
             lambda: ptf.update_display_progress_realtime(False))
    run_step("update_normal_update_display_progress()",
             lambda: ptf.update_display_progress(False))
    run_step("update_normal_update_save_intermediate_models()",
             lambda: ptf.update_save_intermediate_models(False))
    run_step("update_normal_update_save_training_logs()",
             lambda: ptf.update_save_training_logs(True))
    run_step("update_normal_lr_fixed()", lambda: ptf.lr_fixed())
    run_step("update_normal_Reload()", lambda: ptf.Reload())
    run_step("update_normal_EDA()",
             lambda: ptf.EDA(check_missing=True, check_corrupt=True))
    run_step("update_normal_Estimate_Train_Time()",
             lambda: ptf.Estimate_Train_Time())
    run_step("update_normal_Train()", lambda: ptf.Train())
    return system_dict
def test_switch_expert(system_dict):
    """Switch-mode round trip: infer/evaluate on a trained experiment,
    then switch back to training mode and retrain in expert mode.

    Each sub-test is counted in ``system_dict``; the chain is fail-fast
    (everything after the first failure is reported as "Skipped").

    Args:
        system_dict (dict): mutable bookkeeping dict; keys read/written:
            ``total_tests``, ``successful_tests``,
            ``failed_tests_exceptions``, ``failed_tests_lists``,
            ``skipped_tests_lists``.

    Returns:
        dict: the same ``system_dict``, updated in place.
    """
    forward = True
    ptf = None  # created by the first sub-test; later steps close over it

    def run_step(name, action):
        # One sub-test: count it, run `action` unless an earlier step
        # failed, and record Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    if not os.path.isdir("datasets"):
        # Download the shared test datasets from Google Drive.
        # BUG FIX: doubled backslashes so the shell receives the sed
        # back-reference \1\n (the original "\1"/"\n" were eaten by
        # Python string escaping, breaking confirm-token extraction).
        # NOTE(review): the leading "! " is a Jupyter artifact; under
        # `sh -c` it only negates the exit status — confirm intent.
        download_cmd = (
            "! wget --load-cookies /tmp/cookies.txt "
            "\"https://docs.google.com/uc?export=download&confirm=$(wget "
            "--save-cookies /tmp/cookies.txt --keep-session-cookies "
            "--no-check-certificate "
            "'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' "
            "-O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')"
            "&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip "
            "&& rm -rf /tmp/cookies.txt"
        )
        os.system(download_cmd)
        os.system("! unzip -qq datasets.zip")

    def create_object():
        nonlocal ptf
        ptf = prototype(verbose=0)

    run_step("switch_expert_object_creation", create_object)
    run_step("switch_expert_Prototype()",
             lambda: ptf.Prototype("sample-project-1", "sample-experiment-6"))
    run_step("switch_expert_switch_mode()",
             lambda: ptf.Switch_Mode(eval_infer=True))
    # NOTE(review): the model is loaded from sample-experiment-5 while the
    # prototype above targets sample-experiment-6 — confirm this
    # cross-experiment load is intentional.
    run_step("switch_expert_Model_Params()",
             lambda: ptf.Model_Params(
                 model_path="workspace/sample-project-1/sample-experiment-5/output/models/intermediate_model_9",
                 use_gpu=True))
    run_step("switch_expert_Model()", lambda: ptf.Model())
    run_step("switch_expert_update_input_size()",
             lambda: ptf.update_input_size(224))

    def infer_images():
        # Two single-image inferences; raw predictions are discarded —
        # this sub-test only checks that Infer() runs without raising.
        ptf.Infer(img_name="datasets/dataset_cats_dogs_test/0.jpg",
                  return_raw=True)
        ptf.Infer(img_name="datasets/dataset_cats_dogs_test/84.jpg",
                  return_raw=True)

    run_step("switch_expert_Infer-Img()", infer_images)
    run_step("switch_expert_Infer-Folder()",
             lambda: ptf.Infer(img_dir="datasets/dataset_cats_dogs_test/",
                               return_raw=True))
    run_step("switch_expert_Dataset_Params()",
             lambda: ptf.Dataset_Params(
                 dataset_path="datasets/dataset_cats_dogs_eval",
                 input_size=224))
    run_step("switch_expert_Dataset()", lambda: ptf.Dataset())

    def evaluate():
        # Unpack to mirror the expected 2-tuple return; a wrong arity
        # surfaces here as a sub-test failure.
        accuracy, class_based_accuracy = ptf.Evaluate()

    run_step("switch_expert_Evaluate()", evaluate)
    run_step("switch_expert_switch_mode()",
             lambda: ptf.Switch_Mode(train=True))
    run_step("expert_train_Dataset_Params()",
             lambda: ptf.Dataset_Params(
                 dataset_path="datasets/dataset_cats_dogs_train",
                 split=0.9, input_size=224, batch_size=16,
                 shuffle_data=True, num_processors=3))

    def apply_transforms():
        ptf.apply_random_resized_crop(256, train=True, val=True, test=True)
        ptf.apply_random_perspective(train=True, val=True)
        ptf.apply_random_vertical_flip(train=True, val=True)
        ptf.apply_random_horizontal_flip(train=True, val=True)
        ptf.apply_normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225],
                            train=True, val=True, test=True)

    run_step("expert_train_apply_transforms()", apply_transforms)
    run_step("expert_train_Dataset()", lambda: ptf.Dataset())
    run_step("expert_train_Model_Params()",
             lambda: ptf.Model_Params(model_name="resnet18",
                                      freeze_base_network=True,
                                      use_gpu=True,
                                      use_pretrained=True))
    run_step("expert_train_Model()", lambda: ptf.Model())
    run_step("expert_train_lr_multistep_decrease()",
             lambda: ptf.lr_multistep_decrease([1, 3], gamma=0.9))
    run_step("expert_train_optimizer_sgd()",
             lambda: ptf.optimizer_sgd(0.001, momentum=0.9))
    run_step("expert_train_loss_softmax_crossentropy()",
             lambda: ptf.loss_softmax_crossentropy())
    run_step("expert_train_Training_Params()",
             lambda: ptf.Training_Params(
                 num_epochs=4,
                 display_progress=True,
                 display_progress_realtime=True,
                 save_intermediate_models=True,
                 intermediate_model_prefix="intermediate_model_",
                 save_training_logs=True))
    run_step("expert_train_Train()", lambda: ptf.Train())
    return system_dict
def test_expert_train(system_dict):
    """Expert-mode training test on the CSV-id dataset (PyTorch backend).

    Builds a ``prototype`` experiment step by step (dataset, model with
    appended layers, LR schedule, optimizer, loss, training params) and
    trains it.  Each sub-test is counted in ``system_dict``; the chain is
    fail-fast (everything after the first failure is "Skipped").

    Args:
        system_dict (dict): mutable bookkeeping dict; keys read/written:
            ``total_tests``, ``successful_tests``,
            ``failed_tests_exceptions``, ``failed_tests_lists``,
            ``skipped_tests_lists``.

    Returns:
        dict: the same ``system_dict``, updated in place.
    """
    forward = True
    ptf = None  # created by the first sub-test; later steps close over it

    def run_step(name, action):
        # One sub-test: count it, run `action` unless an earlier step
        # failed, and record Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    if not os.path.isdir("datasets"):
        # Download the shared test datasets from Google Drive.
        # BUG FIX: doubled backslashes so the shell receives the sed
        # back-reference \1\n (the original "\1"/"\n" were eaten by
        # Python string escaping, breaking confirm-token extraction).
        # NOTE(review): the leading "! " is a Jupyter artifact; under
        # `sh -c` it only negates the exit status — confirm intent.
        download_cmd = (
            "! wget --load-cookies /tmp/cookies.txt "
            "\"https://docs.google.com/uc?export=download&confirm=$(wget "
            "--save-cookies /tmp/cookies.txt --keep-session-cookies "
            "--no-check-certificate "
            "'https://docs.google.com/uc?export=download&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2' "
            "-O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')"
            "&id=1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2\" -O datasets.zip "
            "&& rm -rf /tmp/cookies.txt"
        )
        os.system(download_cmd)
        os.system("! unzip -qq datasets.zip")

    def create_object():
        nonlocal ptf
        ptf = prototype(verbose=0)

    run_step("expert_train_object_creation", create_object)
    run_step("expert_train_Prototype()",
             lambda: ptf.Prototype("sample-project-1", "sample-experiment-4"))
    run_step("expert_train_Dataset_Params()",
             lambda: ptf.Dataset_Params(
                 dataset_path=["datasets/dataset_csv_id/train",
                               "datasets/dataset_csv_id/val"],
                 path_to_csv=["datasets/dataset_csv_id/train.csv",
                              "datasets/dataset_csv_id/val.csv"],
                 split=0.9, input_size=224, batch_size=16,
                 shuffle_data=True, num_processors=3))
    run_step("expert_train_Dataset()", lambda: ptf.Dataset())
    run_step("expert_train_Model_Params()",
             lambda: ptf.Model_Params(model_name="resnet18",
                                      freeze_base_network=True,
                                      use_gpu=True,
                                      use_pretrained=True))

    def append_layers():
        ptf.append_dropout(probability=0.1)
        ptf.append_linear(final_layer=True)

    run_step("expert_train_append_layer()", append_layers)
    run_step("expert_train_Model()", lambda: ptf.Model())
    run_step("expert_train_lr_step_decrease()",
             lambda: ptf.lr_step_decrease(1, gamma=0.9))
    run_step("expert_train_optimizer_sgd()",
             lambda: ptf.optimizer_sgd(0.001, momentum=0.9))
    run_step("expert_train_loss_softmax_crossentropy()",
             lambda: ptf.loss_softmax_crossentropy())
    run_step("expert_train_Training_Params()",
             lambda: ptf.Training_Params(
                 num_epochs=3,
                 display_progress=True,
                 display_progress_realtime=True,
                 save_intermediate_models=True,
                 intermediate_model_prefix="intermediate_model_",
                 save_training_logs=True))
    run_step("expert_train_Train()", lambda: ptf.Train())
    return system_dict
def test_expert_train(system_dict):
    """Expert-mode training test, Keras-backend variant (``ktf``).

    NOTE(review): this redefines ``test_expert_train`` — an earlier
    function of the same name exists in this file, and this definition
    silently shadows it at import time.  One of the two should be
    renamed; left as-is here to avoid breaking existing callers.

    Unlike the sibling tests, this one assumes the datasets already
    exist under ``../datasets`` and performs no download.

    Args:
        system_dict (dict): mutable bookkeeping dict; keys read/written:
            ``total_tests``, ``successful_tests``,
            ``failed_tests_exceptions``, ``failed_tests_lists``,
            ``skipped_tests_lists``.

    Returns:
        dict: the same ``system_dict``, updated in place.
    """
    forward = True
    ktf = None  # created by the first sub-test; later steps close over it

    def run_step(name, action):
        # One sub-test: count it, run `action` unless an earlier step
        # failed, and record Pass/Fail/Skipped in system_dict.
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(name)
            forward = False
            print_status("Fail")

    def create_object():
        nonlocal ktf
        ktf = prototype(verbose=0)

    run_step("expert_train_object_creation", create_object)
    run_step("expert_train_Prototype()",
             lambda: ktf.Prototype("sample-project-1", "sample-experiment-4"))
    run_step("expert_train_Dataset_Params()",
             lambda: ktf.Dataset_Params(
                 dataset_path=["../datasets/dataset_csv_id/train",
                               "../datasets/dataset_csv_id/val"],
                 path_to_csv=["../datasets/dataset_csv_id/train.csv",
                              "../datasets/dataset_csv_id/val.csv"],
                 split=0.9, input_size=224, batch_size=2,
                 shuffle_data=True, num_processors=3))
    run_step("expert_train_Dataset()", lambda: ktf.Dataset())
    run_step("expert_train_Model_Params()",
             lambda: ktf.Model_Params(model_name="resnet50",
                                      freeze_base_network=True,
                                      use_gpu=True,
                                      gpu_memory_fraction=0.5,
                                      use_pretrained=True))

    def append_layers():
        ktf.append_dropout(probability=0.1)
        ktf.append_linear(final_layer=True)

    run_step("expert_train_append_layer()", append_layers)
    run_step("expert_train_Model()", lambda: ktf.Model())
    run_step("expert_train_lr_step_decrease()",
             lambda: ktf.lr_step_decrease(1, gamma=0.9))
    run_step("expert_train_optimizer_sgd()",
             lambda: ktf.optimizer_sgd(0.0001, momentum=0.9))
    # NOTE(review): the sub-test label says "softmax_crossentropy" but the
    # call is loss_crossentropy() — behavior kept; confirm which is meant.
    run_step("expert_train_loss_softmax_crossentropy()",
             lambda: ktf.loss_crossentropy())
    run_step("expert_train_Training_Params()",
             lambda: ktf.Training_Params(
                 num_epochs=3,
                 display_progress=True,
                 display_progress_realtime=True,
                 save_intermediate_models=True,
                 intermediate_model_prefix="intermediate_model_",
                 save_training_logs=True))
    run_step("expert_train_Train()", lambda: ktf.Train())
    return system_dict
def test_compare(system_dict):
    """Exercise the experiment-comparison workflow end to end.

    Runs object creation, comparison setup, experiment registration and
    statistics generation in sequence.  The first failing step flips
    ``forward`` to False so every later step is recorded as skipped
    instead of executed.

    NOTE(review): relies on module-level helpers ``compare``,
    ``print_start`` and ``print_status`` defined elsewhere in this file.

    Args:
        system_dict (dict): Accumulator with keys ``total_tests``,
            ``successful_tests``, ``failed_tests_exceptions``,
            ``failed_tests_lists`` and ``skipped_tests_lists``.

    Returns:
        dict: The same ``system_dict``, updated in place.
    """
    forward = True
    ctf = None

    def run_step(test_name, action):
        # Run one named step; record Pass/Fail, or Skipped when an
        # earlier step already failed (forward == False).
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test_name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test_name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test_name)
            forward = False
            print_status("Fail")

    def create():
        # Bind the comparison object for use by the later steps.
        nonlocal ctf
        ctf = compare(verbose=0)

    def add_experiments():
        # Register six sample experiments under the same project.
        for i in range(1, 7):
            ctf.Add_Experiment("sample-project-1",
                               "sample-experiment-{}".format(i))

    run_step("compare_object_creation", create)
    run_step("compare_Comparison()",
             lambda: ctf.Comparison("Sample-Comparison-1"))
    run_step("compare_Add_Experiment()", add_experiments)
    run_step("compare_Generate_Statistics()",
             lambda: ctf.Generate_Statistics())

    return system_dict
def test_switch_default(system_dict):
    """Exercise the train / eval-infer mode-switching workflow.

    Downloads and unpacks the sample dataset if missing, then runs the
    prototype through Default setup, EDA, a switch to eval-infer mode,
    evaluation, a switch back to train mode, and training.  The first
    failing step flips ``forward`` so later steps are recorded as
    skipped.

    NOTE(review): relies on module-level helpers ``prototype``,
    ``print_start`` and ``print_status`` defined elsewhere in this file.

    Args:
        system_dict (dict): Accumulator with keys ``total_tests``,
            ``successful_tests``, ``failed_tests_exceptions``,
            ``failed_tests_lists`` and ``skipped_tests_lists``.

    Returns:
        dict: The same ``system_dict``, updated in place.
    """
    forward = True

    if not os.path.isdir("datasets"):
        # BUG FIX: the original commands kept the Jupyter "!" prefix
        # (os.system("! wget ...")), and wrote the sed back-reference as
        # "\1\n" in a non-raw string, which Python turns into the control
        # character \x01 and a literal newline.  The command below drops
        # the "!" and escapes the backslashes so sed receives "\1\n".
        file_id = "1rG-U1mS8hDU7_wM56a1kc-li_zHLtbq2"
        download_cmd = (
            "wget --load-cookies /tmp/cookies.txt "
            "\"https://docs.google.com/uc?export=download&confirm=$(wget "
            "--save-cookies /tmp/cookies.txt --keep-session-cookies "
            "--no-check-certificate "
            "'https://docs.google.com/uc?export=download&id={0}' -O- | "
            "sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id={0}\" "
            "-O datasets.zip && rm -rf /tmp/cookies.txt".format(file_id)
        )
        os.system(download_cmd)
        os.system("unzip -qq datasets.zip")

    ptf = None

    def run_step(test_name, action):
        # Run one named step; record Pass/Fail, or Skipped when an
        # earlier step already failed (forward == False).
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test_name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test_name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test_name)
            forward = False
            print_status("Fail")

    def create():
        # Bind the prototype object for use by the later steps.
        nonlocal ptf
        ptf = prototype(verbose=0)

    run_step("switch_default_object_object_creation", create)
    run_step("switch_default_object_Prototype()",
             lambda: ptf.Prototype("sample-project-1", "sample-experiment-5"))
    run_step("switch_default_Default()",
             lambda: ptf.Default(
                 dataset_path=["datasets/dataset_cats_dogs_train",
                               "datasets/dataset_cats_dogs_eval"],
                 model_name="resnet18",
                 freeze_base_network=True,
                 num_epochs=10))
    run_step("switch_default_EDA()",
             lambda: ptf.EDA(check_missing=True, check_corrupt=True))
    run_step("switch_default_Switch_Mode()",
             lambda: ptf.Switch_Mode(eval_infer=True))
    run_step("switch_default_Dataset_Params()",
             lambda: ptf.Dataset_Params(
                 dataset_path="datasets/dataset_cats_dogs_eval"))
    run_step("switch_default_Dataset()", lambda: ptf.Dataset())
    # Evaluate returns (accuracy, class_based_accuracy); the values are
    # only needed to prove the call succeeds.
    run_step("switch_default_Evaluate()", lambda: ptf.Evaluate())
    run_step("switch_default_Switch_Mode()",
             lambda: ptf.Switch_Mode(train=True))
    run_step("switch_default_Train()", lambda: ptf.Train())

    return system_dict
def test_analyse(system_dict):
    """Exercise the hyper-parameter analysis workflow.

    Runs the prototype through Default setup and then each of the
    Analyse_* sweeps (learning rates, input sizes, batch sizes, models,
    optimizers) on 40% of the data for 2 epochs.  The first failing
    step flips ``forward`` so later steps are recorded as skipped.

    NOTE(review): relies on module-level helpers ``prototype``,
    ``print_start`` and ``print_status`` defined elsewhere in this file.

    Args:
        system_dict (dict): Accumulator with keys ``total_tests``,
            ``successful_tests``, ``failed_tests_exceptions``,
            ``failed_tests_lists`` and ``skipped_tests_lists``.

    Returns:
        dict: The same ``system_dict``, updated in place.
    """
    forward = True
    gtf = None

    def run_step(test_name, action):
        # Run one named step; record Pass/Fail, or Skipped when an
        # earlier step already failed (forward == False).
        nonlocal forward
        system_dict["total_tests"] += 1
        print_start(test_name, system_dict["total_tests"])
        if not forward:
            system_dict["skipped_tests_lists"].append(test_name)
            print_status("Skipped")
            return
        try:
            action()
            system_dict["successful_tests"] += 1
            print_status("Pass")
        except Exception as e:
            system_dict["failed_tests_exceptions"].append(e)
            system_dict["failed_tests_lists"].append(test_name)
            forward = False
            print_status("Fail")

    def create():
        # Bind the prototype object for use by the later steps.
        nonlocal gtf
        gtf = prototype(verbose=0)

    # Shared sweep settings (originals redeclared these per step).
    epochs = 2
    percent_data = 40

    run_step("analyse_object_creation", create)
    run_step("analyse_Prototype()",
             lambda: gtf.Prototype("sample-project-1", "sample-experiment-1"))
    run_step("analyse_Default()",
             lambda: gtf.Default(
                 dataset_path="../datasets/dataset_cats_dogs_train",
                 model_name="resnet18_v1",
                 freeze_base_network=True,
                 num_epochs=2))
    run_step("analyse_Analyse_Learning_Rates()",
             lambda: gtf.Analyse_Learning_Rates(
                 "analyse_learning_rates", [0.1, 0.05], percent_data,
                 num_epochs=epochs, state="keep_none"))
    run_step("analyse_Analyse_Input_Sizes()",
             lambda: gtf.Analyse_Input_Sizes(
                 "analyse_input_sizes", [128, 256], percent_data,
                 num_epochs=epochs, state="keep_none"))
    run_step("analyse_Analyse_Batch_Sizes()",
             lambda: gtf.Analyse_Batch_Sizes(
                 "analyse_batch_sizes", [2, 3], percent_data,
                 num_epochs=epochs, state="keep_none"))
    # BUG FIX: the original omitted the `epochs = 2` assignment in this
    # step and silently relied on `epochs` leaking out of an earlier
    # try-block; `num_epochs` is now always passed explicitly.
    run_step("analyse_Analyse_Models()",
             lambda: gtf.Analyse_Models(
                 "analyse_models",
                 [["resnet18_v1", True, True], ["resnet34_v1", False, True]],
                 percent_data, num_epochs=epochs, state="keep_none"))
    run_step("analyse_Analyse_Optimizers()",
             lambda: gtf.Analyse_Optimizers(
                 "analyse_optimizers", ["sgd", "adam"], percent_data,
                 num_epochs=epochs, state="keep_none"))

    return system_dict