def run_NAS_EA_FA_V2():
    """Run the NAS-EA-FA V2 search and reduce its raw accuracy traces.

    Returns:
        Tuple of (valid_acc, test_acc) curves produced by ``handle_result``
        over the configured ``MAX_TIME_BUDGET`` / ``INTERVAL`` grid.
    """
    acc_valid, acc_test, time_points = NAS_EA_FA_V2()
    return handle_result(acc_valid, acc_test, time_points,
                         MAX_TIME_BUDGET, INTERVAL)
def run_regularized_evolution_algorithm(dataset: str):
    """Run regularized evolution on *dataset* and reduce its accuracy traces.

    Args:
        dataset: Name of the benchmark dataset passed through to the search.

    Returns:
        Tuple of (valid_acc, test_acc) curves produced by ``handle_result``
        over the configured ``MAX_TIME_BUDGET`` / ``INTERVAL`` grid.
    """
    acc_valid, acc_test, time_points = regularized_evolution_algorithm(dataset)
    return handle_result(acc_valid, acc_test, time_points,
                         MAX_TIME_BUDGET, INTERVAL)
def run_random_search(dataset: str):
    """Run random search on *dataset* and reduce its accuracy traces.

    Args:
        dataset: Name of the benchmark dataset passed through to the search.

    Returns:
        Tuple of (valid_acc, test_acc) curves produced by ``handle_result``
        over the configured ``MAX_TIME_BUDGET`` / ``INTERVAL`` grid.
    """
    acc_valid, acc_test, time_points = random_search(dataset)
    return handle_result(acc_valid, acc_test, time_points,
                         MAX_TIME_BUDGET, INTERVAL)
def run_neural_predictor():
    """Run the neural-predictor search and reduce its accuracy traces.

    Returns:
        Tuple of (valid_acc, test_acc) curves produced by ``handle_result``
        over the configured ``MAX_TIME_BUDGET`` / ``INTERVAL`` grid.
    """
    acc_valid, acc_test, time_points = neural_predictor()
    return handle_result(acc_valid, acc_test, time_points,
                         MAX_TIME_BUDGET, INTERVAL)
# Batched single-image inference: load class names, build an input batch,
# run the model without gradients, then print/plot the detections.
coco_names = load_classnames('datasets/coco.names')
num_channels = 3
image_filenames = [r"coco-cat-test.jpg"]

# One slot per input image for the untransformed originals (filled below).
raw_images = [None] * len(image_filenames)
batch = torch.empty(
    (len(image_filenames), num_channels, input_width, input_height),
    dtype=torch.float32)
for b, image_fn in enumerate(image_filenames):
    batch[b, :], raw_images[b] = load_image(image_fn,
                                            (input_height, input_width))
batch = batch.to(device)

with torch.no_grad():
    # NOTE(review): an earlier workaround called model.disable_grad() here
    # because ops inside nested Sequentials appeared to ignore the no_grad
    # context; the dead commented-out call has been removed.
    predictions = model(batch)

# SECURITY: pickle.load executes arbitrary code from the file — only load
# a trusted, locally-generated "palette" file here.
# Fix: use a context manager so the file handle is always closed
# (the original pkl.load(open(...)) leaked it).
with open("palette", "rb") as palette_file:
    palette = pkl.load(palette_file)

# NOTE(review): this 3-arg call (predictions, conf_thresh=0.5, nms_thresh=0.4
# presumably) does not match the 5-arg handle_result used by the run_* helpers
# above — looks like two scripts share this file; confirm which handle_result
# is in scope here.
batch_predictions = handle_result(predictions, 0.5, 0.4)
print_predictions(image_filenames, coco_names, batch_predictions)
plot_predictions(raw_images, coco_names, batch_predictions, palette)
print(builder)