Example #1
def test_save_json():
    experiment_id = "test_save_json"
    environment.init(experiment_id)
    setup_test_environment()
    test_dict = {
        "model_name": "save_json",
        "image_size_height": 128,
        "image_size_width": 64,
        "num_classes": 3,
        "parameters": "test_node",
        "flops": "test_flops",
    }
    if not os.path.exists(environment.EXPERIMENT_DIR):
        os.makedirs(environment.EXPERIMENT_DIR)

    _save_json(name=test_dict["model_name"],
               image_size=(test_dict["image_size_height"],
                           test_dict["image_size_width"]),
               num_classes=test_dict["num_classes"],
               node_param_dict=test_dict["parameters"],
               node_flops_dict=test_dict["flops"])
    output_file = os.path.join(
        environment.EXPERIMENT_DIR,
        "{}_profile.json".format(test_dict["model_name"]))
    with open(output_file, 'r') as fp:
        file_data = json.load(fp)

    assert os.path.isfile(output_file)
    assert test_dict == file_data
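The assertions above pin down the contract of _save_json: it must serialize its arguments to "{name}_profile.json" under environment.EXPERIMENT_DIR, with keys matching test_dict exactly. A minimal sketch of an implementation that would satisfy this test (an assumption for illustration; the real Blueoil function may differ):

import json
import os

from blueoil import environment


def _save_json(name, image_size, num_classes, node_param_dict, node_flops_dict):
    # Hypothetical sketch: collect the profile fields under the key names
    # the test expects and dump them as JSON.
    profile = {
        "model_name": name,
        "image_size_height": image_size[0],
        "image_size_width": image_size[1],
        "num_classes": num_classes,
        "parameters": node_param_dict,
        "flops": node_flops_dict,
    }
    output_file = os.path.join(
        environment.EXPERIMENT_DIR, "{}_profile.json".format(name))
    with open(output_file, "w") as fp:
        json.dump(profile, fp)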
Example #2
def test_build_tfds_object_detection():
    environment.setup_test_environment()

    # Build TFDS Dataset
    config_file = "unit/fixtures/configs/for_build_tfds_object_detection.py"
    run(config_file, overwrite=True)

    # Check that the built dataset can be loaded with the same config file
    experiment_id = "tfds_object_detection"
    train_run(config_file, experiment_id, recreate=True)

    # Check that the dataset was built correctly
    train_data_num = 3
    validation_data_num = 2
    config = config_util.load(config_file)

    train_dataset = setup_dataset(TFDSObjectDetection,
                                  subset="train",
                                  batch_size=config.BATCH_SIZE,
                                  **config.DATASET.TFDS_KWARGS)

    validation_dataset = setup_dataset(TFDSObjectDetection,
                                       subset="validation",
                                       batch_size=config.BATCH_SIZE,
                                       **config.DATASET.TFDS_KWARGS)

    assert train_dataset.num_per_epoch == train_data_num
    assert validation_dataset.num_per_epoch == validation_data_num

    assert train_dataset.num_max_boxes == validation_dataset.num_max_boxes
    num_max_boxes = train_dataset.num_max_boxes

    for _ in range(train_data_num):
        images, labels = train_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == config.BATCH_SIZE
        assert images.shape[1] == config.IMAGE_SIZE[0]
        assert images.shape[2] == config.IMAGE_SIZE[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == config.BATCH_SIZE
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(validation_data_num):
        images, labels = validation_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == config.BATCH_SIZE
        assert images.shape[1] == config.IMAGE_SIZE[0]
        assert images.shape[2] == config.IMAGE_SIZE[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == config.BATCH_SIZE
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
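The shape checks encode the dataset contract: images arrive as NHWC batches, and labels are padded out to num_max_boxes rows of 5 values each. A hedged sketch of unpacking such a label batch (the field order [x, y, w, h, class_id] is an assumption, not something the test confirms):

import numpy as np


def split_labels(labels):
    # Hypothetical helper for a (batch, num_max_boxes, 5) label array.
    boxes = labels[:, :, :4]     # per-box coordinates, assumed [x, y, w, h]
    class_ids = labels[:, :, 4]  # assumed class id per box
    return boxes, class_ids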
Example #3
def test_export():

    config_file = "unit/fixtures/configs/for_export.py"
    expriment_id = "test_export"
    train_run(config_file, expriment_id, recreate=True, profile_step=7)

    setup_test_environment()

    run(experiment_id, None, (None, None), [], None)
Example #4
def test_predict_object_detection():

    config_file = "unit/fixtures/configs/for_predict_object_detection.py"
    experiment_id = "test_predict_object_detection"
    train_run(config_file, experiment_id, recreate=True)

    setup_test_environment()

    run("unit/fixtures/sample_images", "outputs", expriment_id, None, None, save_images=True)
Example #5
def test_profile():

    config_file = "unit/fixtures/configs/for_profile.py"
    experiment_id = "test_profile"
    train_run(config_file, experiment_id, recreate=True)

    setup_test_environment()

    run(experiment_id, None, None, 2, [])
Example #6
@pytest.fixture
def set_test_environment():
    """Set test environment"""
    print("set test environment")

    yield environment.setup_test_environment()

    # By using a yield statement instead of return, all the code after the yield statement serves as the teardown code:
    # See also: https://docs.pytest.org/en/latest/fixture.html#fixture-finalization-executing-teardown-code
    environment.teardown_test_environment()
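A hedged usage sketch: a test that lists the fixture by name as a parameter gets the set-up environment, and teardown_test_environment() runs automatically once the test finishes (the test name and body below are illustrative):

def test_with_clean_environment(set_test_environment):
    # setup_test_environment() has already run by this point; the code
    # after the fixture's yield runs as teardown once this test returns,
    # even if an assertion fails.
    pass  # illustrative test body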
Example #7
        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(2):
        images, labels = validation_dataset.feed()
        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5


if __name__ == '__main__':
    from blueoil.environment import setup_test_environment
    setup_test_environment()

    test_delta_mark_classification()
    test_delta_mark_classification_has_validation_path()

    test_delta_mark_object_detection()
    test_delta_mark_object_detection_has_validation_path()
    dataset = Dummy(subset="train", batch_size=batch_size)
    dataset_iterator = DatasetIterator(dataset)

    for i in range(0, 10):
        images, labels = next(dataset_iterator)
        assert images.shape[0] == batch_size
        assert labels.shape[0] == batch_size


def test_dataset_iterator_batch_order():
    """Assert that data given by iterator is same whether enabele_prefetch ture or false."""

    batch_size = 8
    dataset = Dummy(subset="train", batch_size=batch_size)
    dataset_iterator = DatasetIterator(dataset, seed=10, enable_prefetch=False)
    prefetch_dataset_iterator = DatasetIterator(dataset, seed=10, enable_prefetch=True)

    for i in range(0, 30):
        images, labels = next(dataset_iterator)
        prefetch_images, prefetch_labels = next(prefetch_dataset_iterator)

        assert np.all(images == prefetch_images)
        assert np.all(labels == prefetch_labels)


if __name__ == '__main__':
    from blueoil import environment
    environment.setup_test_environment()
    test_dataset_iterator_batch_size()
    test_dataset_iterator_batch_order()
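
These iterator tests rely on a Dummy dataset that is deterministic per index, so that two iterators constructed with the same seed yield identical batches with and without prefetching. A minimal stand-in consistent with how the tests use it (hypothetical; the real fixture in the Blueoil test suite may look different):

import numpy as np


class Dummy:
    # Hypothetical stand-in: samples are a pure function of the index,
    # so seeded iteration is reproducible across iterators.
    def __init__(self, subset="train", batch_size=8):
        self.subset = subset
        self.batch_size = batch_size

    def __len__(self):
        return 100

    def __getitem__(self, i):
        image = np.full((32, 32, 3), i % 256, dtype=np.uint8)
        label = np.array([i % 10])
        return image, label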