Code example #1
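All snippets on this page are pytest tests from blueoil's lmnet dataset and evaluation code. Each is a fragment, so the surrounding imports and the STEP_SIZE constant are not shown. A minimal sketch of what they presumably look like, assuming the lmnet package layout (the exact module paths and the STEP_SIZE value are assumptions, not confirmed by this listing):

import numpy as np

from lmnet.datasets.dataset_iterator import DatasetIterator
from lmnet.pre_processor import Resize, ResizeWithGtBoxes

STEP_SIZE = 5  # number of batches each test pulls per loop (assumed value)

This first test exercises the combined Pascal VOC 2007+2012 detection dataset with skip_difficult=False: it checks the reported maximum box count (56), the sizes of the train+val splits of both years, and that every fed batch has images of shape (batch, height, width, 3) and labels of shape (batch, num_max_boxes, 5). Note that the validation subset of the combined dataset corresponds to the VOC 2007 test set (4,952 images).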
def test_pascalvoc_2007_2012_no_skip_difficult():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc20072012(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        skip_difficult=False,
    )
    dataset = DatasetIterator(dataset)
    num_max_boxes = 56

    num_train_val_2007 = 2501 + 2510
    num_train_val_2012 = 5717 + 5823
    num_test_2007 = 4952

    assert dataset.num_max_boxes == num_max_boxes
    assert Pascalvoc20072012.count_max_boxes(
        skip_difficult=False) == num_max_boxes
    assert dataset.num_per_epoch == num_train_val_2007 + num_train_val_2012

    val_dataset = Pascalvoc20072012(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
        skip_difficult=False)
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == num_test_2007

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Code example #2
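BDD100K object detection: with max_boxes=100 the loader should report 70,000 training and 10,000 validation examples, and every batch should have images shaped (batch, 320, 320, 3) and labels shaped (batch, 100, 5).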
def test_bdd100k():
    batch_size = 3
    image_size = [320, 320]

    num_max_boxes = 100

    num_train = 70000
    num_val = 10000

    dataset = BDD100K(batch_size=batch_size,
                      max_boxes=num_max_boxes,
                      pre_processor=ResizeWithGtBoxes(image_size))
    dataset = DatasetIterator(dataset)

    assert dataset.num_max_boxes == num_max_boxes
    assert dataset.num_per_epoch == num_train

    val_dataset = BDD100K(subset="validation",
                          batch_size=batch_size,
                          max_boxes=num_max_boxes,
                          pre_processor=ResizeWithGtBoxes(image_size))
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == num_val

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Code example #3
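A smoke test for the Open Images V4 bounding-box loader: the maximum box count is read from the dataset itself and cross-checked against the class method count_max_boxes(), then five batches are shape-checked.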
def test_open_images_v4_object_detection():
    batch_size = 1
    image_size = [256, 256]
    dataset = OpenImagesV4BoundingBox(batch_size=batch_size,
                                      pre_processor=ResizeWithGtBoxes(image_size))
    dataset = DatasetIterator(dataset)

    num_max_boxes = dataset.num_max_boxes
    assert dataset.num_max_boxes == OpenImagesV4BoundingBox.count_max_boxes()

    for _ in range(5):
        images, labels = dataset.feed()

        # _show_images_with_boxes(images, labels)

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Code example #4
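CIFAR-10 classification resized to 256×512: 10 classes, 50,000 training and 10,000 validation examples, with labels of shape (batch, num_classes).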
def test_cifar10():
    batch_size = 3
    image_size = [256, 512]
    dataset = Cifar10(batch_size=batch_size, pre_processor=Resize(image_size))
    dataset = DatasetIterator(dataset)
    val_dataset = Cifar10(subset="validation",
                          batch_size=batch_size,
                          pre_processor=Resize(image_size))
    val_dataset = DatasetIterator(val_dataset)

    assert dataset.num_classes == 10
    assert dataset.num_per_epoch == 50000
    assert val_dataset.num_per_epoch == 10000

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Code example #5
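The same detection pattern for Pascal VOC 2012 alone: 39 maximum boxes, 5,717 training and 5,823 validation images.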
def test_pascalvoc_2012():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2012(batch_size=batch_size,
                            pre_processor=ResizeWithGtBoxes(image_size))
    num_max_boxes = 39
    dataset = DatasetIterator(dataset)

    assert dataset.num_max_boxes == num_max_boxes
    assert Pascalvoc2012.count_max_boxes() == num_max_boxes
    assert dataset.num_per_epoch == 5717

    val_dataset = Pascalvoc2012(subset="validation",
                                batch_size=batch_size,
                                pre_processor=ResizeWithGtBoxes(image_size))
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == 5823

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Code example #6
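An augmentation smoke test: Hue((-10, 10)) randomly shifts image hue within the given range. The test only feeds and displays five batches; it has no assertions.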
def test_hue():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=Hue((-10, 10)),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
Code example #7
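The same smoke-test pattern with FlipTopBottom(), which randomly flips images and their ground-truth boxes vertically.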
def test_flip_top_bottom():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=FlipTopBottom(),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
Code example #8
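ILSVRC 2012 classification: 1,281,167 training and 50,000 validation examples, with the same image and label shape checks as the CIFAR-10 test.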
def test_ilsvrc_2012():
    batch_size = 3
    image_size = [256, 512]
    dataset = Ilsvrc2012(batch_size=batch_size,
                         pre_processor=Resize(image_size))
    dataset = DatasetIterator(dataset)
    val_dataset = Ilsvrc2012(subset="validation",
                             batch_size=batch_size,
                             pre_processor=Resize(image_size))
    val_dataset = DatasetIterator(val_dataset)

    num_train = 1281167
    num_validation = 50000

    assert dataset.num_per_epoch == num_train
    assert val_dataset.num_per_epoch == num_validation

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Code example #9
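SSDRandomCrop randomly crops images and adjusts the ground-truth boxes to match. With image_size = [height, width] = [256, 512], the assertions imply a (x, y, w, h, class) label layout: box widths (index 2) must stay within 512 and heights (index 3) within 256 after cropping.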
def test_ssd_random_crop():
    batch_size = 3
    image_size = [256, 512]
    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=SSDRandomCrop(),
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
        assert np.all(labels[:, :, 2] <= 512)
        assert np.all(labels[:, :, 3] <= 256)
Code example #10
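Sequence composes several augmentors and applies them in order, so a single dataset pipeline can combine horizontal and vertical flips with SSD-style random crops.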
def test_sequence():
    batch_size = 3
    image_size = [256, 512]
    augmentor = Sequence([
        FlipLeftRight(),
        FlipTopBottom(),
        SSDRandomCrop(),
    ])

    dataset = Pascalvoc2007(
        batch_size=batch_size, pre_processor=ResizeWithGtBoxes(image_size),
        augmentor=augmentor,
    )
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()
        _show_images_with_boxes(images, labels)
Code example #11
File: test_div2k.py Project: yasumura-lm/blueoil
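This DIV2K test iterates a full epoch of the given subset. subset and set_test_environment are pytest fixtures; presumably subset is parametrized over the available splits and set_test_environment points the loader at local test data, though neither decorator is shown in this listing. DIV2K is a super-resolution dataset with no class labels, hence the final labels[0] is None check.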
def test_can_iterate(set_test_environment, subset):
    batch_size = 1
    image_size = (100, 100)

    dataset = Div2k(subset, batch_size=batch_size)
    iterator = DatasetIterator(dataset)

    for _ in range(len(dataset)):
        images, labels = iterator.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels[0] is None
Code example #12
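Open Images V4 in its classification form: five batches are checked for the usual image and label shapes.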
def test_open_images_v4_classification():
    batch_size = 1
    image_size = [256, 256]
    dataset = OpenImagesV4Classification(batch_size=batch_size,
                                         pre_processor=Resize(image_size))
    dataset = DatasetIterator(dataset)

    for _ in range(5):
        images, labels = dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Code example #13
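Caltech101 classification: 101 classes, with the usual image and label shape checks.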
def test_caltech101():
    batch_size = 3
    image_size = [256, 512]
    dataset = Caltech101(batch_size=batch_size,
                         pre_processor=Resize(image_size))
    dataset = DatasetIterator(dataset)

    assert dataset.num_classes == 101

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == dataset.num_classes
Code example #14
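TargetClassesPascalvoc2007 restricts VOC 2007 to a subset of target classes, which shrinks the splits to 240/254/433 examples and the maximum box count to 12. Train, validation, and test subsets are all constructed and shape-checked.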
def test_pascalvoc_2007_with_target_classes():
    batch_size = 3
    image_size = [256, 512]

    num_max_boxes = 12
    num_train = 240
    num_validation = 254
    num_test = 433

    assert TargetClassesPascalvoc2007.count_max_boxes() == num_max_boxes

    dataset = TargetClassesPascalvoc2007(
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size),
    )
    dataset = DatasetIterator(dataset)
    assert dataset.num_per_epoch == num_train

    val_dataset = TargetClassesPascalvoc2007(
        subset="validation",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size))
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == num_validation

    test_dataset = TargetClassesPascalvoc2007(
        subset="test",
        batch_size=batch_size,
        pre_processor=ResizeWithGtBoxes(image_size))
    test_dataset = DatasetIterator(test_dataset)
    assert test_dataset.num_per_epoch == num_test

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = test_dataset.feed()
        # _show_images_with_boxes(images, labels)
        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Code example #15
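The single-dataset counterpart of example #1: Pascal VOC 2007 with skip_difficult=False has a maximum of 42 boxes per image, and the train/validation/test splits (2,501/2,510/4,952 images) are each iterated and shape-checked.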
def test_pascalvoc_2007_not_skip_difficult():
    batch_size = 3
    image_size = [256, 512]

    num_max_boxes = 42
    num_train = 2501
    num_validation = 2510
    num_test = 4952

    assert Pascalvoc2007.count_max_boxes(skip_difficult=False) == num_max_boxes

    dataset = Pascalvoc2007(batch_size=batch_size,
                            pre_processor=ResizeWithGtBoxes(image_size),
                            skip_difficult=False)
    dataset = DatasetIterator(dataset)
    assert dataset.num_per_epoch == num_train

    val_dataset = Pascalvoc2007(subset="validation",
                                batch_size=batch_size,
                                pre_processor=ResizeWithGtBoxes(image_size),
                                skip_difficult=False)
    val_dataset = DatasetIterator(val_dataset)
    assert val_dataset.num_per_epoch == num_validation

    test_dataset = Pascalvoc2007(subset="test",
                                 batch_size=batch_size,
                                 pre_processor=ResizeWithGtBoxes(image_size),
                                 skip_difficult=False)
    test_dataset = DatasetIterator(test_dataset)
    assert test_dataset.num_per_epoch == num_test

    for _ in range(STEP_SIZE):
        images, labels = dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = val_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5

    for _ in range(STEP_SIZE):
        images, labels = test_dataset.feed()

        assert isinstance(images, np.ndarray)
        assert images.shape[0] == batch_size
        assert images.shape[1] == image_size[0]
        assert images.shape[2] == image_size[1]
        assert images.shape[3] == 3

        assert isinstance(labels, np.ndarray)
        assert labels.shape[0] == batch_size
        assert labels.shape[1] == num_max_boxes
        assert labels.shape[2] == 5
Code example #16
File: evaluate.py Project: wtnb93/blueoil
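evaluate() restores a trained network from a checkpoint (searching CHECKPOINTS_DIR when no restore_path is given), rebuilds the model graph with the TF1 graph/session API, runs one full pass over the test subset (falling back to validation when the dataset has no test split), and writes both the per-step summaries and the final metric values to a TensorBoard log under /evaluate.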
def evaluate(config, restore_path):
    if restore_path is None:
        restore_file = executor.search_restore_filename(
            environment.CHECKPOINTS_DIR)
        restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

    if not os.path.exists("{}.index".format(restore_path)):
        raise Exception("restore file {} does not exist.".format(restore_path))

    print("restore_path:", restore_path)

    DatasetClass = config.DATASET_CLASS
    ModelClass = config.NETWORK_CLASS
    network_kwargs = dict(
        (key.lower(), val) for key, val in config.NETWORK.items())
    dataset_kwargs = dict(
        (key.lower(), val) for key, val in config.DATASET.items())

    if "test" in DatasetClass.available_subsets:
        subset = "test"
    else:
        subset = "validation"

    validation_dataset = DatasetIterator(DatasetClass(subset=subset,
                                                      **dataset_kwargs),
                                         seed=0)

    graph = tf.Graph()
    with graph.as_default():

        if ModelClass.__module__.startswith("lmnet.networks.object_detection"):
            model = ModelClass(
                classes=validation_dataset.classes,
                num_max_boxes=validation_dataset.num_max_boxes,
                is_debug=config.IS_DEBUG,
                **network_kwargs,
            )

        else:
            model = ModelClass(
                classes=validation_dataset.classes,
                is_debug=config.IS_DEBUG,
                **network_kwargs,
            )

        global_step = tf.Variable(0, name="global_step", trainable=False)
        is_training = tf.constant(False, name="is_training")

        images_placeholder, labels_placeholder = model.placeholderes()

        output = model.inference(images_placeholder, is_training)

        metrics_ops_dict, metrics_update_op = model.metrics(
            output, labels_placeholder)
        model.summary(output, labels_placeholder)

        summary_op = tf.summary.merge_all()

        metrics_summary_op, metrics_placeholders = executor.prepare_metrics(
            metrics_ops_dict)

        init_op = tf.global_variables_initializer()
        reset_metrics_op = tf.local_variables_initializer()
        saver = tf.train.Saver(max_to_keep=None)

    session_config = None  # tf.ConfigProto(log_device_placement=True)
    sess = tf.Session(graph=graph, config=session_config)
    sess.run([init_op, reset_metrics_op])

    validation_writer = tf.summary.FileWriter(environment.TENSORBOARD_DIR +
                                              "/evaluate")

    saver.restore(sess, restore_path)

    last_step = sess.run(global_step)

    # init metrics values
    test_step_size = int(
        math.ceil(validation_dataset.num_per_epoch / config.BATCH_SIZE))
    print("test_step_size", test_step_size)

    for test_step in range(test_step_size):
        print("test_step", test_step)

        images, labels = validation_dataset.feed()
        feed_dict = {
            images_placeholder: images,
            labels_placeholder: labels,
        }

        # Summarize at only last step.
        if test_step == test_step_size - 1:
            summary, _ = sess.run([summary_op, metrics_update_op],
                                  feed_dict=feed_dict)
            validation_writer.add_summary(summary, last_step)
        else:
            sess.run([metrics_update_op], feed_dict=feed_dict)

    metrics_values = sess.run(list(metrics_ops_dict.values()))
    metrics_feed_dict = {
        placeholder: value
        for placeholder, value in zip(metrics_placeholders, metrics_values)
    }
    metrics_summary, = sess.run(
        [metrics_summary_op],
        feed_dict=metrics_feed_dict,
    )
    validation_writer.add_summary(metrics_summary, last_step)