Example #1
# NOTE: assumed imports. json/numpy are standard; the Tasks enum and the
# JsonOutput helper live in the project under test, so the paths below are
# a guess and may need adjusting to your layout.
import json

import numpy as np

from lmnet.common import Tasks
from lmnet.utils.output import JsonOutput


def test_classification_json():
    task = Tasks.CLASSIFICATION
    image_size = (120, 160)
    classes = ("aaa", "bbb")
    params = {
        "task": task,
        "classes": classes,
        "image_size": image_size,
        "data_format": "NCHW",
    }

    batch_size = 2
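    # Fake per-class probabilities, shape (batch_size, num_classes).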
    inputs = np.random.uniform(size=[batch_size, len(classes)])

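    # Original (pre-resize) images as uint8 HWC arrays; note that 320x280
    # deliberately differs from the network image_size of 120x160.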
    raw_images = np.zeros((batch_size, 320, 280, 3), dtype=np.uint8)
    image_files = ["dummy.png", "dumpy_2.pny"]

    call = JsonOutput(**params)

    json_output = call(inputs, raw_images, image_files)
    output = json.loads(json_output)

    assert output["classes"] == [{"id": i, "name": name} for i, name in enumerate(classes)]
    assert output["task"] == str(task.value)

    results = output["results"]
    assert [result["file_path"] for result in results] == image_files

    for i in range(batch_size):
        predictions = results[i]["prediction"]
        assert [prediction["probability"] for prediction in predictions] == inputs[i, :].astype(str).tolist()
Example #2
# NOTE: assumed imports, as in Example #1 (paths may differ per project).
import json

import numpy as np

from lmnet.common import Tasks
from lmnet.utils.output import JsonOutput


def test_object_detection_json():
    task = Tasks.OBJECT_DETECTION
    image_size = (120, 160)
    classes = ("aaa", "bbb")
    params = {
        "task": task,
        "classes": classes,
        "image_size": image_size,
        "data_format": "NCHW",
    }

    batch_size = 2
    box_sizes = (3, 5)

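    # Each box row is four coordinates followed by a class id (column 4) and
    # a confidence score (column 5); the two images get 3 and 5 boxes each.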
    boxes_1 = np.concatenate([
        np.random.randint(120, size=(box_sizes[0], 4)),
        np.random.randint(len(classes), size=(box_sizes[0], 1)),
        np.random.uniform(size=(box_sizes[0], 1)),
    ], axis=1)

    boxes_2 = np.concatenate([
        np.random.randint(120, size=(box_sizes[1], 4)),
        np.random.randint(len(classes), size=(box_sizes[1], 1)),
        np.random.uniform(size=(box_sizes[1], 1)),
    ], axis=1)

    inputs = [boxes_1, boxes_2]

    raw_images = np.zeros((batch_size, 320, 280, 3), dtype=np.uint8)
    image_files = ["dummy.png", "dumpy_2.pny"]

    call = JsonOutput(**params)

    json_output = call(inputs, raw_images, image_files)
    output = json.loads(json_output)

    assert output["classes"] == [{"id": i, "name": name} for i, name in enumerate(classes)]
    assert output["task"] == str(task.value)

    results = output["results"]
    assert [result["file_path"] for result in results] == image_files

    for i in range(batch_size):
        predictions = results[i]["prediction"]
        assert [prediction["score"] for prediction in predictions] == inputs[i][:, 5].astype(str).tolist()
        assert [prediction["class"]["id"] for prediction in predictions] == inputs[i][:, 4].astype(int).tolist()

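        # JsonOutput is expected to rescale boxes from the network input size
        # (width 160, height 120) to the raw image size (width 280, height
        # 320): x/w scale by 280/160 and y/h by 320/120.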
        resized_boxes = np.stack([
            inputs[i][:, 0] * 280 / image_size[1],
            inputs[i][:, 1] * 320 / image_size[0],
            inputs[i][:, 2] * 280 / image_size[1],
            inputs[i][:, 3] * 320 / image_size[0],
        ], axis=1)
        assert np.allclose([prediction["box"] for prediction in predictions], resized_boxes)
Example #3
# NOTE: assumed imports, as in Example #1 (paths may differ per project).
import base64
import json
from io import BytesIO

import numpy as np
import PIL.Image

from lmnet.common import Tasks
from lmnet.utils.output import JsonOutput


def test_semantic_segmentation_json():
    task = Tasks.SEMANTIC_SEGMENTATION
    image_size = (120, 160)
    classes = ("aaa", "bbb")
    params = {
        "task": task,
        "classes": classes,
        "image_size": image_size,
        "data_format": "NCHW",
    }

    batch_size = 2

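    # NCHW network output: (batch_size, num_classes, height, width) scores.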
    predict = np.random.uniform(size=(batch_size, len(classes), image_size[0],
                                      image_size[1]))

    raw_images = np.zeros((batch_size, 320, 280, 3), dtype=np.uint8)
    image_files = ["dummy.png", "dumpy_2.pny"]

    call = JsonOutput(**params)

    json_output = call(predict, raw_images, image_files)
    output = json.loads(json_output)

    assert output["classes"] == [{
        "id": i,
        "name": name
    } for i, name in enumerate(classes)]
    assert output["task"] == str(task.value)

    results = output["results"]
    assert [result["file_path"] for result in results] == image_files

    for i in range(batch_size):
        predictions = results[i]["prediction"]
        for class_id in range(len(classes)):
            # Index by class_id, not i: `predictions` holds one entry per
            # class for this image. Each mask is a base64-encoded image
            # resized to the raw image size.
            mask = predictions[class_id]["mask"]
            mask_data = base64.b64decode(mask)
            mask_pil_image = PIL.Image.open(BytesIO(mask_data))
            mask_image = np.array(mask_pil_image)
            assert mask_image.shape == (320, 280)
Example #4
# NOTE: assumed imports. math and the TensorFlow 1.x API follow from the
# code below; JsonOutput/ImageFromJson and the module-level helpers
# (_all_image_files, _get_images, _save_outputs, _save_json, _save_images,
# DUMMY_FILENAME) are defined elsewhere in the same project.
import math

import tensorflow as tf

from lmnet.utils.output import ImageFromJson, JsonOutput


def _run(input_dir, output_dir, config, restore_path, save_images):
    ModelClass = config.NETWORK_CLASS
    network_kwargs = dict(
        (key.lower(), val) for key, val in config.NETWORK.items())

    graph = tf.Graph()
    with graph.as_default():
        model = ModelClass(classes=config.CLASSES,
                           is_debug=config.IS_DEBUG,
                           **network_kwargs)

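        # The graph is built for inference only: the training flag is a
        # constant False.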
        is_training = tf.constant(False, name="is_training")

        images_placeholder, _ = model.placeholders()
        output_op = model.inference(images_placeholder, is_training)

        init_op = tf.global_variables_initializer()

        saver = tf.train.Saver(max_to_keep=None)

    session_config = tf.ConfigProto()
    sess = tf.Session(graph=graph, config=session_config)
    sess.run(init_op)
    saver.restore(sess, restore_path)

    all_image_files = _all_image_files(input_dir)

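    # Number of batches needed to cover all images; the final short batch is
    # padded with dummy files below.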
    step_size = int(math.ceil(len(all_image_files) / config.BATCH_SIZE))

    json_output = JsonOutput(
        task=config.TASK,
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
    )

    image_from_json = ImageFromJson(
        task=config.TASK,
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )

    results = []
    for step in range(step_size):
        start_index = step * config.BATCH_SIZE
        end_index = (step + 1) * config.BATCH_SIZE

        image_files = all_image_files[start_index:end_index]

        # Pad a short final batch with dummy images so every batch has
        # exactly BATCH_SIZE entries.
        while len(image_files) != config.BATCH_SIZE:
            image_files.append(DUMMY_FILENAME)

        images, raw_images = _get_images(image_files,
                                         config.DATASET.PRE_PROCESSOR,
                                         config.DATA_FORMAT)

        feed_dict = {images_placeholder: images}
        outputs = sess.run(output_op, feed_dict=feed_dict)

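        # Optional task-specific post-processing configured by the project
        # (for detection this would typically include steps such as NMS).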
        if config.POST_PROCESSOR:
            outputs = config.POST_PROCESSOR(outputs=outputs)["outputs"]

        results.append(outputs)
        _save_outputs(output_dir, outputs, step)

        # Use a name other than `json` to avoid shadowing the stdlib module.
        json_obj = json_output(outputs, raw_images, image_files)
        _save_json(output_dir, json_obj, step)

        if save_images:
            filename_images = image_from_json(json_obj, raw_images, image_files)
            _save_images(output_dir, filename_images, step)

    return results