Example no. 1
def coco_write_anns_to_file(coco, img_id):
    """
    Given an image id $img_id, write out its annotations in the
    mAP ground-truth format to $MAP_GROUND_TRUTH_DIR/$img_id.txt.
    """
    anns = get_image_annotations(coco, img_id)
    fname = '%s/%012d.txt' % (MAP_GROUND_TRUTH_DIR, img_id)
    write_anns_to_file(anns, fname)
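As a usage sketch (not part of the original example, and assuming the same ANNOTATION_FILE and get_downloaded_ids helpers seen in the other examples), the function could be driven over every downloaded image like this:

from pycocotools.coco import COCO
from file_management import ANNOTATION_FILE, get_downloaded_ids

coco = COCO(ANNOTATION_FILE)
for img_id in get_downloaded_ids():
    # Writes one ground-truth file per image under MAP_GROUND_TRUTH_DIR
    coco_write_anns_to_file(coco, img_id)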
Example no. 2
def test_single_abs_ann_to_rect_mask():
    import matplotlib.pyplot as plt
    from pycocotools.coco import COCO
    from file_management import ANNOTATION_FILE, get_downloaded_ids
    from annotations import get_image_annotations, plot_annotations

    coco = COCO(ANNOTATION_FILE)
    img_ids = get_downloaded_ids()
    anns = [get_image_annotations(coco, img) for img in img_ids]
    #view_img = lambda index: plot_annotations(img_ids[index], anns[index])

    ann = anns[0][0]
    # plt.subplots() opens its own new figure, so no explicit plt.figure() call
    # is needed before it
    fig, ax = plt.subplots()
    ax.imshow(single_abs_ann_to_rect_mask(640, 420, ann['bbox']))
    plt.figure(1)
    plot_annotations(img_ids[0], [ann])
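single_abs_ann_to_rect_mask itself is not shown in this listing. A minimal sketch of what the test appears to exercise, assuming the arguments are the image width and height and the COCO bbox convention [x, y, w, h] in absolute pixels (the name and details below are hypothetical, not the project's implementation):

import numpy as np

def single_abs_ann_to_rect_mask_sketch(img_width, img_height, bbox):
    """Hypothetical stand-in: a binary mask of the image size with ones
    inside the absolute-pixel COCO bbox [x, y, w, h]."""
    x, y, w, h = bbox
    mask = np.zeros((img_height, img_width), dtype=np.float32)
    mask[int(round(y)):int(round(y + h)), int(round(x)):int(round(x + w))] = 1.0
    return mask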
Example no. 3
def test_abs_anns_to_heatmap():
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from pycocotools.coco import COCO
    from file_management import ANNOTATION_FILE, get_downloaded_ids
    from annotations import get_image_annotations, plot_annotations

    coco = COCO(ANNOTATION_FILE)
    img_ids = get_downloaded_ids()
    anns = [get_image_annotations(coco, img) for img in img_ids]

    ann = anns[0]
    heatmap = abs_anns_to_heatmap(640, 640, ann)
    # plt.subplots() opens its own new figure, so no explicit plt.figure() call
    # is needed before it
    fig, ax = plt.subplots()
    ax.imshow(heatmap)  # optionally pass cmap=cm.jet for a colored heatmap
    plt.figure(1)
    plot_annotations(img_ids[0], ann)
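abs_anns_to_heatmap is also not shown here. A plausible sketch, assuming it simply accumulates one rectangular box mask per annotation so overlapping objects produce higher values (name and behavior are assumptions, not the project's implementation):

import numpy as np

def abs_anns_to_heatmap_sketch(img_width, img_height, anns):
    """Hypothetical stand-in: sum one rectangle per annotation over a blank image."""
    heatmap = np.zeros((img_height, img_width), dtype=np.float32)
    for ann in anns:
        x, y, w, h = ann['bbox']
        heatmap[int(round(y)):int(round(y + h)), int(round(x)):int(round(x + w))] += 1.0
    return heatmap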
Example no. 4
from math import ceil, floor

import numpy as np

# get_image_annotations and PADDED_SIZE are project-level names defined elsewhere


def get_y_true(coco, bounding_box_count, cell_width_px, cell_height_px,
               img_id):
    # Force bounding_box_count to 1 - See NOTE in above documentation
    bounding_box_count = 1

    # These should probably get moved to the parameters
    DEFAULT_VALUE = 0
    NO_OBJECT_WEIGHT = 0
    HAS_OBJECT_WEIGHT = 1

    # Position of the various training parameters along the last dimension
    # of the output data from the neural network
    POS_OBJ_SCORE = 0
    POS_BOX_CENTER_X = 1
    POS_BOX_CENTER_Y = 2
    POS_BOX_WIDTH = 3
    POS_BOX_HEIGHT = 4

    annotations = get_image_annotations(coco, img_id)

    # img = coco.loadImgs([img_id])[0]

    # cell_x_count: number of cells in the horizontal direction
    # cell_y_count: number of cells in the vertical direction
    cell_x_count = ceil(PADDED_SIZE / cell_width_px)
    cell_y_count = ceil(PADDED_SIZE / cell_height_px)
    # 5 parameters to each bounding box: Probability, X pos, Y pos, Width, Height
    y_true = np.full((cell_y_count, cell_x_count, bounding_box_count * 5),
                     DEFAULT_VALUE, dtype='float32')
    # Set all object probabilities to NO_OBJECT_WEIGHT
    if DEFAULT_VALUE != NO_OBJECT_WEIGHT:
        y_true[..., POS_OBJ_SCORE::5] = NO_OBJECT_WEIGHT

    for annotation in annotations:
        # Calculate the cell that the annotation should match
        bounding_box = annotation['bbox']

        # print("[DEBUG]", bounding_box)

        abs_ul_x = bounding_box[0]
        abs_ul_y = bounding_box[1]
        width = bounding_box[2]
        height = bounding_box[3]

        # Find the center of the box in terms of the whole image
        # These values are purposely floats to keep as much information as
        #  possible about the center of the image
        abs_center_x = abs_ul_x + width / 2
        abs_center_y = abs_ul_y + height / 2

        # Calculate the cell the bounding box is centered in
        cell_x_pos = floor(abs_center_x / cell_width_px)
        cell_y_pos = floor(abs_center_y / cell_height_px)

        # Find the center of the box relative to the corner of the cell:
        # ...And put it in terms of the cell size
        rel_center_x = (abs_center_x -
                        (cell_x_pos * cell_width_px)) / cell_width_px
        rel_center_y = (abs_center_y -
                        (cell_y_pos * cell_height_px)) / cell_height_px

        # Find the size of the bounding box relative to the cell
        rel_width = (width / cell_width_px) / 10  # hard-coded scale factor
        rel_height = (height / cell_height_px) / 10  # hard-coded scale factor

        # TODO: Move to handling more than one bounding box
        # if y_true[cell_y_pos, cell_x_pos, POS_OBJ_SCORE] != NO_OBJECT_WEIGHT:
        #     logging.warn("Image %d has multiple bounding boxes in cell (%d,%d)" % (
        #         img_id,
        #         cell_x_pos,
        #         cell_y_pos
        #     ))

        # Set values for the training data
        y_true[cell_y_pos, cell_x_pos, POS_BOX_CENTER_X] = rel_center_x
        y_true[cell_y_pos, cell_x_pos, POS_BOX_CENTER_Y] = rel_center_y
        y_true[cell_y_pos, cell_x_pos, POS_BOX_WIDTH] = rel_width
        y_true[cell_y_pos, cell_x_pos, POS_BOX_HEIGHT] = rel_height
        y_true[cell_y_pos, cell_x_pos, POS_OBJ_SCORE] = HAS_OBJECT_WEIGHT
        # print("[DEBUG]", y_true[cell_y_pos, cell_x_pos, :])
    return y_true
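To make the per-cell encoding above concrete, here is a small decoding sketch (not part of the original module) that inverts the math in get_y_true: it turns one cell's stored 5-vector back into an absolute-pixel, COCO-style box, including undoing the hard-coded /10 scale on the width and height. The helper name is hypothetical.

def decode_cell_prediction(cell_x_pos, cell_y_pos, cell_values,
                           cell_width_px, cell_height_px):
    """Hypothetical inverse of get_y_true's per-cell encoding.

    cell_values is the 5-vector [obj_score, rel_center_x, rel_center_y,
    rel_width, rel_height] stored at y_true[cell_y_pos, cell_x_pos, :5].
    """
    obj_score, rel_cx, rel_cy, rel_w, rel_h = cell_values[:5]

    # Cell origin plus the cell-relative offset gives the absolute center
    abs_center_x = (cell_x_pos + rel_cx) * cell_width_px
    abs_center_y = (cell_y_pos + rel_cy) * cell_height_px

    # Undo the /10 scale and the division by the cell size
    width = rel_w * 10 * cell_width_px
    height = rel_h * 10 * cell_height_px

    # Convert back to COCO-style [upper-left x, upper-left y, width, height]
    abs_ul_x = abs_center_x - width / 2
    abs_ul_y = abs_center_y - height / 2
    return obj_score, [abs_ul_x, abs_ul_y, width, height]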
Example no. 5
#!/usr/bin/env python3 -i

from pycocotools.coco import COCO
from file_management import ANNOTATION_FILE, get_downloaded_ids
from annotations import get_image_annotations, plot_annotations

coco = COCO(ANNOTATION_FILE)
img_ids = get_downloaded_ids()
anns = [get_image_annotations(coco, img) for img in img_ids]
view_img = lambda index: plot_annotations(img_ids[index], anns[index])