Code example #1
def training_data_generator(coco, start_index, num_images, bounding_box_count,
                            cell_width_px, cell_height_px, batch_size):
    img_ids = get_downloaded_ids()
    img_ids = list(filter(is_not_greyscale, img_ids))[:num_images]
    while True:
        for img_id in img_ids:
            # Batch buffers are hard-coded to the 640x640 padded RGB input
            # and a 10x10 output grid with 5 values per cell.
            image_batch = np.empty((batch_size, 640, 640, 3), np.float32)
            y_true_batch = np.empty((batch_size, 10, 10, 5), np.float32)
            for i in range(batch_size):
                image = file_management.get_image(img_id)
                image = np.divide(image, 256, dtype=np.float32)
                try:
                    image = pad_image(image, PADDED_SIZE)
                except Exception as e:
                    print(img_id)  # report the offending image id
                    raise e
                ground_truth = get_y_true(coco, bounding_box_count,
                                          cell_width_px, cell_height_px,
                                          img_id)
                image_batch[i, :, :, :] = image
                y_true_batch[i, :, :, :] = ground_truth

            yield (image_batch, y_true_batch)
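
The generator above loops forever and yields (image_batch, y_true_batch) tuples, so it can be handed straight to Keras. A minimal usage sketch, assuming a compiled Keras model named `model` and project constants `NUM_IMAGES`, `BATCH_SIZE`, `BOUNDING_BOX_COUNT`, `CELL_WIDTH`, and `CELL_HEIGHT` (these names are assumptions, not part of the snippet):

# Hypothetical usage sketch: NUM_IMAGES, BATCH_SIZE, BOUNDING_BOX_COUNT,
# CELL_WIDTH and CELL_HEIGHT are assumed constants; `model` is an
# already-compiled Keras model.
gen = training_data_generator(coco, 0, NUM_IMAGES, BOUNDING_BOX_COUNT,
                              CELL_WIDTH, CELL_HEIGHT, BATCH_SIZE)
model.fit(gen,
          steps_per_epoch=NUM_IMAGES // BATCH_SIZE,
          epochs=10)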
Code example #2
def all_imgs_numpy(num_images):
    img_ids = get_downloaded_ids()
    img_ids = list(filter(is_not_greyscale, img_ids))[:num_images]
    # One array holding every padded 640x640 RGB image.
    imgs = np.empty((len(img_ids), 640, 640, 3), np.float32)
    gen = image_generator(img_ids)
    for index in range(len(img_ids)):
        imgs[index, :, :, :] = next(gen)

    return imgs
Code example #3
def all_ground_truth_numpy(coco, num_images, bounding_box_count, cell_width_px,
                           cell_height_px):
    #assert(cell_rows == ceil(PADDED_SIZE/cell_height_px))
    #assert(cell_columns == ceil(PADDED_SIZE/cell_width_px))
    img_ids = get_downloaded_ids()
    img_ids = list(filter(is_not_greyscale, img_ids))[:num_images]
    output = np.empty(
        (len(img_ids), ceil(PADDED_SIZE / cell_height_px),
         ceil(PADDED_SIZE / cell_width_px), bounding_box_count * 5), np.float32)
    gen = y_true_generator(coco, bounding_box_count, cell_width_px,
                           cell_height_px, img_ids)
    for index in range(len(img_ids)):
        output[index, :, :, :] = next(gen)
    return output
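
For datasets small enough to hold in memory, the two helpers above can stand in for the generator. A rough usage sketch under the same assumed constants and `model` as before:

# Hypothetical usage sketch: build the full training arrays in memory.
# NUM_IMAGES, BOUNDING_BOX_COUNT, CELL_WIDTH and CELL_HEIGHT are assumed
# constants; `model` is an already-compiled Keras model.
x_train = all_imgs_numpy(NUM_IMAGES)
y_train = all_ground_truth_numpy(coco, NUM_IMAGES, BOUNDING_BOX_COUNT,
                                 CELL_WIDTH, CELL_HEIGHT)
model.fit(x_train, y_train, batch_size=16, epochs=10)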
Code example #4
def test_single_abs_ann_to_rect_mask():
    import matplotlib.pyplot as plt
    from pycocotools.coco import COCO
    from file_management import ANNOTATION_FILE, get_downloaded_ids
    from annotations import get_image_annotations, plot_annotations

    coco = COCO(ANNOTATION_FILE)
    img_ids = get_downloaded_ids()
    anns = [get_image_annotations(coco, img) for img in img_ids]
    #view_img = lambda index: plot_annotations(img_ids[index], anns[index])

    ann = anns[0][0]
    fig, ax = plt.subplots()  # subplots() opens its own figure
    ax.imshow(single_abs_ann_to_rect_mask(640, 420, ann['bbox']))
    plt.figure(1)
    plot_annotations(img_ids[0], [ann])
Code example #5
def test_abs_anns_to_heatmap():
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from pycocotools.coco import COCO
    from file_management import ANNOTATION_FILE, get_downloaded_ids
    from annotations import get_image_annotations, plot_annotations

    coco = COCO(ANNOTATION_FILE)
    img_ids = get_downloaded_ids()
    anns = [get_image_annotations(coco, img) for img in img_ids]

    ann = anns[0]
    heatmap = abs_anns_to_heatmap(640, 640, ann)
    fig, ax = plt.subplots()  # subplots() opens its own figure
    ax.imshow(heatmap)  # or: ax.imshow(heatmap, cmap=cm.jet)
    plt.figure(1)
    plot_annotations(img_ids[0], ann)
Code example #6
File: classify.py  Project: prass-anyvision/QueueTime
    action='store_true',
    help='If this flag is passed, run on ALL downloaded images. Note that '
    'you still must pass an image id, but it is ignored.')
args = vars(ap.parse_args())

# load the trained convolutional neural network
print("[INFO] loading network...")
# Load in the custom loss function
get_custom_objects().update({"QueueTime_loss": QueueTime_loss})

model = load_model(args["model"])

img_ids = args["image_ids"]
if args['all']:
    img_ids = filter(is_not_greyscale, get_downloaded_ids())

for img_id in img_ids:
    image = pad_image(get_image(img_id), PADDED_SIZE)
    image = np.expand_dims(image, axis=0)

    # classify the input image
    print("[INFO] classifying image...")
    y_pred = model.predict(image)[0]
    # post_pred = QueueTime_post_process(y_pred)
    post_pred = cnn_y_to_absolute(CELL_WIDTH, CELL_HEIGHT, y_pred)

    # filter out all scores below threshold:
    post_pred_filtered = list(
        filter(lambda ann: ann['score'] > 0.001, post_pred))
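
    # Hypothetical follow-up (not part of the original snippet): report and
    # plot the surviving detections, assuming plot_annotations can draw
    # prediction dicts with the same 'bbox' layout as COCO annotations.
    print("[INFO] %d detections above threshold for image %s" %
          (len(post_pred_filtered), img_id))
    plot_annotations(img_id, post_pred_filtered)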
Code example #7
File: main.py  Project: prass-anyvision/QueueTime
#!/usr/bin/env python3 -i

from pycocotools.coco import COCO
from file_management import ANNOTATION_FILE, get_downloaded_ids
from annotations import get_image_annotations, plot_annotations

coco = COCO(ANNOTATION_FILE)
img_ids = get_downloaded_ids()
anns = [get_image_annotations(coco, img) for img in img_ids]
view_img = lambda index: plot_annotations(img_ids[index], anns[index])
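
Because main.py carries a `python3 -i` shebang, it is meant to drop into an interactive prompt after loading, where the helper can be called directly, for example:

# Hypothetical interactive call: draw the first downloaded image together
# with its ground-truth annotations.
view_img(0)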