Example #1
def predict_data_set(model, save_path, data, label_dict, conf_thresh):
    """
    Loop through (test) dataset, make bounding box predictions on each image, and save the updated images
    """
    for i in tqdm(range(data.shape[0])):
        # Original (O.G.) image
        og_img = np.expand_dims(data[i, :, :, CONTEXT_FRAMES], axis=2)
        og_img = np.tile(og_img, (1, 1, 3))

        # Make prediction
        inputs = tf.expand_dims(data[i, :, :, :], axis=0)
        prediction = model.predict(inputs, steps=1)
        boxes = convert_matrix_to_dict(prediction, conf_thresh)

        # Plot predicted boxes
        pred_img = draw_boxes(og_img * 255, boxes[0])
        pred_img = np.squeeze(pred_img)

        # Plot true boxes
        # Replace the class value (0, 1, or 2) of each true label with 1, i.e. 100% confidence
        og_labels = [(np.array(box[:-1]) / DOWNSCALE_FACTOR).tolist() + [1]
                     for box in label_dict[i]]
        true_img = draw_boxes(og_img * 255, og_labels)
        true_img = np.squeeze(true_img)

        # Plot and save both predicted and true images side by side
        compare_img = np.vstack((pred_img, true_img))
        compare_img = compare_img.astype('uint8')
        save_img = Image.fromarray(compare_img, 'RGB')
        save_img.save(save_path + str(i) + '.png', 'PNG')
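All of the examples on this page exercise a project-specific draw_boxes helper, and its signature varies from project to project. A minimal sketch of the variant used in this example, assuming boxes of the form [x1, y1, x2, y2, confidence] (the coordinate order is an assumption, not taken from the source):

import numpy as np
from PIL import Image, ImageDraw

def draw_boxes_sketch(img, boxes, color=(255, 0, 0)):
    """Hypothetical stand-in for the project's draw_boxes helper."""
    pil_img = Image.fromarray(img.astype(np.uint8))
    draw = ImageDraw.Draw(pil_img)
    for x1, y1, x2, y2, conf in boxes:
        draw.rectangle([x1, y1, x2, y2], outline=color)
    return np.array(pil_img)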
Example #2
def bbox_detector_single_frame(detector, frame):
    img = Image.fromarray(frame)
    pil_image = ImageOps.fit(img, (256, 256), Image.LANCZOS)  # LANCZOS replaces the deprecated ANTIALIAS
    pil_image_rgb = pil_image.convert("RGB")
    # Feed the resized RGB frame to the detector
    converted_img = tf.image.convert_image_dtype(
        np.array(pil_image_rgb), dtype=tf.float32)[tf.newaxis, ...]
    print('converted_img - type: {} shape: {}'.format(type(converted_img), converted_img.shape))
    result = detector(converted_img)
    result = {key: value.numpy() for key, value in result.items()}
    print("Found %d objects." % len(result["detection_scores"]))
    detection_class_entities = result["detection_class_entities"]
    detection_scores = result['detection_scores']
    detection_boxes = result['detection_boxes']

    person_detection_scores = []
    person_class_entities = []
    person_bounding_boxes = []
    for entity, box, score in zip(detection_class_entities, detection_boxes,
                                  detection_scores):
        if entity == b'Person':
            person_class_entities.append(entity)
            person_bounding_boxes.append(box)
            person_detection_scores.append(score)

    image_with_boxes = draw_boxes(
        np.array(img),
        np.array(person_bounding_boxes),
        np.array(person_class_entities),
        np.array(person_detection_scores))
    display_image(image_with_boxes)

    return result
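A usage sketch for this function, assuming the detector comes from TF Hub (an assumption; the snippet does not show which module it loads, though the result keys match the openimages_v4 detectors):

import numpy as np
import tensorflow_hub as hub
from PIL import Image

# Assumed module URL; any detector returning the same result keys would work
module_url = "https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1"
detector = hub.load(module_url).signatures['default']
frame = np.array(Image.open('frame.jpg'))  # hypothetical input frame
result = bbox_detector_single_frame(detector, frame)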
Example #3

def gen_image(I):
    global recorrelate, corr, heatmaps, map_out
    if recorrelate:
        # Correlate the image against each selected filter and cache the heatmaps
        heatmaps = np.zeros((len(filters_ind), I.shape[0], I.shape[1]), dtype=np.float32)
        for j, i in enumerate(filters_ind):
            heatmaps[j] = correlate(I, filters[i], step=2)
        recorrelate = False
        # Per-pixel maximum response across all filters
        corr = np.max(heatmaps, axis=0)
        map_out = CMAP(corr)[:, :, :3] * 255
        print(np.max(corr))

    boxes = []
    output = np.zeros(I.shape[:2], dtype=np.float32)
    for j, i in enumerate(filters_ind):
        mask = (heatmaps[j] > heat_thresh[i]) * corr
        output = np.maximum(output, mask)
        mask_boxes = get_boxes(mask)
        boxes += min_box_size(mask_boxes,
                              filters[i].shape[0], filters[i].shape[1],
                              I.shape[0], I.shape[1])
    I = draw_boxes(I, boxes)

    left = np.concatenate((I, gray2rgb(output > 0) * I), 0)
    right = np.concatenate((gray2rgb(corr * 255).astype(np.uint8), 
                            map_out.astype(np.uint8)), 0)

    height_diff = right.shape[0] - compound_filter.shape[0]
    filter_image = np.pad(compound_filter, [(0,height_diff), (0,0), (0,0)], mode='constant')
    return np.concatenate((left, right, filter_image), 1)
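get_boxes is not shown in this snippet; a plausible sketch using SciPy connected-component labeling (an assumed implementation, not the project's actual code):

from scipy import ndimage

def get_boxes_sketch(mask):
    """Hypothetical: return one (x1, y1, x2, y2) box per connected nonzero region."""
    labeled, num = ndimage.label(mask > 0)
    boxes = []
    for sl in ndimage.find_objects(labeled):
        ys, xs = sl
        boxes.append((xs.start, ys.start, xs.stop, ys.stop))
    return boxes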
Example #4
def __call__(self, img, show_plots=False):
    hits = self.get_hits(img)
    heat = make_heatmap(img.shape[0:2], hits)
    if self._last_heatmap is None:
        self._last_heatmap = heat
    # Exponentially smooth the heatmap across frames to suppress one-frame false positives
    filtered_heat = (1 - self._alpha) * self._last_heatmap + self._alpha * heat
    self._last_heatmap = filtered_heat
    binary = filtered_heat >= self._threshold
    labels = label_image(binary)
    boxes = []
    for i in range(labels[1]):
        y_points, x_points = np.where(labels[0] == i + 1)
        box = ((np.min(x_points), np.min(y_points)),
               (np.max(x_points), np.max(y_points)))
        width = box[1][0] - box[0][0]
        height = box[1][1] - box[0][1]
        # Discard detections smaller than 32x32 pixels
        if width >= 32 and height >= 32:
            boxes.append(box)
    if show_plots:
        f, ((a0, a1), (a2, a3)) = plt.subplots(2, 2)
        a0.set_title('Raw Hits')
        a0.imshow(draw_boxes(rgb(img, self._cspace), hits))
        a1.set_title('Heatmap')
        a1.imshow(heat.astype(np.float32) / np.max(heat), cmap='gray')
        a2.set_title('Thresholded Heatmap')
        a2.imshow(binary, cmap='gray')
        a3.set_title('Label Image')
        a3.imshow(labels[0], cmap='gray')
    return boxes
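make_heatmap is defined elsewhere in that project; a minimal sketch consistent with how it is used above, assuming hits are ((x1, y1), (x2, y2)) pixel boxes:

import numpy as np

def make_heatmap_sketch(shape, hits):
    """Hypothetical: add 1 inside every hit box to build a detection heatmap."""
    heat = np.zeros(shape, dtype=np.float32)
    for (x1, y1), (x2, y2) in hits:
        heat[y1:y2, x1:x2] += 1.0
    return heat

The exponential filter in __call__ then blends each new heatmap with the previous one, so a region must stay hot across several frames before it crosses the detection threshold.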
Example #5
def show_results(result, labels, scores=None, show_size=False):
    from util import draw_boxes, get_image_from_s3
    mid2label = joblib.load(os.path.join('data', 'mid2label.joblib'))
    for k in range(len(result)):
        # result[k] is a positional tuple; index 0 holds the image id used below
        print(result[k][0])
        print(
            sorted([(mid2label[labels[matched_index]], mid2label[label], score)
                    for score, matched_index, label in zip(
                        result[k][8], result[k][1], result[k][4])],
                   key=lambda x: -x[2]))
        print([mid2label[label] for label in result[k][5]])
        print(result[k][9])
        image = get_image_from_s3(result[k][0])
        draw_boxes(image,
                   result[k][2],
                   result[k][4],
                   scores=scores,
                   uid=result[k][3],
                   show=True,
                   show_size=show_size)
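mid2label is loaded from a precomputed joblib file and presumably maps Open Images MID identifiers to readable class names, e.g. {'/m/01g317': 'Person'} (illustrative entry, not the actual file contents).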
Example #6
def predict_data_point(model, data, label_dict, index, conf_thresh):
    """
    Predict bounding boxes around hair follicles for a single data point at index
    """
    og_img = np.expand_dims(data[index, :, :, CONTEXT_FRAMES], axis=2)
    og_img = np.tile(og_img, (1, 1, 3))

    # Add batch dimension
    inputs = tf.expand_dims(data[index, :, :, :], axis=0)

    # Feed the input to the model
    prediction = model.predict(inputs, steps=1)

    # Transform network output ONLY for YOLO loss!!!
    # prediction = sigmoid(prediction)

    # Convert predictions from matrix form to dictionary
    boxes = convert_matrix_to_dict(prediction, conf_thresh)
    print('Predicted boxes: ', boxes)

    # fig, axes = plt.subplots(nrows=2, ncols=1)

    # Plot predicted boxes
    pred_img = draw_boxes(og_img, boxes[0])
    pred_img = np.squeeze(pred_img)
    # axes[0].imshow(pred_img)

    # Plot true bounding boxes
    # Replace the class value (0, 1, or 2) of each true label with 1, i.e. 100% confidence
    og_labels = [(np.array(box[:-1]) / DOWNSCALE_FACTOR).tolist() + [1]
                 for box in label_dict[index]]
    true_img = draw_boxes(og_img, og_labels)
    true_img = np.squeeze(true_img)
    # axes[1].imshow(true_img)

    # Plot predicted and true boxes on same plot for side by side comparison
    compare_img = np.vstack((pred_img, true_img))
    plt.imshow(compare_img)
    plt.show()
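Examples #1, #6, and #7 all rely on convert_matrix_to_dict from the Hairy project to turn the network's grid output into per-image box lists. A rough sketch of the idea, assuming a YOLO-style output of shape (batch, grid_y, grid_x, 5) holding (x, y, w, h, confidence) per cell (the real decoder's layout may differ):

import numpy as np

def convert_matrix_to_dict_sketch(prediction, conf_thresh):
    """Hypothetical decoder: keep grid cells whose confidence clears the threshold."""
    batch_boxes = []
    for img_pred in prediction:  # iterate over the batch
        boxes = []
        for gy in range(img_pred.shape[0]):
            for gx in range(img_pred.shape[1]):
                x, y, w, h, conf = img_pred[gy, gx]
                if conf >= conf_thresh:
                    boxes.append([x, y, w, h, conf])
        batch_boxes.append(boxes)
    return batch_boxes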
Example #7
File: dataset.py Project: taylorshin/Hairy
def verify_data_generator(generator):
    """
    Verifies that the data is correctly generated and augmented
    """
    for i, (data_batch, label_batch) in enumerate(generator):
        print('BATCH {}'.format(i))
        print('DATA BATCH: ', data_batch.shape)
        print('LABELS BATCH: ', label_batch.shape)
        # Decode the whole label batch once rather than once per image
        boxes = convert_matrix_to_dict(label_batch,
                                       conf_thresh=CONFIDENCE_THRESHOLD)
        for j in range(BATCH_SIZE):
            print('IMG {}'.format(j))
            img = data_batch[j, :, :, CONTEXT_FRAMES]
            img = img[:, :, np.newaxis]
            img = np.tile(img, (1, 1, 3))
            box_img = draw_boxes(img, boxes[j])
            box_img = np.squeeze(box_img)
            plt.imshow(box_img)
            plt.show()
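Keras-style generators yield batches indefinitely, so a cap keeps the verification loop finite (sketch; the limit of 3 batches is arbitrary):

for i, (data_batch, label_batch) in enumerate(generator):
    if i >= 3:  # inspect only the first few batches
        break
    # ... same verification body as above ...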
Example #8
    def to_image_coords(self, boxes, height, width):
        """Scale normalized [ymin, xmin, ymax, xmax] boxes to pixel coordinates."""
        box_coords = np.zeros_like(boxes)
        box_coords[:, 0] = boxes[:, 0] * height
        box_coords[:, 1] = boxes[:, 1] * width
        box_coords[:, 2] = boxes[:, 2] * height
        box_coords[:, 3] = boxes[:, 3] * width
        return box_coords

    def predict(self, image, confidence_cutoff=0.4):
        image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
        result = self.predictor_fn({'inputs': image_np})
        boxes = np.squeeze(result["detection_boxes"])
        scores = np.squeeze(result["detection_scores"])
        classes = np.squeeze(result["detection_classes"])
        boxes, scores, classes = self.filter_boxes(confidence_cutoff, boxes,
                                                   scores, classes)

        width, height = image.size
        box_coords = self.to_image_coords(boxes, height, width)
        return box_coords, classes


if __name__ == "__main__":
    tld = traffic_light_detector()
    image = Image.open('test_images/sample1.jpg')
    box_coords, classes = tld.predict(image)
    util.draw_boxes(image, box_coords, thickness=2)

    plt.imshow(np.asarray(image, dtype=np.uint8))
    plt.title(classes)
    plt.show()
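filter_boxes is not shown; judging from its call site, a plausible implementation simply drops detections below the confidence cutoff (an assumption, not the project's actual code):

def filter_boxes(self, min_score, boxes, scores, classes):
    """Hypothetical: keep only detections with score >= min_score."""
    keep = scores >= min_score
    return boxes[keep], scores[keep], classes[keep]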
Example #9

    # Load input images
    data_path = 'data/RedLights2011_Medium'

    # Load predictions
    preds_path = 'data/hw02_preds'
    preds_file = 'preds_test.json'  # TODO: Change this as needed
    data = None
    with open(os.path.join(preds_path, preds_file), 'r') as f:
        data = json.load(f)
    assert data is not None

    # Draw and save bounding boxes for each file
    out_path = preds_path  # output directory is not defined in the snippet; assumed here
    n = len(data)
    printProgressBar(0, n, prefix='Progress:', suffix='Complete', length=50)
    for i, (file_name, boxes) in enumerate(data.items()):
        # Read image using PIL
        I = np.asarray(Image.open(os.path.join(data_path, file_name)))
        result = draw_boxes(I, boxes)

        # Save new image
        Image.fromarray(result).save(os.path.join(out_path, file_name))

        # Report i + 1 so the bar reaches 100% after the last file
        printProgressBar(i + 1,
                         n,
                         prefix='Progress:',
                         suffix='Complete',
                         length=50)
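The predictions file is assumed to map image file names to lists of box coordinates, roughly as below (illustrative structure; the coordinate order depends on this project's draw_boxes):

# preds_test.json (illustrative):
# {
#     "some_image.jpg": [[y1, x1, y2, x2], ...],
#     ...
# }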
Example #10
meta = pd.read_csv(
    os.path.join('..', 'open_images_', 'validation-images-with-rotation.csv'))
meta.set_index('ImageID', inplace=True)

print('data loaded')

for index, row in meta.iterrows():
    if index not in id2url:
        continue

    #if not np.isnan(row.Rotation):
    if row.Rotation != 270:
        continue
    '''
    image_url = id2url[index]
    try:
        image = get_image(image_url, rotate=id2rot[index])
    except:
        print('error downloading: ' + image_url)
        continue
    '''

    image = get_image_from_s3(index)

    result = get_image_boxes(objects, index)
    print(row.Rotation)
    print(result)

    image_with_boxes = draw_boxes(image, result["detection_boxes"],
                                  result["detection_class_names"])
    display_image(image_with_boxes)
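get_image_boxes is assumed to filter an Open Images annotations table by image id; a sketch under that assumption (column names follow the public Open Images box CSVs):

def get_image_boxes_sketch(objects, image_id):
    """Hypothetical: collect normalized boxes for one image from an annotations DataFrame."""
    rows = objects[objects.ImageID == image_id]
    return {
        'detection_boxes': rows[['YMin', 'XMin', 'YMax', 'XMax']].values,
        'detection_class_names': rows['LabelName'].values,
    }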
Example #11
             zip(result["detection_scores"], result["detection_boxes"],
                 result["detection_class_names"]),
             reverse=True)
         if x[0] > suppression_threshold and box_area(x[1]) >= min_area
     ][:suppression_topk]))
 if len(results) == 0:
     shutil.rmtree(folder)
     print('No detected objects found above thresholds - skipping')
     continue
 result["detection_scores"], result["detection_boxes"], result[
     "detection_class_names"] = results
 image_with_boxes = draw_boxes(image,
                               result["detection_boxes"],
                               result["detection_class_names"],
                               scores=result["detection_scores"],
                               style='new',
                               save_path=os.path.join(
                                   folder, 'input_overlay.jpg'),
                               show=show_overlay,
                               show_size=show_size)
 if get_twitter:
     timestamp = timestamps[cnt]
 else:
     timestamp = datetime.utcnow().isoformat(' ')
 height, width = image.shape[:2]
 input_list = [{
     'bbox': [
         int(box[1] * width),
         int(box[0] * height),
         int(box[3] * width),
         int(box[2] * height)