Example #1
def CleanMask_v1(mask):
    # remove small objects and fill holes
    mask = (mask > 0.5).astype(int)
    mask = binary_fill_holes(mask)
    lbl_mask, numObj = scipy_label(mask)
    processed_mask = np.zeros_like(mask)
    minimum_cc_sum = .005 * np.prod(mask.shape)
    for label in range(1, numObj + 1):
        if np.sum(lbl_mask == label) > minimum_cc_sum:
            processed_mask[lbl_mask == label] = 1
    return processed_mask.astype(int)
    def generate_submission(self):
        test_datapath = '/data/Kaggle/test-png'
        # Get list of files
        img_files = natsorted(glob(join(test_datapath, '*.png')))
        # load files into array
        test_imgs = np.stack(
            [self.LoadImgForTest(f, self.dims) for f in img_files])[..., np.newaxis]
        # Get predicted masks
        tqdm.write('Getting mask predictions...')
        masks = self.model.predict(test_imgs,
                                   batch_size=self.batch_size,
                                   verbose=1)
        # data to write to csv
        submission_data = []
        # process mask
        for ind, cur_file in enumerate(tqdm(img_files)):
            cur_mask = masks[ind, ..., 0]
            cur_im = test_imgs[ind, ..., 0]
            cur_mask = (cur_mask > 0.5).astype(int)
            cur_id = splitfile(cur_file)

            processed_mask = CleanMask_v1(cur_mask)
            lbl_mask, numObj = scipy_label(processed_mask)
            if numObj > 0:
                for label in range(1, numObj + 1):
                    temp_mask = np.zeros_like(cur_mask)
                    temp_mask[lbl_mask == label] = 1
                    temp_mask = cv2.resize(temp_mask.astype(float),
                                           (1024, 1024))
                    temp_mask[temp_mask < .5] = 0
                    temp_mask[temp_mask > 0] = 255
                    temp_mask = np.transpose(temp_mask)
                    cur_rle = mask2rle(temp_mask, 1024, 1024)
                    submission_data.append([cur_id, cur_rle])
            else:
                cur_rle = -1
                submission_data.append([cur_id, cur_rle])

        # write to csv
        # generate time-stamped filename
        timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H%M")
        csv_filename = 'TestSubmission_{}.csv'.format(timestamp)
        tqdm.write('Writing csv...')
        with open(csv_filename, mode='w', newline='') as f:
            writer = csv.writer(f, delimiter=',')
            for row in tqdm(submission_data):
                writer.writerow(row)

        print('Done')
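
CleanMask_v1 above relies on numpy and scipy.ndimage helpers that are imported elsewhere in the original file. A minimal, self-contained sketch of how it behaves on a synthetic mask (the imports and test values are assumptions for illustration, not from the original code):

import numpy as np
from scipy.ndimage import binary_fill_holes
from scipy.ndimage import label as scipy_label

# Synthetic probability mask: one large blob with a hole, plus a tiny blob.
mask = np.zeros((64, 64), dtype=float)
mask[10:40, 10:40] = 1.0   # large component (kept)
mask[20:25, 20:25] = 0.0   # hole inside it (filled by binary_fill_holes)
mask[55:57, 55:57] = 1.0   # tiny component, area below 0.5% of the image (dropped)

cleaned = CleanMask_v1(mask)
print(cleaned.sum())  # 900: only the hole-filled large component remains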
Example #3
def GetSubData(file, label, mask):
    mask = mask[..., 0]
    mask = (mask > 0.5).astype(int)
    fid = splitfile(file)

    if label == 0:
        return [fid, -1]

    processed_mask = CleanMask_v1(mask)
    lbl_mask, numObj = scipy_label(processed_mask)
    if numObj > 0:
        processed_mask[processed_mask > 0] = 255
        processed_mask = np.transpose(processed_mask)
        rle = mask2rle(processed_mask, 1024, 1024)
    else:
        rle = -1
    return [fid, rle]
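
mask2rle itself is not included in these snippets. As an assumption, a generic run-length encoder in the common Kaggle convention (column-major scan, 1-indexed run starts followed by run lengths) is sketched below; the actual mask2rle used above may follow a different convention, such as relative run starts.

import numpy as np

def rle_encode(mask_2d):
    # Column-major (Fortran-order) scan with 1-indexed starts -- common Kaggle format.
    pixels = np.concatenate([[0], (mask_2d.T.flatten() > 0).astype(int), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[0::2]  # turn end positions into run lengths
    return ' '.join(str(x) for x in runs)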
Example #4
def pipeline(img):
    img_draw_search = img.copy()
    img_draw_cars = img.copy()
    img_processed = process_image(img)
    img_search = img_processed[p_search.ystart:p_search.ystop, :, :]
    shape = img_search.shape
    img_search = cv2.resize(
        img_search,
        (int(shape[1] / p_search.scale), int(shape[0] / p_search.scale)))
    hog_features = get_hog_features(img_search, p_features)[0]
    heatmap = slide_and_search(img_search, img_draw_search, hog_features,
                               classifier, p_search, p_features)

    heatmaps.update(heatmap)
    heatmap_sum = heatmaps.get_sum()
    heatmap_thresh = heatmap_sum.copy()
    apply_threshold(heatmap_thresh, 25)
    boxes = scipy_label(heatmap_thresh)

    draw_car_boxes(img_draw_cars, boxes)

    return img_draw_search, img_draw_cars, heatmap_sum
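
heatmaps here is some rolling accumulator of per-frame heat maps whose implementation is not shown. A minimal sketch of such a buffer (the class name and maxlen are hypothetical; update/get_sum mirror the calls above):

from collections import deque
import numpy as np

class HeatmapBuffer:
    """Keep the last `maxlen` frame heat maps and expose their element-wise sum."""
    def __init__(self, maxlen=10):
        self.frames = deque(maxlen=maxlen)

    def update(self, heatmap):
        self.frames.append(heatmap.astype(float))

    def get_sum(self):
        # Assumes at least one frame has been added.
        return np.sum(np.stack(self.frames), axis=0)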
def detect_largest_umap_areas_slice(slice_u_map, structure):

    binary_map = np.zeros(slice_u_map.shape, dtype=bool)
    mask = slice_u_map != 0
    binary_map[mask] = True
    binary_structure = generate_binary_structure(binary_map.ndim, connectivity=2)
    bin_labels, num_of_objects = scipy_label(binary_map, binary_structure)
    blob_area_sizes = []
    blob_after_erosion_sizes = []
    if num_of_objects >= 1:
        for i in np.arange(1, num_of_objects + 1):
            binary_map = np.zeros(bin_labels.shape, dtype=bool)
            binary_map[bin_labels == i] = True
            blob_area_sizes.append(np.count_nonzero(binary_map))
            remaining_blob = binary_erosion(binary_map, structure)
            blob_after_erosion_sizes.append(np.count_nonzero(remaining_blob))
        blob_area_sizes = np.array(blob_area_sizes)
        blob_area_sizes[::-1].sort()
        # print(blob_after_erosion_sizes)
        blob_after_erosion_sizes = np.array(blob_after_erosion_sizes)
        blob_after_erosion_sizes[::-1].sort()

    return blob_area_sizes, blob_after_erosion_sizes
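
Assuming the function above and its scipy.ndimage imports (scipy_label, generate_binary_structure, binary_erosion) are in scope, an illustrative call on a synthetic uncertainty slice could look like this; the sizes in the comments are what this made-up input produces, not values from any real data.

import numpy as np
from scipy.ndimage import generate_binary_structure

u_map = np.zeros((32, 32))
u_map[5:15, 5:15] = 0.7      # 10x10 blob, 100 pixels
u_map[20:22, 20:22] = 0.3    # 2x2 blob, 4 pixels

structure = generate_binary_structure(2, 1)  # 4-connected cross used for the erosion
areas, eroded = detect_largest_umap_areas_slice(u_map, structure)
print(areas)   # [100   4]  -- blob sizes, largest first
print(eroded)  # [64  0]    -- sizes after one erosion: 10x10 -> 8x8, 2x2 vanishes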
Example #6
    def _is_contiguous(self, arr):
        _, num_features = scipy_label(arr)
        return num_features == 1
def getLargestCC(segmentation):
    labels, count = scipy_label(segmentation, structure=np.ones((3, 3, 3)))
    assert labels.max() != 0  # assume at least one connected component
    largestCC = labels == (np.argmax(np.bincount(labels.flat)[1:]) + 1)
    return largestCC
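
A quick usage sketch for getLargestCC on a synthetic 3D segmentation (assuming numpy and scipy_label are imported as above; the test volume is illustrative only):

import numpy as np

seg = np.zeros((10, 10, 10), dtype=int)
seg[1:6, 1:6, 1:6] = 1    # 125-voxel component
seg[8, 8, 8] = 1          # single stray voxel

largest = getLargestCC(seg)
print(largest.sum())      # 125: only the biggest connected component survives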
# Get predicted masks
print('Getting predicted masks...')
masks = model.predict(test_imgs, batch_size=batch_size, verbose=1)

# data to write to csv
submission_data = []
# process mask
tqdm.write('Processing masks...')
for ind, cur_file in enumerate(tqdm(img_files)):
    cur_mask = masks[ind, ..., 0]
    cur_im = test_imgs[ind, ..., 0]
    cur_mask = (cur_mask > 0.5).astype(int)
    cur_id = splitfile(cur_file)

    processed_mask = CleanMask_v1(cur_mask)
    lbl_mask, numObj = scipy_label(processed_mask)
    if numObj > 0:
        for label in range(1, numObj + 1):
            temp_mask = np.zeros_like(cur_mask)
            temp_mask[lbl_mask == label] = 1
            temp_mask = cv2.resize(temp_mask.astype(float), (1024, 1024))
            temp_mask[temp_mask < .5] = 0
            temp_mask[temp_mask > 0] = 255
            temp_mask = np.transpose(temp_mask)
            cur_rle = mask2rle(temp_mask, 1024, 1024)
            submission_data.append([cur_id, cur_rle])
    else:
        cur_rle = -1
        submission_data.append([cur_id, cur_rle])

# write to csv
Example #9
def filter_candidates(img,
                      candidates,
                      cars,
                      heat_threshold=1,
                      max_smooth=10,
                      min_overlap=0.6,
                      min_area=1000):
    """
    Given the list of candidates detected by sliding windows,
    compute the heat map and extract the "canonical" car positions in the image
    `cars` is a list of CarPosition objects
    """
    heat = np.zeros_like(img[:, :, 0]).astype(float)

    # add heat
    for box in candidates:
        heat[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1

    # Apply threshold to help remove false positives
    clipped_heat_map = np.copy(heat)
    clipped_heat_map[clipped_heat_map <= heat_threshold] = 0

    # Visualize the heatmap
    heat_map = np.clip(clipped_heat_map, 0, 255)

    # Find final boxes from heatmap using label function
    labels = scipy_label(heat_map)
    bboxes = get_labeled_bboxes(labels)

    # miss by default
    for c in cars:
        c.misses += 1

    # assign the bboxes to the cars
    for bbox in bboxes:
        if bbox_area(bbox) < min_area:
            continue

        if len(cars) == 0:
            cars = [CarPosition(bbox, max_smooth=max_smooth)]
        else:
            overlaps = [c.overlap(bbox) for c in cars]
            idx = np.argmax(overlaps)
            if overlaps[idx] >= min_overlap:
                cars[idx].update(bbox)
            else:
                cars.append(CarPosition(bbox, max_smooth=max_smooth))

    # remove cars that miss a frame
    max_misses = 3
    min_count = 3
    cars = [c for c in cars if c.misses < max_misses]

    # merge near-by car positions
    merged_cars = []
    for i in range(len(cars)):
        merged = True
        for j in range(i + 1, len(cars)):
            if cars[i].overlap(cars[j].position) >= min_overlap:
                merged = False
                break
        if merged:
            merged_cars.append(cars[i])
    cars = merged_cars

    # Draw the box on the image
    car_img = np.copy(img)
    for car in cars:
        if car.count >= min_count:
            pos = car.position
            cv2.rectangle(car_img, pos[0], pos[1], (0, 0, 255), 6)

    return cars, car_img, heat
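
The heat-map core of filter_candidates (accumulate, threshold, label, box) can be exercised on its own. The sketch below uses made-up boxes and scipy.ndimage.find_objects in place of the get_labeled_bboxes helper, which is not shown here:

import numpy as np
from scipy.ndimage import label as scipy_label, find_objects

img = np.zeros((480, 640, 3), dtype=np.uint8)
candidates = [((100, 200), (180, 280)),   # two overlapping detections ...
              ((120, 210), (200, 290)),
              ((400, 100), (460, 160))]   # ... and one isolated detection

heat = np.zeros_like(img[:, :, 0]).astype(float)
for (x1, y1), (x2, y2) in candidates:
    heat[y1:y2, x1:x2] += 1               # accumulate heat per candidate box

heat[heat <= 1] = 0                       # threshold: keep pixels hit more than once
labels, n = scipy_label(heat)
for y_slice, x_slice in find_objects(labels):
    print((x_slice.start, y_slice.start), (x_slice.stop, y_slice.stop))
    # -> (120, 210) (180, 280): only the overlapping pair survives the threshold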
def Process_video_frame(img):
    global BoxHandler
    global FrameCount

    FrameCount = FrameCount + 1
    #print("Frame Count :", FrameCount)

    rectangle_boxes = []

    #------------------------------------------------------
    # find cars in 192 x 192 sliding windows (scale=3)
    # searching area: (ystart - ystop) = 636 - 396 = 240 = 192 x 1.25 (Chosen)
    #_, img_boxes, _, _ = \
    #find_cars(  img, conv_color='RGB2YCrCb', ystart=396, ystop=636, scale=3, svc=svc,
    #            X_scaler=X_scaler, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
    #            spatial_size=spatial_size, hist_bins=hist_bins )
    #
    #rectangle_boxes.extend(img_boxes)

    # find cars in 160 x 160 sliding windows (scale=2.5)
    # searching area: (ystart - ystop) = 620 - 380 = 240 = 160 x 1.5 (Chosen)
    # searching area: (ystart - ystop) = 630 - 430 = 200 = 160 x 1.25
    _, img_boxes, _, _ = \
    find_cars(  img, conv_color='RGB2YCrCb', ystart=380, ystop=620, scale=2.5, svc=svc,
                X_scaler=X_scaler, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                spatial_size=spatial_size, hist_bins=hist_bins )

    rectangle_boxes.extend(img_boxes)

    # find cars in 128 x 128 sliding windows (scale=2)
    # searching area: (ystart - ystop) = 636 - 380 = 256 = 128 x 2
    # searching area: (ystart - ystop) = 572 - 380 = 192 = 128 x 1.5  (Chosen)
    # searching area: (ystart - ystop) = 540 - 380 = 160 = 128 x 1.25
    _, img_boxes, _, _ = \
    find_cars(  img, conv_color='RGB2YCrCb', ystart=380, ystop=540, scale=2, svc=svc,
                X_scaler=X_scaler, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                spatial_size=spatial_size, hist_bins=hist_bins )

    rectangle_boxes.extend(img_boxes)

    # find cars in 112 x 112 sliding windows (scale=1.75)
    # searching area: (ystart - ystop) = 604 - 380 = 224 = 112 x 2 (Chosen)
    # searching area: (ystart - ystop) = 508 - 368 = 140 = 112 x 1.25 (Chosen)
    _, img_boxes, _, _ = \
    find_cars(  img, conv_color='RGB2YCrCb', ystart=368, ystop=508, scale=1.75, svc=svc,
                X_scaler=X_scaler, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                spatial_size=spatial_size, hist_bins=hist_bins )

    rectangle_boxes.extend(img_boxes)

    # find cars in 96 x 96 sliding windows (scale=1.5)
    # searching area: (ystart - ystop) = 572 - 380 = 192 = 96 x 2
    # searching area: (ystart - ystop) = 524 - 380 = 144 = 96 x 1.5
    # searching area: (ystart - ystop) = 488 - 368 = 120 = 96 x 1.25 (Chosen)
    # searching area: (ystart - ystop) = 464 - 368 = 96 = 96 x 1.0
    _, img_boxes, _, _ = \
    find_cars(  img, conv_color='RGB2YCrCb', ystart=380, ystop=524, scale=1.5, svc=svc,
                X_scaler=X_scaler, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                spatial_size=spatial_size, hist_bins=hist_bins )

    rectangle_boxes.extend(img_boxes)

    # find cars in 64 x 64 sliding windows (scale=1)
    # searching area: (ystart - ystop) = 508 - 380 = 128 = 64 x 2
    # searching area: (ystart - ystop) = 496 - 400 = 96 = 64 x 1.5 (Chosen)
    # searching area: (ystart - ystop) = 460 - 380 = 80 = 64 x 1.25
    _, img_boxes, _, _ = \
    find_cars(  img, conv_color='RGB2YCrCb', ystart=400, ystop=496, scale=1, svc=svc,
                X_scaler=X_scaler, orient=orient, pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                spatial_size=spatial_size, hist_bins=hist_bins )

    rectangle_boxes.extend(img_boxes)

    #-------------------------------------------------------
    # Debug: Generate video with raw boxes drawn
    if (boDebugMode == True) and (boOutputRawBoxes == True):
        box_marked_img = draw_boxes(img, rectangle_boxes)
        return box_marked_img
    #------------------------------------------------------
    BoxHandler.add_boxes(rectangle_boxes)
    BoxHandler.add_heatmap(rectangle_boxes, img)

    heatmap = np.zeros_like(img[:, :, 0]).astype(float)

    heatmap = add_weight_to_heatmap(heatmap, BoxHandler.get_all_boxes())

    heatmap = apply_threshold(heatmap, BoxHandler.get_bboxes_threshold())

    heatmap_thresholds = BoxHandler.get_framecount_threshold()
    #plt.imshow(heatmap_thresholds, cmap='hot')
    #plt.show()

    heatmap[(heatmap_thresholds == 0)] = 0

    #-------------------------------------------------------
    # Debug: Generate video with Heat Map + Raw Boxes
    if boDebugMode == True:
        heatmap_show = heatmap

        zeroimage = np.zeros_like(img[:, :, 0]).astype(float)

        heatmap_RGB = cv2.merge([heatmap_show * 3, zeroimage, zeroimage])
        heatmap_RGB = heatmap_RGB.astype(np.uint8)
        #plt.imshow(heatmap_RGB)
        #plt.show()

        debug_image = draw_boxes(img,
                                 rectangle_boxes,
                                 color=(0, 0, 255),
                                 thick=2)
        debug_image = cv2.addWeighted(debug_image, 1, heatmap_RGB, 0.8, 0)

        labels = scipy_label(heatmap)
        debug_image = draw_labeled_bboxes(debug_image,
                                          labels,
                                          color=(255, 0, 0),
                                          thick=3)
        return debug_image
    #-------------------------------------------------------
    labels = scipy_label(heatmap)
    refined_marked_img = draw_labeled_bboxes(img, labels)

    return refined_marked_img
Example #11
    images = [box_marked_img, heatmap]
    titles = ["Marked Image", "Original Heat Map"]
    #plt_fig = plt.figure(figsize=(10.8, 3), dpi=200)
    #display_images_var1(plt_fig, 1, 2, images, titles)
    f, ax = plt.subplots(1, 2, figsize=(10.4, 3.15))
    f.tight_layout()
    ax[0].imshow(images[0])
    ax[0].set_title(titles[0], fontsize=14)
    ax[1].imshow(images[1], cmap='hot')
    ax[1].set_title(titles[1], fontsize=14)
    plt.subplots_adjust(left=0.06, right=0.97, top=0.98, bottom=0.02)

    #-------------------------------------------------------
    heatmap = apply_threshold(heatmap, 2)
    labels = scipy_label(heatmap)
    refined_marked_img = draw_labeled_bboxes(img, labels)

    images = [refined_marked_img, heatmap]
    titles = ["Marked Image", "Thresholded Heat Map"]
    #plt_fig = plt.figure(figsize=(10.8, 3), dpi=200)
    #display_images_var1(plt_fig, 1, 2, images, titles)
    f, ax = plt.subplots(1, 2, figsize=(10.4, 3.15))
    f.tight_layout()
    ax[0].imshow(images[0])
    ax[0].set_title(titles[0], fontsize=14)
    ax[1].imshow(images[1], cmap='hot')
    ax[1].set_title(titles[1], fontsize=14)
    plt.subplots_adjust(left=0.06, right=0.97, top=0.98, bottom=0.02)

    mpimg.imsave(