import numpy as np

from framework import viz_utils

# get_transformed_grid and get_gaussian_on_image are defined elsewhere in the
# original module


def objective(basic_grid, contours_map, delta_x, delta_y, angle, scale, shear):
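    # Score how well the transformed grid fits the contours map: place a
    # Gaussian at every grid point and integrate their sum against the contours.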
    print('args: %s  %s  %s  %s' % (delta_x, delta_y, angle, scale))
    transformed_grid = get_transformed_grid(basic_grid, delta_x, delta_y,
                                            angle, scale, shear)
    gaussians = np.zeros((contours_map.shape[0], contours_map.shape[1]),
                         dtype=np.float64)
    gaussian_sigma = 70 * scale  # TODO: rethink the "* scale"
    for x, y in transformed_grid:
        if x < 0 or y < 0:
            continue
        gaussian = get_gaussian_on_image(x, y, gaussian_sigma,
                                         contours_map.shape[1],
                                         contours_map.shape[0])
        gaussians += gaussian
    viz_utils.show_image('gaussians', gaussians)
    return np.sum(np.multiply(contours_map, gaussians))
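

# A minimal usage sketch (an assumption, not from the original): the objective
# above can drive a Nelder-Mead search over the grid-pose parameters, e.g.
#
#   from scipy.optimize import minimize
#   result = minimize(
#       lambda args: -objective(basic_grid, contours_map, *args),
#       x0=[0, 0, 0, 1.0, 0],  # delta_x, delta_y, angle, scale, shear
#       method='Nelder-Mead')
#
# (negated because the objective is a score to maximize)


# Example: obstacle detection via ORB-based registration and frame differencing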
import cv2

from framework import viz_utils
import computer_vision.typical_image_alignment as align

if __name__ == '__main__':

    import content.data_pointers.lavi_april_18.dji as dji_data

    obstacle = dji_data.snapshots_80_meters['15-17-1']
    clear = dji_data.snapshots_80_meters['15-10-1']

    img_obstacle = cv2.imread(obstacle.path)
    img_clear = cv2.imread(clear.path)
    img_obstacle_reg, h = align.orb_based_registration(img_obstacle, img_clear)
    # viz_utils.show_image('imreg', img_obstacle_reg)
    # img_diff = img_obstacle_reg - img_clear
    img_gray_diff = cv2.subtract(
        cv2.cvtColor(img_obstacle_reg, cv2.COLOR_BGR2GRAY),
        cv2.cvtColor(img_clear, cv2.COLOR_BGR2GRAY))
    viz_utils.show_image('imreg_diff', img_gray_diff)
    # img_diff_denoised = cv2.fastNlMeansDenoising(img_gray_diff, None, 10,10,7)
    # viz_utils.show_image('imreg_denoise', img_diff_denoised)
    # thresh = cv2.adaptiveThreshold(img_gray_diff, 0, adaptiveMethod=cv2.ADAPTIVE_THRESH_MEAN_C, thresholdType=cv2.THRESH_BINARY_INV, blockSize=21, C=2)
    # viz_utils.show_image('imreg_denoise', thresh)

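    # Morphological closing fills small dark gaps inside the bright difference
    # blobs, consolidating candidate obstacle regions.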
    se = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))
    out = cv2.morphologyEx(img_gray_diff, cv2.MORPH_CLOSE, se)
    viz_utils.show_image('imreg_closed', out)


# Example: per-image orchard orientation estimation from canopy contours
# (assumed imports and list definition; the snippet's opening lines were cut off)
import cv2
import numpy as np

from framework import cv_utils, viz_utils
from computer_vision import trunks_detection_old_cv  # assumed module path
import content.data_pointers.lavi_april_18.dji as dji_data

image_paths_list = [descriptor.path
                    for descriptor in dji_data.snapshots_80_meters.values()]
viz_mode = True
N = 6

if __name__ == '__main__':
    idx = 0
    for image_path in image_paths_list:

        idx += 1
        if idx != 4:
            continue
        # Read image
        image = cv2.imread(image_path)
        if viz_mode:
            viz_utils.show_image('image', image)

        # Crop central ROI
        cropped_image_size = int(np.min([image.shape[0], image.shape[1]]) * 0.9)
        cropped_image, crop_origin, _ = cv_utils.crop_region(
            image,
            x_center=image.shape[1] // 2,
            y_center=image.shape[0] // 2,
            x_pixels=cropped_image_size,
            y_pixels=cropped_image_size)
        if viz_mode:
            viz_utils.show_image('cropped image', cropped_image)

        # Estimate orchard orientation
        orientation = trunks_detection_old_cv.estimate_rows_orientation(
            cropped_image)


# Example #4 (the snippet's opening lines are missing; the reads below are
# reconstructed from the parallel calls that follow)
    baseline_image = cv2.imread(
        dji.snapshots_80_meters[baseline_image_key].path)
    obstacle_in_3_4_image = cv2.imread(
        dji.snapshots_80_meters[obstacle_in_3_4_image_key].path)
    obstacle_in_4_5_image = cv2.imread(
        dji.snapshots_80_meters[obstacle_in_4_5_image_key].path)
    obstacle_in_5_6_image = cv2.imread(
        dji.snapshots_80_meters[obstacle_in_5_6_image_key].path)

    baseline_gray_image = cv2.cvtColor(baseline_image, cv2.COLOR_BGR2GRAY)
    obstacle_in_3_4_gray_image = cv2.cvtColor(obstacle_in_3_4_image,
                                              cv2.COLOR_BGR2GRAY)
    obstacle_in_4_5_gray_image = cv2.cvtColor(obstacle_in_4_5_image,
                                              cv2.COLOR_BGR2GRAY)
    obstacle_in_5_6_gray_image = cv2.cvtColor(obstacle_in_5_6_image,
                                              cv2.COLOR_BGR2GRAY)

    if viz_mode:
        viz_utils.show_image('baseline', baseline_gray_image)
        viz_utils.show_image('obstacle_in_3_4', obstacle_in_3_4_gray_image)
        viz_utils.show_image('obstacle_in_4_5', obstacle_in_4_5_gray_image)
        viz_utils.show_image('obstacle_in_5_6', obstacle_in_5_6_gray_image)

    with open(metadata_baseline_path) as f:
        metadata_baseline = json.load(f)
    with open(metadata_obstacle_in_3_4_path) as f:
        metadata_obstacle_in_3_4 = json.load(f)
    with open(metadata_obstacle_in_4_5_path) as f:
        metadata_obstacle_in_4_5 = json.load(f)
    with open(metadata_obstacle_in_5_6_path) as f:
        metadata_obstacle_in_5_6 = json.load(f)

    points_baseline = metadata_baseline['results']['1']['pattern_points']
    points_obstacle_in_3_4 = metadata_obstacle_in_3_4['results']['1'][
        'pattern_points']


# Example #5: A* path planning on an occupancy-grid map image (assumed
# context: PathPlan subclasses AStar from the 'astar' pip package, which
# declares heuristic_cost_estimate / distance_between / neighbors and
# provides the .astar(start, goal) solver used below)
import math

import cv2

from astar import AStar

from framework import cv_utils, viz_utils


class PathPlan(AStar):
    def __init__(self, map_image):
        self.map_image = map_image

    def heuristic_cost_estimate(self, current, goal):
        (x1, y1) = current
        (x2, y2) = goal
        return math.hypot(x2 - x1, y2 - y1)

    def distance_between(self, n1, n2):
        return 1 # TODO: change

    def neighbors(self, node):
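        # 4-connected neighbors; a pixel is traversable when its map value is 0 (free space)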
        curr_x, curr_y = node
        def is_free(x, y):
            if 0 <= x < self.map_image.shape[1] and 0 <= y < self.map_image.shape[0]:
                if self.map_image[y, x] == 0:
                    return True
            return False
        return [(x, y) for (x, y) in [(curr_x, curr_y - 1), (curr_x, curr_y + 1),
                                      (curr_x - 1, curr_y), (curr_x + 1, curr_y)] if is_free(x,y)]


if __name__ == '__main__':
    map_image = cv2.cvtColor(cv2.imread(r'/home/omer/Downloads/dji_15-53-1_map.pgm'), cv2.COLOR_BGR2GRAY)
    points = cv_utils.sample_pixel_coordinates(map_image, multiple=True)
    start = points[0]
    goal = points[1]
    path_plan = PathPlan(map_image)
    path = path_plan.astar(start, goal)
    for point in path:
        cv2.circle(map_image, point, radius=3, color=255, thickness=-1)
    viz_utils.show_image('path', map_image)
    print('end')


# Example: comparing warped images with SSIM/MSE similarity scores (the
# snippet picks up mid-script; image1, image2, markers and points come from
# the missing opening lines)
    print(cv_utils.calculate_image_similarity(image1,
                                              warped_image_gt2,
                                              method='mse'))
    print('')
    warped_image_gt1 = cv_utils.warp_image(image=image1,
                                           points_in_image=markers1,
                                           points_in_baseline=markers2)
    print(cv_utils.calculate_image_similarity(image2,
                                              warped_image_gt1,
                                              method='ssim'))
    print(cv_utils.calculate_image_similarity(image2,
                                              warped_image_gt1,
                                              method='mse'))

    if viz_mode:
        viz_utils.show_image('image1', image1)
        viz_utils.show_image('image2', image2)

    # TODO: consider playing with the crop_ratio for calculate_image_diff
    print('\nmine:')
    warped_image2 = cv_utils.warp_image(image=image2,
                                        points_in_image=points2,
                                        points_in_baseline=points1)
    if viz_mode:
        viz_utils.show_image('warped image', warped_image2)
    print(cv_utils.calculate_image_similarity(image1,
                                              warped_image2,
                                              method='ssim'))
    print(cv_utils.calculate_image_similarity(image1,
                                              warped_image2,
                                              method='mse'))
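

# Example: trunks detection experiment task (a method of an experiment class;
# it relies on self.data_sources, self.params, self.repetition_dir and
# self.results, and on module-level imports such as os, itertools, cv2,
# numpy as np and matplotlib.pyplot as plt, omitted with the rest of the module)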
    def task(self, **kwargs):

        viz_mode = kwargs.get('viz_mode')
        # Read image
        image = cv2.imread(self.data_sources)
        cv2.imwrite(os.path.join(self.repetition_dir, 'image.jpg'), image)
        if viz_mode:
            viz_utils.show_image('image', image)

        # Save contours mask
        _, contours_mask = segmentation.extract_canopy_contours(image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'contours_mask.jpg'), contours_mask)

        # Crop central ROI
        cropped_image_size = int(np.min([image.shape[0], image.shape[1]]) * self.params['crop_ratio'])
        cropped_image, crop_origin, _ = cv_utils.crop_region(image, x_center=image.shape[1] // 2, y_center=image.shape[0] // 2,
                                                             x_pixels=cropped_image_size, y_pixels=cropped_image_size)
        cv2.imwrite(os.path.join(self.repetition_dir, 'cropped_image.jpg'), cropped_image)
        if viz_mode:
            viz_utils.show_image('cropped image', cropped_image)

        # Estimate orchard orientation
        orientation = trunks_detection_old_cv.estimate_rows_orientation(cropped_image)
        rotation_mat = cv2.getRotationMatrix2D((cropped_image.shape[1] / 2, cropped_image.shape[0] / 2), orientation * (-1), scale=1.0)
        vertical_rows_image = cv2.warpAffine(cropped_image, rotation_mat, (cropped_image.shape[1], cropped_image.shape[0]))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows.jpg'), vertical_rows_image)
        if viz_mode:
            viz_utils.show_image('vertical rows', vertical_rows_image)

        # Get tree centroids
        centroids, rotated_centroids, aisle_centers, slices_and_cumsums = trunks_detection_old_cv.find_tree_centroids(cropped_image, correction_angle=orientation * (-1))
        vertical_rows_aisle_centers_image = cv_utils.draw_lines_on_image(vertical_rows_image, lines_list=[((center, 0), (center, vertical_rows_image.shape[0]))
                                                                         for center in aisle_centers], color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_aisle_centers.jpg'), vertical_rows_aisle_centers_image)
        slice_image, cumsum_vector = slices_and_cumsums[len(slices_and_cumsums) // 2]
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_row_slice.jpg'), slice_image)
        plt.figure()
        plt.plot(cumsum_vector)
        plt.savefig(os.path.join(self.repetition_dir, 'cumsum_vector.jpg'))
        vertical_rows_centroids_image = cv_utils.draw_points_on_image(vertical_rows_image, itertools.chain.from_iterable(rotated_centroids), color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_centroids.jpg'), vertical_rows_centroids_image)
        if viz_mode:
            viz_utils.show_image('vertical rows aisle centers', vertical_rows_aisle_centers_image)
            viz_utils.show_image('vertical rows centroids', vertical_rows_centroids_image)

        # Estimate grid parameters
        grid_dim_x, grid_dim_y = trunks_detection_old_cv.estimate_grid_dimensions(rotated_centroids)
        shear, drift_vectors = trunks_detection_old_cv.estimate_shear(rotated_centroids)
        drift_vectors_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors, color=(255, 255, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors.jpg'), drift_vectors_image)
        if viz_mode:
            viz_utils.show_image('drift vectors', drift_vectors_image)

        # Get essential grid
        essential_grid = trunks_detection_old_cv.get_essential_grid(grid_dim_x, grid_dim_y, shear, orientation, n=self.params['grid_size_for_optimization'])
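        # Shift the grid into positive coordinates and pad it by 20% so it can be drawn on a blank canvas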
        essential_grid_shape = np.max(essential_grid, axis=0) - np.min(essential_grid, axis=0)
        margin = essential_grid_shape * 0.2
        essential_grid_shifted = [tuple(elem) for elem in np.array(essential_grid) - np.min(essential_grid, axis=0) + margin / 2]
        estimated_grid_image = np.full((int(essential_grid_shape[1] + margin[1]), int(essential_grid_shape[0] + margin[0]), 3), 0, dtype=np.uint8)
        estimated_grid_image = cv_utils.draw_points_on_image(estimated_grid_image, essential_grid_shifted, color=(255, 0, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'estimated_grid.jpg'), estimated_grid_image)
        if viz_mode:
            viz_utils.show_image('estimated grid', estimated_grid_image)

        # Find translation of the grid
        positioned_grid, translation, drift_vectors = trunks_detection_old_cv.find_min_mse_position(centroids, essential_grid, cropped_image.shape[1], cropped_image.shape[0])
        if positioned_grid is None:
            raise ExperimentFailure
        positioned_grid_image = cv_utils.draw_points_on_image(cropped_image, positioned_grid, color=(255, 0, 0), radius=20)
        positioned_grid_image = cv_utils.draw_points_on_image(positioned_grid_image, centroids, color=(0, 0, 255), radius=10)
        positioned_grid_image = cv_utils.draw_lines_on_image(positioned_grid_image, drift_vectors, color=(255, 255, 0), thickness=3)
        cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid.jpg'), positioned_grid_image)
        if viz_mode:
            viz_utils.show_image('positioned grid', positioned_grid_image)

        # Estimate sigma as a portion of intra-row distance
        sigma = grid_dim_y * self.params['initial_sigma_to_dim_y_ratio']

        # Get a grid of gaussians
        grid = trunks_detection_old_cv.get_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, n=self.params['grid_size_for_optimization'])
        gaussians_filter = trunks_detection_old_cv.get_gaussians_grid_image(grid, sigma, cropped_image.shape[1], cropped_image.shape[0])
        cv2.imwrite(os.path.join(self.repetition_dir, 'gaussians_filter.jpg'), 255.0 * gaussians_filter)
        _, contours_mask = segmentation.extract_canopy_contours(cropped_image)
        filter_output = np.multiply(gaussians_filter, contours_mask)
        cv2.imwrite(os.path.join(self.repetition_dir, 'filter_output.jpg'), filter_output)
        if viz_mode:
            viz_utils.show_image('gaussians filter', gaussians_filter)
            viz_utils.show_image('filter output', filter_output)

        # Optimize the grid
        optimized_grid, optimized_grid_args = trunks_detection_old_cv.optimize_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, sigma, cropped_image, n=self.params['grid_size_for_optimization'])
        optimized_grid_dim_x, optimized_grid_dim_y, optimized_translation_x, optimized_translation_y, optimized_orientation, optimized_shear, optimized_sigma = optimized_grid_args
        self.results[self.repetition_id] = {'optimized_grid_dim_x': optimized_grid_dim_x,
                                            'optimized_grid_dim_y': optimized_grid_dim_y,
                                            'optimized_translation_x': optimized_translation_x,
                                            'optimized_translation_y': optimized_translation_y,
                                            'optimized_orientation': optimized_orientation,
                                            'optimized_shear': optimized_shear,
                                            'optimized_sigma': optimized_sigma}
        optimized_grid_image = cv_utils.draw_points_on_image(cropped_image, optimized_grid, color=(0, 255, 0))
        optimized_grid_image = cv_utils.draw_points_on_image(optimized_grid_image, positioned_grid, color=(255, 0, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'optimized_grid.jpg'), optimized_grid_image)
        if viz_mode:
            viz_utils.show_image('optimized grid', optimized_grid_image)

        # Extrapolate full grid on the entire image
        full_grid_np = trunks_detection_old_cv.extrapolate_full_grid(optimized_grid_dim_x, optimized_grid_dim_y, optimized_orientation, optimized_shear,
                                                                     base_grid_origin=np.array(optimized_grid[0]) + np.array(crop_origin),
                                                                     image_width=image.shape[1], image_height=image.shape[0])
        full_grid_image = cv_utils.draw_points_on_image(image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(255, 0, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid.jpg'), full_grid_image)
        if viz_mode:
            viz_utils.show_image('full grid', full_grid_image)


        # Match given orchard pattern to grid
        full_grid_scores_np = trunks_detection_old_cv.get_grid_scores_array(full_grid_np, image, optimized_sigma)
        orchard_pattern_np = self.params['orchard_pattern']
        pattern_origin, pattern_match_score = trunks_detection_old_cv.fit_pattern_on_grid(full_grid_scores_np, orchard_pattern_np)
        if pattern_origin is None:
            raise ExperimentFailure
        self.results[self.repetition_id]['pattern_match_score'] = pattern_match_score
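        # Cut out the sub-grid covered by the pattern; cells marked -1 in the pattern are non-trees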
        trunk_coordinates_np = full_grid_np[pattern_origin[0] : pattern_origin[0] + orchard_pattern_np.shape[0],
                                            pattern_origin[1] : pattern_origin[1] + orchard_pattern_np.shape[1]]
        trunk_points_list = trunk_coordinates_np[orchard_pattern_np != -1]
        trunk_coordinates_np[orchard_pattern_np == -1] = np.nan
        semantic_trunks_image = cv_utils.draw_points_on_image(image, trunk_points_list, color=(255, 255, 255))
        for i in range(trunk_coordinates_np.shape[0]):
            for j in range(trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(trunk_coordinates_np[(i, j)])):
                    continue
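                # Trees are labeled "<column>/<row letter>", lettering rows from the bottom up (A, B, ...)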
                label_coordinates = (int(trunk_coordinates_np[(i, j)][0]) + 15, int(trunk_coordinates_np[(i, j)][1]) + 15)
                tree_label = '%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))
                cv2.putText(semantic_trunks_image, tree_label, label_coordinates, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=2, color=(255, 255, 255), thickness=8, lineType=cv2.LINE_AA)
        cv2.imwrite(os.path.join(self.repetition_dir, 'semantic_trunks.jpg'), semantic_trunks_image)
        if viz_mode:
            viz_utils.show_image('semantic trunks', semantic_trunks_image)

        # Refine trunk locations
        refined_trunk_coordinates_np = trunks_detection_old_cv.refine_trunk_locations(image, trunk_coordinates_np, optimized_sigma,
                                                                                      optimized_grid_dim_x, optimized_grid_dim_y)  # assumption: dim_y intended (the source passed dim_x twice)
        refined_trunk_points_list = refined_trunk_coordinates_np[orchard_pattern_np != -1]
        refined_trunk_coordinates_np[orchard_pattern_np == -1] = np.nan
        refined_semantic_trunks_image = cv_utils.draw_points_on_image(image, refined_trunk_points_list, color=(255, 255, 255))
        semantic_trunks = {}
        for i in range(refined_trunk_coordinates_np.shape[0]):
            for j in range(refined_trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(refined_trunk_coordinates_np[(i, j)])):
                    continue
                trunk_coordinates = (int(refined_trunk_coordinates_np[(i, j)][0]), int(refined_trunk_coordinates_np[(i, j)][1]))
                label_coordinates = tuple(np.array(trunk_coordinates) + np.array([15, 15]))
                semantic_trunks['%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))] = trunk_coordinates
                tree_label = '%d/%s' % (j + 1, chr(65 + (refined_trunk_coordinates_np.shape[0] - 1 - i)))
                cv2.putText(refined_semantic_trunks_image, tree_label, label_coordinates, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=2, color=(255, 255, 255), thickness=8, lineType=cv2.LINE_AA)
        cv2.imwrite(os.path.join(self.repetition_dir, 'refined_semantic_trunks.jpg'), refined_semantic_trunks_image)
        self.results[self.repetition_id]['semantic_trunks'] = semantic_trunks
        if viz_mode:
            viz_utils.show_image('refined semantic trunks', refined_semantic_trunks_image)
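

# Example: drawing marker rectangles on generated canopy maps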
import cv2
import json

from framework import cv_utils
from framework import viz_utils
from computer_vision import maps_generation
from content.data_pointers.lavi_april_18 import dji

if __name__ == '__main__':
    with open(dji.snapshots_60_meters_markers_locations_json_path) as f:
        markers_locations = json.load(f)
    for key, data_descriptor in dji.snapshots_60_meters.items():
        image = cv2.imread(data_descriptor.path)
        map_image = maps_generation.generate_canopies_map(image)
        map_image = cv2.cvtColor(map_image, cv2.COLOR_GRAY2RGB)
        map_image = cv_utils.mark_rectangle_on_image(map_image,
                                                     markers_locations[key])
        cv_utils.mark_bounding_box(map_image,
                                   markers_locations[key],
                                   expand_ratio=0.1)
        viz_utils.show_image(key, map_image)


# Example #9 (the snippet's opening lines are missing; the crop call below is
# reconstructed assuming a centered crop, as in the other snippets)
        cropped_image, crop_origin, _ = cv_utils.crop_region(image,
                                                             x_center=image.shape[1] // 2,
                                                             y_center=image.shape[0] // 2,
                                                             x_pixels=2700,
                                                             y_pixels=1700)
        contours, contours_mask = canopy_contours.extract_canopy_contours(
            cropped_image)
        cv2.drawContours(cropped_image,
                         contours,
                         contourIdx=-1,
                         color=(0, 255, 0),
                         thickness=3)
        idx += 1

        # Flatten the (N, 1, 2) OpenCV contour points into an (N, 2) float array
        all_contours_points = np.concatenate(contours)
        all_contours_points_2d_array = all_contours_points[:, 0, :].astype(np.float64)

        cropped_image = get_orientation(all_contours_points_2d_array,
                                        cropped_image)

        if show:
            viz_utils.show_image('image', cropped_image)
            # viz_utils.show_image('image', image)

        # TODO: try all the friends below on the data extracted from ORB/SIFT but on the contours mask
        # cv2.estimateAffine2D()
        # cv2.estimateAffinePartial2D()
        # cv2.estimateRigidTransform()

        # break
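

# Example: template-matching localization of an ROI within a map image
# (a method of an experiment class; relies on self.data_sources / self.params)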
    def task(self, **kwargs):

        viz_mode = kwargs.get('viz_mode')

        map_image_path = self.data_sources['map_image_path']
        localization_image_path = self.data_sources['localization_image_path']
        roi_center = self.data_sources['roi_center']
        map_alignment_points = self.data_sources['map_alignment_points']
        localization_alignment_points = self.data_sources[
            'localization_alignment_points']
        roi_size = self.params['roi_size']

        map_image = cv2.imread(map_image_path)
        localization_image = cv2.imread(localization_image_path)
        localization_image, _ = cv_utils.warp_image(
            localization_image, localization_alignment_points,
            map_alignment_points)
        roi_image, _, _ = cv_utils.crop_region(localization_image,
                                               roi_center[0], roi_center[1],
                                               roi_size, roi_size)
        matches_image = map_image.copy()
        cv2.circle(matches_image,
                   roi_center,
                   radius=15,
                   color=(0, 0, 255),
                   thickness=-1)
        cv2.rectangle(
            matches_image,
            (roi_center[0] - roi_size // 2, roi_center[1] - roi_size // 2),
            (roi_center[0] + roi_size // 2, roi_center[1] + roi_size // 2),
            (0, 0, 255),
            thickness=2)

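        # Try all six OpenCV template-matching scores; the SQDIFF variants are
        # minimized while the others are maximized.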
        for method in [
                'TM_CCOEFF', 'TM_CCOEFF_NORMED', 'TM_CCORR', 'TM_CCORR_NORMED',
                'TM_SQDIFF', 'TM_SQDIFF_NORMED'
        ]:
            matching_result = cv2.matchTemplate(map_image,
                                                roi_image,
                                                method=getattr(cv2, method))
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matching_result)
            if method in ['TM_SQDIFF', 'TM_SQDIFF_NORMED']:
                match_top_left = min_loc
            else:
                match_top_left = max_loc
            match_bottom_right = (match_top_left[0] + roi_image.shape[1],
                                  match_top_left[1] + roi_image.shape[0])
            match_center = (match_top_left[0] + roi_image.shape[1] // 2,
                            match_top_left[1] + roi_image.shape[0] // 2)
            cv2.rectangle(matches_image,
                          match_top_left,
                          match_bottom_right, (255, 0, 0),
                          thickness=2)
            cv2.circle(matches_image,
                       match_center,
                       radius=15,
                       color=(255, 0, 0),
                       thickness=-1)
        # Write once, after all methods have been drawn
        cv2.imwrite(os.path.join(self.repetition_dir, 'matches.jpg'),
                    matches_image)
        if viz_mode:
            viz_utils.show_image('matches image', matches_image)

        # note: match_center here comes from the last method in the loop (TM_SQDIFF_NORMED)
        self.results[self.repetition_id]['error'] = np.hypot(
            roi_center[0] - match_center[0], roi_center[1] - match_center[1])
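

# Example: publishing simulated LaserScan messages and drawing the scan points
# on video frames (the snippet picks up mid-loop; laser_scan, pub, scan_ranges,
# frame, vehicle_x/vehicle_y, ret, ts and n come from the missing opening lines)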
    laser_scan.range_max = 300 * 0.0125
    laser_scan.ranges = scan_ranges
    pub.publish(laser_scan)
    prev_scan_time = curr_scan_time

    scan_coordinates_list = cv_utils.get_coordinates_list_from_scan_ranges(
        scan_ranges, vehicle_x, vehicle_y, 0, 2 * np.pi,
        0.0125)  # TODO: incorrect!!!!!!!!
    for scan_coordinate in scan_coordinates_list:
        cv2.circle(frame, (scan_coordinate[0], scan_coordinate[1]),
                   radius=3,
                   color=(0, 0, 255),
                   thickness=-1)
    if ret:

        viz_utils.show_image('video', frame, wait_key=False)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    else:
        break

    te = time.time()
    # Incremental running mean of the per-frame processing time
    if n == 0:
        mean_ = te - ts
    else:
        mean_ = mean_ * n / float(n + 1) + (te - ts) / (n + 1)
    n += 1
    print(mean_)
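

# Example: newer trunks detection experiment task with verbose diagnostics
# (a method of an experiment class; module-level imports such as os, itertools,
# cv2, numpy as np, pandas as pd and matplotlib.pyplot as plt are omitted along
# with the rest of the module)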
    def task(self, **kwargs):

        viz_mode = kwargs.get('viz_mode')
        verbose_mode = kwargs.get('verbose')

        # Read image
        image = cv2.imread(self.data_sources)
        cv2.imwrite(os.path.join(self.repetition_dir, 'image.jpg'), image)
        if viz_mode:
            viz_utils.show_image('image', image)

        # Save contours mask
        _, canopies_mask = segmentation.extract_canopy_contours(image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'canopies_mask.jpg'), canopies_mask)

        # Crop central ROI
        cropped_image_size = int(np.min([image.shape[0], image.shape[1]]) * self.params['crop_ratio'])
        cropped_image, crop_origin, _ = cv_utils.crop_region(image, x_center=image.shape[1] // 2, y_center=image.shape[0] // 2,
                                                             x_pixels=cropped_image_size, y_pixels=cropped_image_size)
        _, cropped_canopies_mask = segmentation.extract_canopy_contours(cropped_image)
        crop_square_image = image.copy()
        cv2.rectangle(crop_square_image, crop_origin, (crop_origin[0] + cropped_image_size, crop_origin[1] + cropped_image_size),
                      color=(120, 0, 0), thickness=20)
        cv2.imwrite(os.path.join(self.repetition_dir, 'crop_square_image.jpg'), crop_square_image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'cropped_image.jpg'), cropped_image)
        if viz_mode:
            viz_utils.show_image('cropped image', cropped_image)

        # Estimate orchard orientation
        orientation, angle_to_minima_mean, angle_to_sum_vector = trunks_detection.estimate_rows_orientation(cropped_image)
        rotation_mat = cv2.getRotationMatrix2D((cropped_image.shape[1] / 2, cropped_image.shape[0] / 2), orientation * (-1), scale=1.0)
        vertical_rows_image = cv2.warpAffine(cropped_image, rotation_mat, (cropped_image.shape[1], cropped_image.shape[0]))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows.jpg'), vertical_rows_image)
        if verbose_mode:
            angle_to_minima_mean_df = pd.DataFrame(angle_to_minima_mean.values(), index=angle_to_minima_mean.keys(), columns=['minima_mean']).sort_index()
            angle_to_minima_mean_df.to_csv(os.path.join(self.repetition_dir, 'angle_to_minima_mean.csv'))
            self.results[self.repetition_id]['angle_to_minima_mean_path'] = os.path.join(self.repetition_dir, 'angle_to_minima_mean.csv')
            max_sum_value = max(map(lambda vector: vector.max(), angle_to_sum_vector.values()))
            os.mkdir(os.path.join(self.repetition_dir, 'orientation_estimation'))
            for angle in angle_to_sum_vector:
                plt.figure()
                plt.plot(angle_to_sum_vector[angle], color='green')
                plt.xlabel('x')
                plt.ylabel('column sums')
                plt.ylim([(-0.05 * max_sum_value), int(max_sum_value * 1.05)])
                plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4))
                plt.autoscale(enable=True, axis='x', tight=True)
                plt.tight_layout()
                plt.savefig(os.path.join(self.repetition_dir, 'orientation_estimation', 'sums_vector_%.2f[deg].jpg' % angle))
                rotation_mat = cv2.getRotationMatrix2D((cropped_canopies_mask.shape[1] / 2, cropped_canopies_mask.shape[0] / 2), angle, scale=1.0)
                rotated_canopies_mask = cv2.warpAffine(cropped_canopies_mask, rotation_mat, (cropped_canopies_mask.shape[1], cropped_canopies_mask.shape[0]))
                cv2.imwrite(os.path.join(self.repetition_dir, 'orientation_estimation', 'rotated_canopies_mask_%.2f[deg]_minima_mean=%.2f.jpg'
                                         % (angle, angle_to_minima_mean[angle])), rotated_canopies_mask)
        if viz_mode:
            viz_utils.show_image('vertical rows', vertical_rows_image)

        # Get tree centroids
        centroids, rotated_centroids, aisle_centers, slices_sum_vectors_and_trees, column_sums_vector = trunks_detection.find_tree_centroids(cropped_image, correction_angle=orientation * (-1))
        _, vertical_rows_canopies_mask = segmentation.extract_canopy_contours(vertical_rows_image)
        vertical_rows_aisle_centers_image = cv_utils.draw_lines_on_image(cv2.cvtColor(vertical_rows_canopies_mask, cv2.COLOR_GRAY2BGR),
                                                                         lines_list=[((center, 0), (center, vertical_rows_image.shape[0]))
                                                                         for center in aisle_centers], color=(0, 0, 255))
        slice_image, slice_row_sums_vector, tree_locations_in_row = slices_sum_vectors_and_trees[len(slices_sum_vectors_and_trees) // 2]
        tree_locations = [(slice_image.shape[1] // 2, vertical_location) for vertical_location in tree_locations_in_row]
        slice_image = cv_utils.draw_points_on_image(cv2.cvtColor(slice_image, cv2.COLOR_GRAY2BGR), tree_locations, color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_aisle_centers.jpg'), vertical_rows_aisle_centers_image)
        plt.figure()
        plt.plot(column_sums_vector, color='green')
        plt.xlabel('x')
        plt.ylabel('column sums')
        plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4))
        plt.autoscale(enable=True, axis='x', tight=True)
        plt.tight_layout()
        plt.savefig(os.path.join(self.repetition_dir, 'vertical_rows_column_sums.jpg'))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_row_slice.jpg'), slice_image)
        plt.figure(figsize=(4, 5))
        plt.plot(slice_row_sums_vector[::-1], range(len(slice_row_sums_vector)), color='green')
        plt.xlabel('row sums')
        plt.ylabel('y')
        plt.gca().set_aspect(60)
        plt.ticklabel_format(axis='x', style='sci', scilimits=(0, 4))
        plt.autoscale(enable=True, axis='y', tight=True)
        plt.tight_layout()
        plt.savefig(os.path.join(self.repetition_dir, 'slice_row_sums.jpg'))
        vertical_rows_centroids_image = cv_utils.draw_points_on_image(vertical_rows_image, itertools.chain.from_iterable(rotated_centroids), color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_centroids.jpg'), vertical_rows_centroids_image)
        centroids_image = cv_utils.draw_points_on_image(cropped_image, centroids, color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'centroids.jpg'), centroids_image)
        if viz_mode:
            viz_utils.show_image('vertical rows aisle centers', vertical_rows_aisle_centers_image)
            viz_utils.show_image('vertical rows centroids', vertical_rows_centroids_image)

        # Estimate grid parameters
        grid_dim_x, grid_dim_y = trunks_detection.estimate_grid_dimensions(rotated_centroids)
        shear, drift_vectors, drift_vectors_filtered = trunks_detection.estimate_shear(rotated_centroids)
        drift_vectors_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors, color=(255, 255, 0), arrowed=True)
        cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors.jpg'), drift_vectors_image)
        drift_vectors_filtered_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors_filtered, color=(255, 255, 0), arrowed=True)
        cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors_filtered.jpg'), drift_vectors_filtered_image)
        if viz_mode:
            viz_utils.show_image('drift vectors', drift_vectors_filtered_image)

        # Get essential grid
        essential_grid = trunks_detection.get_essential_grid(grid_dim_x, grid_dim_y, shear, orientation, n=self.params['grid_size_for_optimization'])
        essential_grid_shape = np.max(essential_grid, axis=0) - np.min(essential_grid, axis=0)
        margin = essential_grid_shape * 0.2
        essential_grid_shifted = [tuple(elem) for elem in np.array(essential_grid) - np.min(essential_grid, axis=0) + margin / 2]
        estimated_grid_image = np.full((int(essential_grid_shape[1] + margin[1]), int(essential_grid_shape[0] + margin[0]), 3), 255, dtype=np.uint8)
        estimated_grid_image = cv_utils.draw_points_on_image(estimated_grid_image, essential_grid_shifted, color=(255, 90, 0), radius=25)
        cv2.imwrite(os.path.join(self.repetition_dir, 'estimated_grid.png'), estimated_grid_image)
        if viz_mode:
            viz_utils.show_image('estimated grid', estimated_grid_image)

        # Find translation of the grid
        positioned_grid, translation, drift_vectors = trunks_detection.find_min_mse_position(centroids, essential_grid, cropped_image.shape[1], cropped_image.shape[0])
        if positioned_grid is None:
            raise ExperimentFailure
        positioned_grid_image = cv_utils.draw_points_on_image(cropped_image, positioned_grid, color=(255, 90, 0), radius=25)
        cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid_only.jpg'), positioned_grid_image)
        positioned_grid_image = cv_utils.draw_points_on_image(positioned_grid_image, centroids, color=(0, 0, 255))
        positioned_grid_image = cv_utils.draw_lines_on_image(positioned_grid_image, drift_vectors, color=(255, 255, 0), thickness=3)
        cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid.jpg'), positioned_grid_image)
        if viz_mode:
            viz_utils.show_image('positioned grid', positioned_grid_image)

        # Estimate sigma as a portion of intra-row distance
        sigma = grid_dim_y * self.params['initial_sigma_to_dim_y_ratio']

        # Get a grid of gaussians
        grid = trunks_detection.get_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, n=self.params['grid_size_for_optimization'])
        gaussians_filter = trunks_detection.get_gaussians_grid_image(grid, sigma, cropped_image.shape[1], cropped_image.shape[0])
        cv2.imwrite(os.path.join(self.repetition_dir, 'gaussians_filter.jpg'), 255.0 * gaussians_filter)
        filter_output = np.multiply(gaussians_filter, cropped_canopies_mask)
        cv2.imwrite(os.path.join(self.repetition_dir, 'filter_output.jpg'), filter_output)
        if viz_mode:
            viz_utils.show_image('gaussians filter', gaussians_filter)
            viz_utils.show_image('filter output', filter_output)

        # Optimize the squared grid
        optimized_grid, optimized_grid_args, optimization_steps = trunks_detection.optimize_grid(grid_dim_x, grid_dim_y,
                                                                                                 translation, orientation,
                                                                                                 shear, sigma,
                                                                                                 cropped_image,
                                                                                                 pattern=np.ones([self.params['grid_size_for_optimization'],self.params['grid_size_for_optimization']]))
        optimized_grid_dim_x, optimized_grid_dim_y, optimized_translation_x, optimized_translation_y, optimized_orientation, optimized_shear, optimized_sigma = optimized_grid_args
        self.results[self.repetition_id] = {'optimized_grid_dim_x': optimized_grid_dim_x,
                                            'optimized_grid_dim_y': optimized_grid_dim_y,
                                            'optimized_translation_x': optimized_translation_x,
                                            'optimized_translation_y': optimized_translation_y,
                                            'optimized_orientation': optimized_orientation,
                                            'optimized_shear': optimized_shear,
                                            'optimized_sigma': optimized_sigma}
        optimized_grid_image = cv_utils.draw_points_on_image(cropped_image, optimized_grid, color=(0, 255, 0))
        optimized_grid_image = cv_utils.draw_points_on_image(optimized_grid_image, positioned_grid, color=(255, 90, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'optimized_square_grid.jpg'), optimized_grid_image)
        if verbose_mode:
            os.mkdir(os.path.join(self.repetition_dir, 'nelder_mead_steps'))
            self.results[self.repetition_id]['optimization_steps_scores'] = {}
            for step_idx, (step_grid, step_score, step_sigma) in enumerate(optimization_steps):
                self.results[self.repetition_id]['optimization_steps_scores'][step_idx] = step_score
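                # Blend the step's Gaussian filter onto the cropped image (50/50 where the filter is non-zero)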
                step_image = cropped_image.copy()
                step_gaussians_filter = trunks_detection.get_gaussians_grid_image(step_grid, step_sigma, cropped_image.shape[1], cropped_image.shape[0])
                step_gaussians_filter = cv2.cvtColor((255.0 * step_gaussians_filter).astype(np.uint8), cv2.COLOR_GRAY2BGR)
                alpha = 0.5
                weighted = cv2.addWeighted(step_image, alpha, step_gaussians_filter, 1 - alpha, gamma=0)
                update_indices = np.where(step_gaussians_filter != 0)
                step_image[update_indices] = weighted[update_indices]
                step_image = cv_utils.draw_points_on_image(step_image, step_grid, color=(0, 255, 0))
                cv2.imwrite(os.path.join(self.repetition_dir, 'nelder_mead_steps', 'optimization_step_%d_[%.2f].jpg' % (step_idx, step_score)), step_image)
        if viz_mode:
            viz_utils.show_image('optimized square grid', optimized_grid_image)

        # Extrapolate full grid on the entire image
        full_grid_np = trunks_detection.extrapolate_full_grid(optimized_grid_dim_x, optimized_grid_dim_y, optimized_orientation, optimized_shear,
                                                              base_grid_origin=np.array(optimized_grid[0]) + np.array(crop_origin),
                                                              image_width=image.shape[1], image_height=image.shape[0])
        full_grid_image = cv_utils.draw_points_on_image(image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(0, 255, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid.jpg'), full_grid_image)
        if viz_mode:
            viz_utils.show_image('full grid', full_grid_image)

        # Match given orchard pattern to grid
        full_grid_scores_np, full_grid_pose_to_score = trunks_detection.get_grid_scores_array(full_grid_np, image, sigma)
        full_grid_with_scores_image = full_grid_image.copy()
        top_bottom_margin_size = int(0.05 * full_grid_with_scores_image.shape[0])
        left_right_margin_size = int(0.05 * full_grid_with_scores_image.shape[1])
        full_grid_with_scores_image = cv2.copyMakeBorder(full_grid_with_scores_image, top_bottom_margin_size, top_bottom_margin_size,
                                                         left_right_margin_size, left_right_margin_size, cv2.BORDER_CONSTANT,
                                                         dst=None, value=(255, 255, 255))
        for pose, score in full_grid_pose_to_score.items():
            pose = tuple(np.array(pose) + np.array([left_right_margin_size, top_bottom_margin_size]))
            full_grid_with_scores_image = cv_utils.put_shaded_text_on_image(full_grid_with_scores_image, '%.2f' % score,
                                                                            pose, color=(0, 255, 0), offset=(15, 15))
        cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid_with_scores.jpg'), full_grid_with_scores_image)
        orchard_pattern_np = self.params['orchard_pattern']
        pattern_origin, origin_to_sub_scores_array = trunks_detection.fit_pattern_on_grid(full_grid_scores_np, orchard_pattern_np)
        if pattern_origin is None:
            raise ExperimentFailure
        if verbose_mode:
            os.mkdir(os.path.join(self.repetition_dir, 'pattern_matching'))
            for step_origin, step_sub_score_array in origin_to_sub_scores_array.items():
                pattern_matching_image = image.copy()
                step_trunk_coordinates_np = full_grid_np[step_origin[0] : step_origin[0] + orchard_pattern_np.shape[0],
                                                         step_origin[1] : step_origin[1] + orchard_pattern_np.shape[1]]
                step_trunk_points_list = step_trunk_coordinates_np.flatten().tolist()
                pattern_matching_image = cv_utils.draw_points_on_image(pattern_matching_image, step_trunk_points_list, color=(255, 255, 255), radius=25)
                for i in range(step_trunk_coordinates_np.shape[0]):
                    for j in range(step_trunk_coordinates_np.shape[1]):
                        step_trunk_coordinates = (int(step_trunk_coordinates_np[(i, j)][0]), int(step_trunk_coordinates_np[(i, j)][1]))
                        pattern_matching_image = cv_utils.put_shaded_text_on_image(pattern_matching_image, '%.2f' % step_sub_score_array[(i, j)],
                                                                                   step_trunk_coordinates, color=(255, 255, 255), offset=(20, 20))
                pattern_matching_image = cv_utils.draw_points_on_image(pattern_matching_image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(0, 255, 0))
                mean_score = float(np.mean(step_sub_score_array))
                cv2.imwrite(os.path.join(self.repetition_dir, 'pattern_matching', 'origin=%d_%d_score=%.2f.jpg' %
                                         (step_origin[0], step_origin[1], mean_score)), pattern_matching_image)
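        # Cut out the sub-grid covered by the pattern; pattern cells equal to 1 mark trees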
        trunk_coordinates_np = full_grid_np[pattern_origin[0] : pattern_origin[0] + orchard_pattern_np.shape[0],
                                            pattern_origin[1] : pattern_origin[1] + orchard_pattern_np.shape[1]]
        trunk_points_list = trunk_coordinates_np[orchard_pattern_np == 1]
        trunk_coordinates_orig_np = trunk_coordinates_np.copy()
        trunk_coordinates_np[orchard_pattern_np != 1] = np.nan
        semantic_trunks_image = cv_utils.draw_points_on_image(image, trunk_points_list, color=(255, 255, 255))
        for i in range(trunk_coordinates_np.shape[0]):
            for j in range(trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(trunk_coordinates_np[(i, j)])):
                    continue
                trunk_coordinates = (int(trunk_coordinates_np[(i, j)][0]), int(trunk_coordinates_np[(i, j)][1]))
                tree_label = '%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))
                semantic_trunks_image = cv_utils.put_shaded_text_on_image(semantic_trunks_image, tree_label, trunk_coordinates,
                                                                                  color=(255, 255, 255), offset=(15, 15))
        cv2.imwrite(os.path.join(self.repetition_dir, 'semantic_trunks.jpg'), semantic_trunks_image)
        if viz_mode:
            viz_utils.show_image('semantic trunks', semantic_trunks_image)

        # Refine trunk locations
        refined_trunk_coordinates_np = trunks_detection.refine_trunk_locations(image, trunk_coordinates_np, optimized_sigma,
                                                                               optimized_grid_dim_x, optimized_grid_dim_y)  # assumption: dim_y intended (the source passed dim_x twice)
        confidence = trunks_detection.get_trees_confidence(canopies_mask, refined_trunk_coordinates_np[orchard_pattern_np == 1],
                                                           trunk_coordinates_orig_np[orchard_pattern_np == -1], optimized_sigma)
        refined_trunk_points_list = refined_trunk_coordinates_np[orchard_pattern_np == 1]
        refined_trunk_coordinates_np[orchard_pattern_np != 1] = np.nan
        refined_semantic_trunks_image = cv_utils.draw_points_on_image(image, refined_trunk_points_list, color=(255, 255, 255))
        semantic_trunks = {}
        for i in range(refined_trunk_coordinates_np.shape[0]):
            for j in range(refined_trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(refined_trunk_coordinates_np[(i, j)])):
                    continue
                trunk_coordinates = (int(refined_trunk_coordinates_np[(i, j)][0]), int(refined_trunk_coordinates_np[(i, j)][1]))
                semantic_trunks['%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))] = trunk_coordinates
                tree_label = '%d/%s' % (j + 1, chr(65 + (refined_trunk_coordinates_np.shape[0] - 1 - i)))
                refined_semantic_trunks_image = cv_utils.put_shaded_text_on_image(refined_semantic_trunks_image, tree_label, trunk_coordinates,
                                                                                  color=(255, 255, 255), offset=(15, 15))
        tree_scores_stats = trunks_detection.get_tree_scores_stats(canopies_mask, trunk_points_list, optimized_sigma)
        self.results[self.repetition_id]['semantic_trunks'] = semantic_trunks
        self.results[self.repetition_id]['tree_scores_stats'] = tree_scores_stats
        self.results[self.repetition_id]['confidence'] = confidence
        cv2.imwrite(os.path.join(self.repetition_dir, 'refined_semantic_trunks[%.2f].jpg' % confidence), refined_semantic_trunks_image)
        if viz_mode:
            viz_utils.show_image('refined semantic trunks', refined_semantic_trunks_image)


# Example #13: overlaying trunk Gaussians on a cropped orchard image
# (assumed imports; the snippet's opening lines were cut off)
import json

import cv2
import numpy as np

from framework import cv_utils, viz_utils
from computer_vision import trunks_detection_old_cv  # assumed module path
from content.data_pointers.lavi_april_18 import dji
viz_mode = True

image_key = '15-08-1'

metadata_path = r'/home/omer/Downloads/experiment_metadata_baseline.json'

if __name__ == '__main__':

    with open(metadata_path) as f:
        metadata = json.load(f)
    trunks = metadata['results']['1']['trunk_points_list']
    optimized_sigma = metadata['results']['1']['optimized_sigma']

    image = cv2.imread(dji.snapshots_80_meters[image_key].path)
    if viz_mode:
        viz_utils.show_image('image', image)

    trunks = [(int(elem[0]), int(elem[1])) for elem in trunks]
    upper_left, lower_right = cv_utils.get_bounding_box(image,
                                                        trunks,
                                                        expand_ratio=0.1)
    cropped_image = image[upper_left[1]:lower_right[1],
                          upper_left[0]:lower_right[0]]
    trunks = np.array(trunks) - np.array(upper_left)

    if viz_mode:
        viz_utils.show_image('cropped image', cropped_image)

    gaussians = trunks_detection_old_cv.get_gaussians_grid_image(
        trunks,
        optimized_sigma,
        cropped_image.shape[1],
        cropped_image.shape[0])  # assumed: width/height arguments, matching the other calls to get_gaussians_grid_image


# Example #14: manual tagging of external trunks on experiment images
# (assumed imports; the snippet's opening lines were cut off)
import os
import json

import cv2

from framework import cv_utils, utils, viz_utils  # assumed: utils provides create_new_execution_folder

#################################################################################################
#                                             CONFIG                                            #
#################################################################################################
setup = 'nov2'  # apr / nov1 / nov2
#################################################################################################

if setup == 'apr':
    from content.data_pointers.lavi_april_18.dji import trunks_detection_results_dir
    from content.data_pointers.lavi_april_18.dji import selected_trunks_detection_experiments
elif setup == 'nov1':
    from content.data_pointers.lavi_november_18.dji import trunks_detection_results_dir
    from content.data_pointers.lavi_november_18.dji import plot1_selected_trunks_detection_experiments as selected_trunks_detection_experiments
elif setup == 'nov2':
    from content.data_pointers.lavi_november_18.dji import trunks_detection_results_dir
    from content.data_pointers.lavi_november_18.dji import plot2_selected_trunks_detection_experiments as selected_trunks_detection_experiments


if __name__ == '__main__':
    execution_dir = utils.create_new_execution_folder('external_trunks_tagging')
    for experiment_name in selected_trunks_detection_experiments:
        with open(os.path.join(trunks_detection_results_dir, experiment_name, 'experiment_summary.json')) as f:
            experiment_summary = json.load(f)
        image = cv2.imread(experiment_summary['data_sources'])
        external_trunk_poses = cv_utils.sample_pixel_coordinates(image, multiple=True)
        image_with_points = cv_utils.draw_points_on_image(image, external_trunk_poses, color=(255, 255, 255))
        viz_utils.show_image('external_trunks', image_with_points)
        with open(os.path.join(trunks_detection_results_dir, experiment_name, 'external_trunks.json'), 'w') as f:
            json.dump(external_trunk_poses, f, indent=4)