def generate_cost_map(image):
    contours, canopies_mask = segmentation.extract_canopy_contours(image)
    cost_map = np.full((np.size(image, 0), np.size(image, 1)),
                       fill_value=0,
                       dtype=np.uint8)
    cv2.drawContours(cost_map,
                     contours,
                     contourIdx=-1,
                     color=255,
                     thickness=-1)
    cv2.drawContours(cost_map,
                     contours,
                     contourIdx=-1,
                     color=200,
                     thickness=90)
    cost_map = np.minimum(canopies_mask, cost_map)
    cv2.drawContours(cost_map,
                     contours,
                     contourIdx=-1,
                     color=120,
                     thickness=20)
    external_ring = np.full((np.size(image, 0), np.size(image, 1)),
                            fill_value=0,
                            dtype=np.uint8)
    cv2.drawContours(external_ring,
                     contours,
                     contourIdx=-1,
                     color=50,
                     thickness=65)
    cost_map = np.maximum(cost_map, external_ring)
    cost_map = cost_map / 255.0
    return cost_map
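A minimal usage sketch (the image path is a placeholder; per the function above, the returned cost map is a float array in [0, 1]):

import cv2
import numpy as np

image = cv2.imread('orchard.jpg')  # hypothetical aerial image
cost_map = generate_cost_map(image)
cv2.imwrite('cost_map.jpg', (255.0 * cost_map).astype(np.uint8))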
Example No. 2
    def __init__(self,
                 grid_dim_x,
                 grid_dim_y,
                 translation,
                 orientation,
                 shear,
                 sigma,
                 image,
                 n,
                 m,
                 pattern,
                 std_normalized_tree_scores_threshold=0.6):
        self.init_grid_dim_x = grid_dim_x
        self.init_grid_dim_y = grid_dim_y
        self.init_translation_x = translation[0]
        self.init_translation_y = translation[1]
        self.init_orientation = orientation
        self.init_shear = shear
        self.init_sigma = sigma
        self.canopies_mask = segmentation.extract_canopy_contours(image)[1]
        self.n = n
        self.m = m
        self.pattern = pattern
        self.std_normalized_tree_scores_threshold = std_normalized_tree_scores_threshold
        self.steps = []
        self.width = image.shape[1]
        self.height = image.shape[0]
def estimate_rows_orientation(image,
                              search_step=0.5,
                              min_distance_between_peaks=200,
                              min_peak_width=50):
    _, contours_mask = segmentation.extract_canopy_contours(image)
    angles_to_scores = {}
    for correction_angle in np.arange(start=-90, stop=90, step=search_step):
        rotation_mat = cv2.getRotationMatrix2D(
            (image.shape[1] / 2, image.shape[0] / 2),
            correction_angle,
            scale=1.0)
        rotated_contours_mask = cv2.warpAffine(
            contours_mask, rotation_mat,
            (contours_mask.shape[1], contours_mask.shape[0]))
        column_sums_vector = np.sum(rotated_contours_mask, axis=0,
                                    dtype=np.int64)  # signed, so negation below is safe
        minima_indices, _ = find_peaks(column_sums_vector * (-1),
                                       distance=min_distance_between_peaks,
                                       width=min_peak_width)
        minima_values = [column_sums_vector[index] for index in minima_indices]
        mean_minima = np.mean(
            minima_values) if len(minima_values) > 0 else 1e30
        angles_to_scores[correction_angle] = mean_minima
    # pick the angle whose aisle minima are emptiest (Python 3: dict.iteritems is gone)
    best_angle = min(angles_to_scores, key=angles_to_scores.get)
    return best_angle * (-1)
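The sweep scores each candidate angle by how empty the aisles look in the column-sum profile of the rotated mask. A self-contained sketch of that profile trick on a synthetic mask (two bright tree rows separated by a dark aisle; only numpy/scipy needed):

import numpy as np
from scipy.signal import find_peaks

mask = np.zeros((100, 300), dtype=np.uint8)
mask[:, 40:100] = 255   # left tree row
mask[:, 200:260] = 255  # right tree row
column_sums = np.sum(mask, axis=0, dtype=np.int64)
aisle_centers, _ = find_peaks(column_sums * (-1), distance=50, width=20)
print(aisle_centers)  # ~[149], the middle of the dark aisle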
def find_optimal_grid(image):
    cropped_image, _, _ = cv_utils.crop_region(image,
                                               x_center=image.shape[1] / 2,
                                               y_center=image.shape[0] / 2,
                                               x_pixels=2700,
                                               y_pixels=1700)
    points = [(541, 403), (2281, 1263)]

    _, contours_mask = segmentation.extract_canopy_contours(cropped_image)

    basic_grid = get_basic_grid(points[0],
                                points[1],
                                nodes_num_x=6,
                                nodes_num_y=4)

    delta_x_init_values = set([-int(1.8**i) for i in range(3)] +
                              [int(1.8**i) for i in range(3)])
    delta_y_init_values = set([-int(1.8**i) for i in range(3)] +
                              [int(1.8**i) for i in range(3)])
    angle_init_values = set([-int(1.4**i) for i in range(3)] +
                            [int(1.4**i) for i in range(3)])  # in degrees
    scale_init_values = np.linspace(start=0.8, stop=1.2, num=6)
    init_values_combinations = list(
        product(delta_x_init_values, delta_y_init_values, angle_init_values,
                scale_init_values))

    def objective_aux(delta_x, delta_y, angle, scale):
        return objective(basic_grid, contours_mask, delta_x, delta_y, angle,
                         scale)

    objective_values = utils.distribute_evenly_on_all_cores(
        objective_aux, init_values_combinations)
def get_grid_scores_array(full_grid_np, image, sigma):
    _, contours_mask = segmentation.extract_canopy_contours(image)
    full_grid_scores_np = np.empty(full_grid_np.shape)
    for i in range(full_grid_np.shape[0]):
        for j in range(full_grid_np.shape[1]):
            if np.any(np.isnan(full_grid_np[(i, j)])):
                full_grid_scores_np[(i, j)] = np.nan
            else:
                x, y = full_grid_np[(i, j)]
                full_grid_scores_np[(i, j)] = _trunk_point_score(
                    contours_mask, x, y, sigma)
    return full_grid_scores_np
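_trunk_point_score is project code that is not shown on this page. As a rough mental model only (an assumption, not the project's actual formula), such a score can be computed as a Gaussian-weighted overlap between the canopy mask and a kernel centered on the candidate trunk:

import numpy as np

def gaussian_overlap_score(mask, x, y, sigma):
    # Illustrative only: weight each mask pixel by a Gaussian centered at (x, y)
    ys, xs = np.mgrid[0:mask.shape[0], 0:mask.shape[1]]
    weights = np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * sigma ** 2))
    return float(np.sum(weights * (mask / 255.0)))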
    def __init__(self, grid_dim_x, grid_dim_y, translation, orientation, shear,
                 sigma, image, n):
        self.init_grid_dim_x = grid_dim_x
        self.init_grid_dim_y = grid_dim_y
        self.init_translation_x = translation[0]
        self.init_translation_y = translation[1]
        self.init_orientation = orientation
        self.init_shear = shear
        self.init_sigma = sigma
        self.contours_mask = segmentation.extract_canopy_contours(image)[1]
        self.n = n
        self.width = image.shape[1]
        self.height = image.shape[0]
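This initializer feeds the Nelder-Mead optimization shown further down this page; the flow at that call site is roughly:

opt = trunks_detection_old_cv._TrunksGridOptimization(grid_dim_x, grid_dim_y,
                                                      translation, orientation,
                                                      shear, sigma,
                                                      cropped_image, n=N)
nm = NelderMead(opt.target, opt.get_params())
optimized_grid_args, _ = nm.maximize(n_iter=30)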
def refine_trunk_locations(image,
                           trunk_coordinates_np,
                           sigma,
                           dim_x,
                           dim_y,
                           samples_along_axis=30,
                           window_shift=50):
    _, contours_mask = segmentation.extract_canopy_contours(image)
    refined_trunk_locations_df = pd.DataFrame(
        index=range(trunk_coordinates_np.shape[0]),
        columns=range(trunk_coordinates_np.shape[1]))
    window_size = int(np.max([dim_x, dim_y]) * 1.1)
    circle_radius = int(sigma * 1.2)
    for i in range(trunk_coordinates_np.shape[0]):
        for j in range(trunk_coordinates_np.shape[1]):
            if np.any(np.isnan(trunk_coordinates_np[(i, j)])):
                continue
            x, y = trunk_coordinates_np[(i, j)]
            max_score = -np.inf
            best_x, best_y = None, None
            for candidate_x, candidate_y in itertools.product(
                    np.round(
                        np.linspace(x - window_shift,
                                    x + window_shift,
                                    num=samples_along_axis)),
                    np.round(
                        np.linspace(y - window_shift,
                                    y + window_shift,
                                    num=samples_along_axis))):
                canopy_patch, _, _ = cv_utils.crop_region(
                    contours_mask, candidate_x, candidate_y, window_size,
                    window_size)
                circle_mask = np.full(canopy_patch.shape,
                                      fill_value=0,
                                      dtype=np.uint8)
                circle_mask = cv2.circle(circle_mask,
                                         center=(canopy_patch.shape[1] // 2,
                                                 canopy_patch.shape[0] // 2),
                                         radius=circle_radius,
                                         color=255,
                                         thickness=-1)
                score = np.sum(
                    cv2.bitwise_and(canopy_patch,
                                    canopy_patch,
                                    mask=circle_mask))
                if score > max_score:
                    max_score = score
                    best_x, best_y = candidate_x, candidate_y
            refined_trunk_locations_df.loc[i, j] = (best_x, best_y)
    return np.array(refined_trunk_locations_df)
Example No. 8
def get_grid_scores_array(full_grid_np, image, sigma):
    _, canopies_mask = segmentation.extract_canopy_contours(image)
    full_grid_scores_np = np.empty(full_grid_np.shape)
    full_grid_pose_to_score = {}
    for i in range(full_grid_np.shape[0]):
        for j in range(full_grid_np.shape[1]):
            if np.any(np.isnan(full_grid_np[(i, j)])):
                full_grid_scores_np[(i, j)] = np.nan
            else:
                x, y = full_grid_np[(i, j)]
                score = tree_score(canopies_mask, x, y, sigma)[1]
                full_grid_scores_np[(i, j)] = score
                full_grid_pose_to_score[(int(x), int(y))] = score
    return full_grid_scores_np, full_grid_pose_to_score
def generate_canopies_map(image,
                          lower_color=None,
                          upper_color=None,
                          min_area=None):
    contours_map = np.full((np.size(image, 0), np.size(image, 1)),
                           fill_value=0,
                           dtype=np.uint8)
    contours, _ = segmentation.extract_canopy_contours(image, lower_color,
                                                       upper_color, min_area)
    cv2.drawContours(contours_map,
                     contours,
                     contourIdx=-1,
                     color=128,
                     thickness=-1)
    cv2.drawContours(contours_map,
                     contours,
                     contourIdx=-1,
                     color=255,
                     thickness=3)
    return contours_map
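A usage sketch; the HSV bounds are illustrative placeholders (the real defaults live inside segmentation.extract_canopy_contours):

import cv2
import numpy as np

image = cv2.imread('orchard.jpg')  # hypothetical path
canopies_map = generate_canopies_map(image,
                                     lower_color=np.array([30, 40, 40]),    # assumed HSV lower bound
                                     upper_color=np.array([90, 255, 255]),  # assumed HSV upper bound
                                     min_area=500)
cv2.imwrite('canopies_map.jpg', canopies_map)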
Example No. 10
def refine_trunk_locations(image,
                           trunk_coordinates_np,
                           sigma,
                           dim_x,
                           dim_y,
                           samples_along_axis=14):
    _, canopies_mask = segmentation.extract_canopy_contours(image)
    refined_trunk_locations_df = pd.DataFrame(
        index=range(trunk_coordinates_np.shape[0]),
        columns=range(trunk_coordinates_np.shape[1]))
    window_size = int(np.max([dim_x, dim_y]) * 1.1)
    window_shift = int(sigma / 3)
    for i in range(trunk_coordinates_np.shape[0]):
        for j in range(trunk_coordinates_np.shape[1]):
            if np.any(np.isnan(trunk_coordinates_np[(i, j)])):
                continue
            x, y = trunk_coordinates_np[(i, j)]
            max_score = -np.inf
            best_x, best_y = None, None
            for candidate_x, candidate_y in itertools.product(
                    np.round(
                        np.linspace(x - window_shift,
                                    x + window_shift,
                                    num=samples_along_axis)),
                    np.round(
                        np.linspace(y - window_shift,
                                    y + window_shift,
                                    num=samples_along_axis))):
                canopy_patch, _, _ = cv_utils.crop_region(
                    canopies_mask, candidate_x, candidate_y, window_size,
                    window_size)
                score, _ = tree_score(canopy_patch, canopy_patch.shape[1] / 2,
                                      canopy_patch.shape[0] / 2, sigma)
                if score > max_score:
                    max_score = score
                    best_x, best_y = candidate_x, candidate_y
            refined_trunk_locations_df.loc[i, j] = (best_x, best_y)
    return np.array(refined_trunk_locations_df)
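The nested itertools.product call enumerates the full candidate lattice around the initial (x, y); for example, 3 samples per axis yields 9 candidates:

import itertools
import numpy as np

xs = np.round(np.linspace(10 - 2, 10 + 2, num=3))  # [8., 10., 12.]
ys = np.round(np.linspace(20 - 2, 20 + 2, num=3))  # [18., 20., 22.]
print(list(itertools.product(xs, ys)))  # 9 candidate (x, y) pairs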
def find_tree_centroids(image, correction_angle):
    rotation_mat = cv2.getRotationMatrix2D(
        (image.shape[1] / 2, image.shape[0] / 2), correction_angle,
        scale=1.0)  # TODO: verify order of coordinates
    rotated_image = cv2.warpAffine(image, rotation_mat,
                                   (image.shape[1], image.shape[0]))
    rotated_centroids = []
    _, contours_mask = segmentation.extract_canopy_contours(rotated_image)
    column_sums_vector = np.sum(contours_mask, axis=0, dtype=np.int64)
    aisle_centers, _ = find_peaks(column_sums_vector * (-1),
                                  distance=200,
                                  width=50)
    slices_and_cumsums = []
    for tree_row_left_limit, tree_row_right_limit in zip(
            aisle_centers[:-1], aisle_centers[1:]):
        tree_row = contours_mask[:, tree_row_left_limit:tree_row_right_limit]
        row_sums_vector = np.sum(tree_row, axis=1)
        tree_locations_in_row, _ = find_peaks(row_sums_vector,
                                              distance=160,
                                              width=30)
        rotated_centroids.append([
            (int(np.mean([tree_row_left_limit,
                          tree_row_right_limit])), tree_location)
            for tree_location in tree_locations_in_row
        ])
        slices_and_cumsums.append((tree_row, row_sums_vector))
    vertical_rows_centroids_np = np.float32(
        list(itertools.chain.from_iterable(rotated_centroids))).reshape(
            -1, 1, 2)
    # extend the 2x3 affine matrix to a 3x3 homogeneous matrix
    rotation_mat = np.vstack([cv2.getRotationMatrix2D(
        (image.shape[1] / 2, image.shape[0] / 2),
        correction_angle * (-1),
        scale=1.0), [0, 0, 1]])  # TODO: verify coordinates order
    centroids_np = cv2.perspectiveTransform(vertical_rows_centroids_np,
                                            rotation_mat)
    centroids = [tuple(elem) for elem in centroids_np[:, 0, :].tolist()]
    return centroids, rotated_centroids, aisle_centers, slices_and_cumsums
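cv2.perspectiveTransform expects a 3x3 matrix, which is why the 2x3 rotation matrix is extended with the homogeneous row [0, 0, 1] above. A standalone check of that step:

import cv2
import numpy as np

rot_2x3 = cv2.getRotationMatrix2D((100, 100), 90.0, scale=1.0)
rot_3x3 = np.vstack([rot_2x3, [0.0, 0.0, 1.0]])
points = np.float32([(150, 100)]).reshape(-1, 1, 2)
print(cv2.perspectiveTransform(points, rot_3x3))  # ~[[[100., 50.]]]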
Example No. 12
    image = cv2.line(image, (int(center_of_mass[0]), int(center_of_mass[1])),
                     (int(p2[0]), int(p2[1])), (255, 255, 0), 7, cv2.LINE_AA)
    return image


if __name__ == '__main__':
    idx = 0
    for image_path in image_paths_list:
        image = cv2.imread(image_path)
        # cropped_image = cv_utils.center_crop(image, 0.25, 0.25)
        cropped_image, _, _ = cv_utils.crop_region(image,
                                                   x_center=image.shape[1] / 2,
                                                   y_center=image.shape[0] / 2,
                                                   x_pixels=2700,
                                                   y_pixels=1700)
        contours, contours_mask = canopy_contours.extract_canopy_contours(
            cropped_image)
        cv2.drawContours(cropped_image,
                         contours,
                         contourIdx=-1,
                         color=(0, 255, 0),
                         thickness=3)
        idx += 1

        all_contours_points = np.concatenate([contour for contour in contours])
        all_contours_points_2d_array = np.empty((len(all_contours_points), 2),
                                                dtype=np.float64)
        for i in range(all_contours_points_2d_array.shape[0]):
            all_contours_points_2d_array[i, 0] = all_contours_points[i, 0, 0]
            all_contours_points_2d_array[i, 1] = all_contours_points[i, 0, 1]

        cropped_image = get_orientation(all_contours_points_2d_array,
    def task(self, **kwargs):

        verbose_mode = kwargs.get('verbose_mode')

        # Read params and data sources
        map_image_path = self.data_sources['map_image_path']
        localization_image_path = self.data_sources['localization_image_path']
        trajectory = self.data_sources['trajectory']
        map_semantic_trunks = self.data_sources['map_semantic_trunks']
        bounding_box_expand_ratio = self.params['bounding_box_expand_ratio']
        roi_size = self.params['roi_size']
        methods = self.params['methods']
        downsample_rate = self.params['downsample_rate']
        localization_resolution = self.params['localization_resolution']
        use_canopies_masks = self.params['use_canopies_masks']

        # Read images
        map_image = cv2.imread(map_image_path)
        localization_image = cv2.imread(localization_image_path)
        upper_left, lower_right = cv_utils.get_bounding_box(
            map_image,
            map_semantic_trunks.values(),
            expand_ratio=bounding_box_expand_ratio)
        map_image = map_image[upper_left[1]:lower_right[1],
                              upper_left[0]:lower_right[0]]
        localization_image = localization_image[upper_left[1]:lower_right[1],
                                                upper_left[0]:lower_right[0]]
        if use_canopies_masks:
            _, map_image = segmentation.extract_canopy_contours(map_image)
            _, localization_image = segmentation.extract_canopy_contours(
                localization_image)
        cv2.imwrite(os.path.join(self.experiment_dir, 'map_image.jpg'),
                    map_image)
        cv2.imwrite(
            os.path.join(self.experiment_dir, 'localization_image.jpg'),
            localization_image)

        # Initialize errors dataframe
        errors = pd.DataFrame(index=['%s_%s' % (point[0], point[1])
                                     for point in trajectory],
                              columns=methods)

        # Loop over points in trajectory
        for ugv_pose_idx, ugv_pose in enumerate(trajectory):
            if ugv_pose_idx % downsample_rate != 0:
                continue
            if ugv_pose_idx % MESSAGING_FREQUENCY == 0:
                _logger.info('At point #%d' % ugv_pose_idx)
            roi_image, _, _ = cv_utils.crop_region(localization_image,
                                                   ugv_pose[0], ugv_pose[1],
                                                   roi_size, roi_size)
            if verbose_mode:
                matches_image = map_image.copy()
                cv2.circle(matches_image,
                           tuple(ugv_pose),
                           radius=15,
                           color=(0, 0, 255),
                           thickness=-1)
                cv2.rectangle(
                    matches_image,
                    (ugv_pose[0] - roi_size // 2, ugv_pose[1] - roi_size // 2),
                    (ugv_pose[0] + roi_size // 2, ugv_pose[1] + roi_size // 2),
                    (0, 0, 255),
                    thickness=2)
            for method in methods:
                matching_result = cv2.matchTemplate(map_image,
                                                    roi_image,
                                                    method=getattr(cv2, method))
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(
                    matching_result)
                if method in ['TM_SQDIFF', 'TM_SQDIFF_NORMED']:
                    match_top_left = min_loc
                else:
                    match_top_left = max_loc
                match_bottom_right = (match_top_left[0] + roi_image.shape[1],
                                      match_top_left[1] + roi_image.shape[0])
                match_center = (match_top_left[0] + roi_image.shape[1] // 2,
                                match_top_left[1] + roi_image.shape[0] // 2)
                error = np.sqrt((ugv_pose[0] - match_center[0])**2 +
                                (ugv_pose[1] -
                                 match_center[1])**2) * localization_resolution
                errors.loc['%s_%s' % (ugv_pose[0], ugv_pose[1]),
                           method] = error
                if verbose_mode:
                    cv2.rectangle(matches_image,
                                  match_top_left,
                                  match_bottom_right, (255, 0, 0),
                                  thickness=2)
                    cv2.circle(matches_image,
                               match_center,
                               radius=15,
                               color=(255, 0, 0),
                               thickness=-1)
                    cv2.imwrite(
                        os.path.join(
                            self.repetition_dir,
                            'matches_%s_%s.jpg' % (ugv_pose[0], ugv_pose[1])),
                        matches_image)

        # Save results
        errors.to_csv(os.path.join(self.experiment_dir, 'errors.csv'))
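cv2.matchTemplate plus cv2.minMaxLoc is the core of the loop above; a self-contained sanity check on synthetic data (for the TM_SQDIFF variants the best match would be min_loc instead, as handled in the branch above):

import cv2
import numpy as np

scene = np.zeros((200, 200), dtype=np.uint8)
scene[60:100, 80:120] = 255              # a bright square
template = scene[55:105, 75:125].copy()  # a patch containing it
result = cv2.matchTemplate(scene, template, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(result)
print(max_loc, max_val)  # (75, 55), ~1.0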
Example No. 14
            points_in_baseline=points_baseline,
            transformation_type='rigid')
        warped_obstacle_in_5_6_gray_image, _ = cv_utils.warp_image(
            image=obstacle_in_5_6_gray_image,
            points_in_image=points_obstacle_in_5_6,
            points_in_baseline=points_baseline,
            transformation_type='rigid')

    # _, baseline_to_obstacle_in_3_4_diff = compare_ssim(baseline_image, warped_obstacle_in_3_4_image, full=True)
    # baseline_to_obstacle_in_3_4_diff = (baseline_to_obstacle_in_3_4_diff * 255).astype('uint8')
    # _, baseline_to_obstacle_in_4_5_diff = compare_ssim(baseline_image, warped_obstacle_in_4_5_image, full=True)
    # baseline_to_obstacle_in_4_5_diff = (baseline_to_obstacle_in_4_5_diff * 255).astype('uint8')
    # _, baseline_to_obstacle_in_5_6_diff = compare_ssim(baseline_image, warped_obstacle_in_5_6_image, full=True)
    # baseline_to_obstacle_in_5_6_diff = (baseline_to_obstacle_in_5_6_diff * 255).astype('uint8')

    _, baseline_contours_mask = segmentation.extract_canopy_contours(
        baseline_image, margin_width=15, margin_color=255)
    baseline_contours_mask = cv2.dilate(baseline_contours_mask,
                                        kernel=np.ones((20, 20), np.uint8),
                                        iterations=1)
    baseline_contours_mask = 1 - (1 / 255.0) * baseline_contours_mask
    _, obstacle_in_3_4_contours_mask = segmentation.extract_canopy_contours(
        warped_obstacle_in_3_4_image, margin_width=15, margin_color=255)
    obstacle_in_3_4_contours_mask = cv2.dilate(obstacle_in_3_4_contours_mask,
                                               kernel=np.ones((20, 20),
                                                              np.uint8),
                                               iterations=1)
    obstacle_in_3_4_contours_mask = 1 - (1 /
                                         255.0) * obstacle_in_3_4_contours_mask
    _, obstacle_in_4_5_contours_mask = segmentation.extract_canopy_contours(
        warped_obstacle_in_4_5_image, margin_width=15, margin_color=255)
    obstacle_in_4_5_contours_mask = cv2.dilate(obstacle_in_4_5_contours_mask,
Example No. 15
    def task(self, **kwargs):

        image = cv2.imread(self.data_sources['map_image_path'])
        waypoints = self.data_sources['waypoints']
        upper_left = self.data_sources['map_upper_left']
        lower_right = self.data_sources['map_lower_right']

        # Crop the image
        cropped_image = image[upper_left[1]:lower_right[1],
                              upper_left[0]:lower_right[0]]
        waypoints = (np.array(waypoints) - np.array(upper_left)).tolist()

        # Get cost map
        cost_map = maps_generation.generate_cost_map(cropped_image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'cost_map.jpg'),
                    255.0 * cost_map)

        # Plan a path
        path_planner = AstarPathPlanner(cost_map)
        trajectory = []
        for section_start, section_end in zip(waypoints[:-1], waypoints[1:]):
            trajectory += list(
                path_planner.astar(tuple(section_start), tuple(section_end)))

        # Save results
        self.results[self.repetition_id]['trajectory'] = trajectory

        trajectory_on_cost_map_image = cv2.cvtColor(np.uint8(255.0 * cost_map),
                                                    cv2.COLOR_GRAY2BGR)
        trajectory_on_cost_map_image = cv_utils.draw_points_on_image(
            trajectory_on_cost_map_image,
            trajectory,
            color=(0, 255, 255),
            radius=5)
        cv2.imwrite(
            os.path.join(self.repetition_dir, 'trajectory_on_cost_map.jpg'),
            trajectory_on_cost_map_image)
        self.results[
            self.repetition_id]['trajectory_on_cost_map_path'] = os.path.join(
                self.repetition_dir, 'trajectory_on_cost_map.jpg')

        _, trajectory_on_mask_image = segmentation.extract_canopy_contours(
            cropped_image)
        trajectory_on_mask_image = cv2.cvtColor(trajectory_on_mask_image,
                                                cv2.COLOR_GRAY2BGR)
        trajectory_on_mask_image = cv_utils.draw_points_on_image(
            trajectory_on_mask_image,
            trajectory,
            color=(0, 255, 255),
            radius=5)
        cv2.imwrite(
            os.path.join(self.repetition_dir, 'trajectory_on_mask.jpg'),
            trajectory_on_mask_image)
        self.results[
            self.repetition_id]['trajectory_on_mask_path'] = os.path.join(
                self.repetition_dir, 'trajectory_on_mask.jpg')

        trajectory_on_image = cv_utils.draw_points_on_image(cropped_image,
                                                            trajectory,
                                                            color=(0, 255,
                                                                   255),
                                                            radius=5)
        cv2.imwrite(
            os.path.join(self.repetition_dir, 'trajectory_on_image.jpg'),
            trajectory_on_image)
        self.results[
            self.repetition_id]['trajectory_on_image_path'] = os.path.join(
                self.repetition_dir, 'trajectory_on_image.jpg')
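A condensed sketch of the planning flow used in this task (maps_generation and AstarPathPlanner are this project's modules; the image path and the start/goal cells are illustrative):

image = cv2.imread('orchard.jpg')
cost_map = maps_generation.generate_cost_map(image)
path_planner = AstarPathPlanner(cost_map)
trajectory = list(path_planner.astar((100, 100), (400, 300)))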
Example No. 16
if setup == 'apr':
    from content.data_pointers.lavi_april_18.dji import trunks_detection_results_dir as td_results_dir
    from content.data_pointers.lavi_april_18.dji import selected_trunks_detection_experiments as selected_td_experiments
elif setup == 'nov1':
    from content.data_pointers.lavi_november_18.dji import trunks_detection_results_dir as td_results_dir
    from content.data_pointers.lavi_november_18.dji import plot1_selected_trunks_detection_experiments as selected_td_experiments
else:
    raise NotImplementedError

if __name__ == '__main__':
    execution_dir = utils.create_new_execution_folder('canopy_contours_drawer')
    with open(os.path.join(td_results_dir, selected_td_experiments[source_image_index], 'experiment_summary.json')) as f:
        td_summary = json.load(f)
    image = cv2.imread(td_summary['data_sources'])
    contours, canopies_mask = segmentation.extract_canopy_contours(image, min_area=min_area)
    image_with_contours = image.copy()
    cv2.drawContours(image_with_contours, contours, contourIdx=-1, color=(0, 255, 0), thickness=5)
    canopies_mask_with_contours = cv2.cvtColor(canopies_mask.copy(), cv2.COLOR_GRAY2BGR)
    cv2.drawContours(canopies_mask_with_contours, contours, contourIdx=-1, color=(0, 255, 0), thickness=5)
    canopies_mask_with_trunks = cv2.cvtColor(canopies_mask.copy(), cv2.COLOR_GRAY2BGR)
    canopies_mask_with_trunks = cv_utils.draw_points_on_image(canopies_mask_with_trunks, td_summary['results']['1']['semantic_trunks'].values(), color=(0, 220, 0))
    canopies_mask_with_labeled_trunks = canopies_mask_with_trunks.copy()
    for trunk_label, trunk_pose in td_summary['results']['1']['semantic_trunks'].items():
        canopies_mask_with_labeled_trunks = cv_utils.put_shaded_text_on_image(canopies_mask_with_labeled_trunks,
                                                                              label=trunk_label,
                                                                              location=trunk_pose,
                                                                              color=(0, 220, 0),
                                                                              offset=(15, 15))

    cv2.imwrite(os.path.join(execution_dir, 'image.jpg'), image)
Example No. 17
        apr_noon_trunks.values(),
        transformation_type='affine')
    nov_image, _ = cv_utils.warp_image(nov_image,
                                       nov_trunks.values(),
                                       apr_noon_trunks.values(),
                                       transformation_type='affine')
    cv2.imwrite(os.path.join(execution_dir, 'apr_noon.jpg'), apr_noon_image)
    cv2.imwrite(os.path.join(execution_dir, 'apr_late_noon.jpg'),
                apr_late_noon_image)
    cv2.imwrite(os.path.join(execution_dir, 'apr_afternoon.jpg'),
                apr_afternoon_image)
    cv2.imwrite(os.path.join(execution_dir, 'apr_late_afternoon.jpg'),
                apr_late_afternoon_image)
    cv2.imwrite(os.path.join(execution_dir, 'nov.jpg'), nov_image)

    apr_noon_contours, _ = segmentation.extract_canopy_contours(apr_noon_image)
    apr_late_noon_contours, _ = segmentation.extract_canopy_contours(
        apr_late_noon_image)
    apr_afternoon_contours, _ = segmentation.extract_canopy_contours(
        apr_afternoon_image)
    apr_late_afternoon_contours, _ = segmentation.extract_canopy_contours(
        apr_late_afternoon_image)
    nov_contours, _ = segmentation.extract_canopy_contours(nov_image)

    cv2.drawContours(apr_noon_image,
                     apr_noon_contours,
                     contourIdx=-1,
                     color=(0, 255, 0),
                     thickness=3)
    cv2.drawContours(apr_late_noon_image,
                     apr_late_noon_contours,
        # Estimate sigma to one third of intra-row distance
        sigma = grid_dim_y / 3

        # Get a grid of gaussians
        grid = trunks_detection_old_cv.get_grid(grid_dim_x,
                                                grid_dim_y,
                                                translation,
                                                orientation,
                                                shear,
                                                n=N)
        gaussians_filter = trunks_detection_old_cv.get_gaussians_grid_image(
            grid, sigma, cropped_image.shape[1], cropped_image.shape[0])
        if viz_mode:
            viz_utils.show_image('gaussians', gaussians_filter)
            _, contours_mask = segmentation.extract_canopy_contours(
                cropped_image)
            filter_result = np.multiply(gaussians_filter, contours_mask)
            viz_utils.show_image('filter result', filter_result)

        # OPTIMIZATION TODO: improve and arrange
        opt = trunks_detection_old_cv._TrunksGridOptimization(grid_dim_x,
                                                              grid_dim_y,
                                                              translation,
                                                              orientation,
                                                              shear,
                                                              sigma,
                                                              cropped_image,
                                                              n=N)
        nm = NelderMead(opt.target, opt.get_params())
        optimized_grid_args, _ = nm.maximize(n_iter=30)
        optimized_grid_dim_x, optimized_grid_dim_y, optimized_translation_x, optimized_translation_y, optimized_orientation, optimized_shear, optimized_sigma = optimized_grid_args
    def task(self, **kwargs):

        viz_mode = kwargs.get('viz_mode')
        verbose_mode = kwargs.get('verbose')

        # Read image
        image = cv2.imread(self.data_sources)
        cv2.imwrite(os.path.join(self.repetition_dir, 'image.jpg'), image)
        if viz_mode:
            viz_utils.show_image('image', image)

        # Save contours mask
        _, canopies_mask = segmentation.extract_canopy_contours(image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'canopies_mask.jpg'), canopies_mask)

        # Crop central ROI
        cropped_image_size = int(np.min([image.shape[0], image.shape[1]]) * self.params['crop_ratio'])
        cropped_image, crop_origin, _ = cv_utils.crop_region(image, x_center=image.shape[1] / 2, y_center=image.shape[0] / 2,
                                                             x_pixels=cropped_image_size, y_pixels=cropped_image_size)
        _, cropped_canopies_mask = segmentation.extract_canopy_contours(cropped_image)
        crop_square_image = image.copy()
        cv2.rectangle(crop_square_image, crop_origin, (crop_origin[0] + cropped_image_size, crop_origin[1] + cropped_image_size),
                      color=(120, 0, 0), thickness=20)
        cv2.imwrite(os.path.join(self.repetition_dir, 'crop_square_image.jpg'), crop_square_image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'cropped_image.jpg'), cropped_image)
        if viz_mode:
            viz_utils.show_image('cropped image', cropped_image)

        # Estimate orchard orientation
        orientation, angle_to_minima_mean, angle_to_sum_vector = trunks_detection.estimate_rows_orientation(cropped_image)
        rotation_mat = cv2.getRotationMatrix2D((cropped_image.shape[1] / 2, cropped_image.shape[0] / 2), orientation * (-1), scale=1.0)
        vertical_rows_image = cv2.warpAffine(cropped_image, rotation_mat, (cropped_image.shape[1], cropped_image.shape[0]))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows.jpg'), vertical_rows_image)
        if verbose_mode:
            angle_to_minima_mean_df = pd.DataFrame(list(angle_to_minima_mean.values()), index=list(angle_to_minima_mean.keys()), columns=['minima_mean']).sort_index()
            angle_to_minima_mean_df.to_csv(os.path.join(self.repetition_dir, 'angle_to_minima_mean.csv'))
            self.results[self.repetition_id]['angle_to_minima_mean_path'] = os.path.join(self.repetition_dir, 'angle_to_minima_mean.csv')
            max_sum_value = max(map(lambda vector: vector.max(), angle_to_sum_vector.values()))
            os.mkdir(os.path.join(self.repetition_dir, 'orientation_estimation'))
            for angle in angle_to_sum_vector:
                plt.figure()
                plt.plot(angle_to_sum_vector[angle], color='green')
                plt.xlabel('x')
                plt.ylabel('column sums')
                plt.ylim([(-0.05 * max_sum_value), int(max_sum_value * 1.05)])
                plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4))
                plt.autoscale(enable=True, axis='x', tight=True)
                plt.tight_layout()
                plt.savefig(os.path.join(self.repetition_dir, 'orientation_estimation', 'sums_vector_%.2f[deg].jpg' % angle))
                rotation_mat = cv2.getRotationMatrix2D((cropped_canopies_mask.shape[1] / 2, cropped_canopies_mask.shape[0] / 2), angle, scale=1.0)
                rotated_canopies_mask = cv2.warpAffine(cropped_canopies_mask, rotation_mat, (cropped_canopies_mask.shape[1], cropped_canopies_mask.shape[0]))
                cv2.imwrite(os.path.join(self.repetition_dir, 'orientation_estimation', 'rotated_canopies_mask_%.2f[deg]_minima_mean=%.2f.jpg'
                                         % (angle, angle_to_minima_mean[angle])), rotated_canopies_mask)
        if viz_mode:
            viz_utils.show_image('vertical rows', vertical_rows_image)

        # Get tree centroids
        centroids, rotated_centroids, aisle_centers, slices_sum_vectors_and_trees, column_sums_vector = trunks_detection.find_tree_centroids(cropped_image, correction_angle=orientation * (-1))
        _, vertical_rows_canopies_mask = segmentation.extract_canopy_contours(vertical_rows_image)
        vertical_rows_aisle_centers_image = cv_utils.draw_lines_on_image(cv2.cvtColor(vertical_rows_canopies_mask, cv2.COLOR_GRAY2BGR),
                                                                         lines_list=[((center, 0), (center, vertical_rows_image.shape[0]))
                                                                         for center in aisle_centers], color=(0, 0, 255))
        slice_image, slice_row_sums_vector, tree_locations_in_row = slices_sum_vectors_and_trees[len(slices_sum_vectors_and_trees) // 2]
        tree_locations = [(slice_image.shape[1] // 2, vertical_location) for vertical_location in tree_locations_in_row]
        slice_image = cv_utils.draw_points_on_image(cv2.cvtColor(slice_image, cv2.COLOR_GRAY2BGR), tree_locations, color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_aisle_centers.jpg'), vertical_rows_aisle_centers_image)
        plt.figure()
        plt.plot(column_sums_vector, color='green')
        plt.xlabel('x')
        plt.ylabel('column sums')
        plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4))
        plt.autoscale(enable=True, axis='x', tight=True)
        plt.tight_layout()
        plt.savefig(os.path.join(self.repetition_dir, 'vertical_rows_column_sums.jpg'))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_row_slice.jpg'), slice_image)
        plt.figure(figsize=(4, 5))
        plt.plot(slice_row_sums_vector[::-1], range(len(slice_row_sums_vector)), color='green')
        plt.xlabel('row sums')
        plt.ylabel('y')
        plt.gca().set_aspect(60)
        plt.ticklabel_format(axis='x', style='sci', scilimits=(0, 4))
        plt.autoscale(enable=True, axis='y', tight=True)
        plt.tight_layout()
        plt.savefig(os.path.join(self.repetition_dir, 'slice_row_sums.jpg'))
        vertical_rows_centroids_image = cv_utils.draw_points_on_image(vertical_rows_image, itertools.chain.from_iterable(rotated_centroids), color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_centroids.jpg'), vertical_rows_centroids_image)
        centroids_image = cv_utils.draw_points_on_image(cropped_image, centroids, color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'centroids.jpg'), centroids_image)
        if viz_mode:
            viz_utils.show_image('vertical rows aisle centers', vertical_rows_aisle_centers_image)
            viz_utils.show_image('vertical rows centroids', vertical_rows_centroids_image)

        # Estimate grid parameters
        grid_dim_x, grid_dim_y = trunks_detection.estimate_grid_dimensions(rotated_centroids)
        shear, drift_vectors, drift_vectors_filtered = trunks_detection.estimate_shear(rotated_centroids)
        drift_vectors_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors, color=(255, 255, 0), arrowed=True)
        cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors.jpg'), drift_vectors_image)
        drift_vectors_filtered_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors_filtered, color=(255, 255, 0), arrowed=True)
        cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors_filtered.jpg'), drift_vectors_filtered_image)
        if viz_mode:
            viz_utils.show_image('drift vectors', drift_vectors_filtered_image)

        # Get essential grid
        essential_grid = trunks_detection.get_essential_grid(grid_dim_x, grid_dim_y, shear, orientation, n=self.params['grid_size_for_optimization'])
        essential_grid_shape = np.max(essential_grid, axis=0) - np.min(essential_grid, axis=0)
        margin = essential_grid_shape * 0.2
        essential_grid_shifted = [tuple(elem) for elem in np.array(essential_grid) - np.min(essential_grid, axis=0) + margin / 2]
        estimated_grid_image = np.full((int(essential_grid_shape[1] + margin[1]), int(essential_grid_shape[0] + margin[0]), 3), 255, dtype=np.uint8)
        estimated_grid_image = cv_utils.draw_points_on_image(estimated_grid_image, essential_grid_shifted, color=(255, 90, 0), radius=25)
        cv2.imwrite(os.path.join(self.repetition_dir, 'estimated_grid.png'), estimated_grid_image)
        if viz_mode:
            viz_utils.show_image('estimated grid', estimated_grid_image)

        # Find translation of the grid
        positioned_grid, translation, drift_vectors = trunks_detection.find_min_mse_position(centroids, essential_grid, cropped_image.shape[1], cropped_image.shape[0])
        if positioned_grid is None:
            raise ExperimentFailure
        positioned_grid_image = cv_utils.draw_points_on_image(cropped_image, positioned_grid, color=(255, 90, 0), radius=25)
        cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid_only.jpg'), positioned_grid_image)
        positioned_grid_image = cv_utils.draw_points_on_image(positioned_grid_image, centroids, color=(0, 0, 255))
        positioned_grid_image = cv_utils.draw_lines_on_image(positioned_grid_image, drift_vectors, color=(255, 255, 0), thickness=3)
        cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid.jpg'), positioned_grid_image)
        if viz_mode:
            viz_utils.show_image('positioned grid', positioned_grid_image)

        # Estimate sigma as a portion of intra-row distance
        sigma = grid_dim_y * self.params['initial_sigma_to_dim_y_ratio']

        # Get a grid of gaussians
        grid = trunks_detection.get_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, n=self.params['grid_size_for_optimization'])
        gaussians_filter = trunks_detection.get_gaussians_grid_image(grid, sigma, cropped_image.shape[1], cropped_image.shape[0])
        cv2.imwrite(os.path.join(self.repetition_dir, 'gaussians_filter.jpg'), 255.0 * gaussians_filter)
        filter_output = np.multiply(gaussians_filter, cropped_canopies_mask)
        cv2.imwrite(os.path.join(self.repetition_dir, 'filter_output.jpg'), filter_output)
        if viz_mode:
            viz_utils.show_image('gaussians filter', gaussians_filter)
            viz_utils.show_image('filter output', filter_output)

        # Optimize the squared grid
        optimized_grid, optimized_grid_args, optimization_steps = trunks_detection.optimize_grid(grid_dim_x, grid_dim_y,
                                                                                                 translation, orientation,
                                                                                                 shear, sigma,
                                                                                                 cropped_image,
                                                                                                 pattern=np.ones([self.params['grid_size_for_optimization'],self.params['grid_size_for_optimization']]))
        optimized_grid_dim_x, optimized_grid_dim_y, optimized_translation_x, optimized_translation_y, optimized_orientation, optimized_shear, optimized_sigma = optimized_grid_args
        self.results[self.repetition_id] = {'optimized_grid_dim_x': optimized_grid_dim_x,
                                            'optimized_grid_dim_y': optimized_grid_dim_y,
                                            'optimized_translation_x': optimized_translation_x,
                                            'optimized_translation_y': optimized_translation_y,
                                            'optimized_orientation': optimized_orientation,
                                            'optimized_shear': optimized_shear,
                                            'optimized_sigma': optimized_sigma}
        optimized_grid_image = cv_utils.draw_points_on_image(cropped_image, optimized_grid, color=(0, 255, 0))
        optimized_grid_image = cv_utils.draw_points_on_image(optimized_grid_image, positioned_grid, color=(255, 90, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'optimized_square_grid.jpg'), optimized_grid_image)
        if verbose_mode:
            os.mkdir(os.path.join(self.repetition_dir, 'nelder_mead_steps'))
            self.results[self.repetition_id]['optimization_steps_scores'] = {}
            for step_idx, (step_grid, step_score, step_sigma) in enumerate(optimization_steps):
                self.results[self.repetition_id]['optimization_steps_scores'][step_idx] = step_score
                step_image = cropped_image.copy()
                step_gaussians_filter = trunks_detection.get_gaussians_grid_image(step_grid, step_sigma, cropped_image.shape[1], cropped_image.shape[0])
                step_gaussians_filter = cv2.cvtColor((255.0 * step_gaussians_filter).astype(np.uint8), cv2.COLOR_GRAY2BGR)
                alpha = 0.5
                weighted = cv2.addWeighted(step_image, alpha, step_gaussians_filter, 1 - alpha, gamma=0)
                update_indices = np.where(step_gaussians_filter != 0)
                step_image[update_indices] = weighted[update_indices]
                step_image = cv_utils.draw_points_on_image(step_image, step_grid, color=(0, 255, 0))
                cv2.imwrite(os.path.join(self.repetition_dir, 'nelder_mead_steps', 'optimization_step_%d_[%.2f].jpg' % (step_idx, step_score)), step_image)
        if viz_mode:
            viz_utils.show_image('optimized square grid', optimized_grid_image)

        # Extrapolate full grid on the entire image
        full_grid_np = trunks_detection.extrapolate_full_grid(optimized_grid_dim_x, optimized_grid_dim_y, optimized_orientation, optimized_shear,
                                                              base_grid_origin=np.array(optimized_grid[0]) + np.array(crop_origin),
                                                              image_width=image.shape[1], image_height=image.shape[0])
        full_grid_image = cv_utils.draw_points_on_image(image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(0, 255, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid.jpg'), full_grid_image)
        if viz_mode:
            viz_utils.show_image('full grid', full_grid_image)

        # Match given orchard pattern to grid
        full_grid_scores_np, full_grid_pose_to_score = trunks_detection.get_grid_scores_array(full_grid_np, image, sigma)
        full_grid_with_scores_image = full_grid_image.copy()
        top_bottom_margin_size = int(0.05 * full_grid_with_scores_image.shape[0])
        left_right_margin_size = int(0.05 * full_grid_with_scores_image.shape[1])
        full_grid_with_scores_image = cv2.copyMakeBorder(full_grid_with_scores_image, top_bottom_margin_size, top_bottom_margin_size,
                                                         left_right_margin_size, left_right_margin_size, cv2.BORDER_CONSTANT,
                                                         dst=None, value=(255, 255, 255))
        for pose, score in full_grid_pose_to_score.items():
            pose = tuple(np.array(pose) + np.array([left_right_margin_size, top_bottom_margin_size]))
            full_grid_with_scores_image = cv_utils.put_shaded_text_on_image(full_grid_with_scores_image, '%.2f' % score,
                                                                            pose, color=(0, 255, 0), offset=(15, 15))
        cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid_with_scores.jpg'), full_grid_with_scores_image)
        orchard_pattern_np = self.params['orchard_pattern']
        pattern_origin, origin_to_sub_scores_array = trunks_detection.fit_pattern_on_grid(full_grid_scores_np, orchard_pattern_np)
        if pattern_origin is None:
            raise ExperimentFailure
        if verbose_mode:
            os.mkdir(os.path.join(self.repetition_dir, 'pattern_matching'))
            for step_origin, step_sub_score_array in origin_to_sub_scores_array.items():
                pattern_matching_image = image.copy()
                step_trunk_coordinates_np = full_grid_np[step_origin[0] : step_origin[0] + orchard_pattern_np.shape[0],
                                                         step_origin[1] : step_origin[1] + orchard_pattern_np.shape[1]]
                step_trunk_points_list = step_trunk_coordinates_np.flatten().tolist()
                pattern_matching_image = cv_utils.draw_points_on_image(pattern_matching_image, step_trunk_points_list, color=(255, 255, 255), radius=25)
                for i in range(step_trunk_coordinates_np.shape[0]):
                    for j in range(step_trunk_coordinates_np.shape[1]):
                        step_trunk_coordinates = (int(step_trunk_coordinates_np[(i, j)][0]), int(step_trunk_coordinates_np[(i, j)][1]))
                        pattern_matching_image = cv_utils.put_shaded_text_on_image(pattern_matching_image, '%.2f' % step_sub_score_array[(i, j)],
                                                                                   step_trunk_coordinates, color=(255, 255, 255), offset=(20, 20))
                pattern_matching_image = cv_utils.draw_points_on_image(pattern_matching_image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(0, 255, 0))
                mean_score = float(np.mean(step_sub_score_array))
                cv2.imwrite(os.path.join(self.repetition_dir, 'pattern_matching', 'origin=%d_%d_score=%.2f.jpg' %
                                         (step_origin[0], step_origin[1], mean_score)), pattern_matching_image)
        trunk_coordinates_np = full_grid_np[pattern_origin[0] : pattern_origin[0] + orchard_pattern_np.shape[0],
                                            pattern_origin[1] : pattern_origin[1] + orchard_pattern_np.shape[1]]
        trunk_points_list = trunk_coordinates_np[orchard_pattern_np == 1]
        trunk_coordinates_orig_np = trunk_coordinates_np.copy()
        trunk_coordinates_np[orchard_pattern_np != 1] = np.nan
        semantic_trunks_image = cv_utils.draw_points_on_image(image, trunk_points_list, color=(255, 255, 255))
        for i in range(trunk_coordinates_np.shape[0]):
            for j in range(trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(trunk_coordinates_np[(i, j)])):
                    continue
                trunk_coordinates = (int(trunk_coordinates_np[(i, j)][0]), int(trunk_coordinates_np[(i, j)][1]))
                tree_label = '%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))
                semantic_trunks_image = cv_utils.put_shaded_text_on_image(semantic_trunks_image, tree_label, trunk_coordinates,
                                                                                  color=(255, 255, 255), offset=(15, 15))
        cv2.imwrite(os.path.join(self.repetition_dir, 'semantic_trunks.jpg'), semantic_trunks_image)
        if viz_mode:
            viz_utils.show_image('semantic trunks', semantic_trunks_image)

        # Refine trunk locations
        refined_trunk_coordinates_np = trunks_detection.refine_trunk_locations(image, trunk_coordinates_np, optimized_sigma,
                                                                               optimized_grid_dim_x, optimized_grid_dim_x)
        confidence = trunks_detection.get_trees_confidence(canopies_mask, refined_trunk_coordinates_np[orchard_pattern_np == 1],
                                                           trunk_coordinates_orig_np[orchard_pattern_np == -1], optimized_sigma)
        refined_trunk_points_list = refined_trunk_coordinates_np[orchard_pattern_np == 1]
        refined_trunk_coordinates_np[orchard_pattern_np != 1] = np.nan
        refined_semantic_trunks_image = cv_utils.draw_points_on_image(image, refined_trunk_points_list, color=(255, 255, 255))
        semantic_trunks = {}
        for i in range(refined_trunk_coordinates_np.shape[0]):
            for j in range(refined_trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(refined_trunk_coordinates_np[(i, j)])):
                    continue
                trunk_coordinates = (int(refined_trunk_coordinates_np[(i, j)][0]), int(refined_trunk_coordinates_np[(i, j)][1]))
                semantic_trunks['%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))] = trunk_coordinates
                tree_label = '%d/%s' % (j + 1, chr(65 + (refined_trunk_coordinates_np.shape[0] - 1 - i)))
                refined_semantic_trunks_image = cv_utils.put_shaded_text_on_image(refined_semantic_trunks_image, tree_label, trunk_coordinates,
                                                                                  color=(255, 255, 255), offset=(15, 15))
        tree_scores_stats = trunks_detection.get_tree_scores_stats(canopies_mask, trunk_points_list, optimized_sigma)
        self.results[self.repetition_id]['semantic_trunks'] = semantic_trunks
        self.results[self.repetition_id]['tree_scores_stats'] = tree_scores_stats
        self.results[self.repetition_id]['confidence'] = confidence
        cv2.imwrite(os.path.join(self.repetition_dir, 'refined_semantic_trunks[%.2f].jpg' % confidence), refined_semantic_trunks_image)
        if viz_mode:
            viz_utils.show_image('refined semantic trunks', refined_semantic_trunks_image)
    def task(self, **kwargs):

        # Read images
        image = cv2.imread(self.data_sources['image_path'])
        baseline_image = cv2.imread(self.data_sources['baseline_image_path'])
        _, baseline_canopies_mask = segmentation.extract_canopy_contours(
            baseline_image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'image.jpg'), image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'baseline_image.jpg'),
                    baseline_image)

        # Align images by markers
        marker_locations = self.data_sources['markers_locations']
        baseline_marker_locations = self.data_sources[
            'baseline_markers_locations']
        warped_image_by_markers, _ = cv_utils.warp_image(
            image=image,
            points_in_image=marker_locations,
            points_in_baseline=baseline_marker_locations)
        cv2.imwrite(os.path.join(self.repetition_dir, 'warped by markers.jpg'),
                    warped_image_by_markers)
        _, warped_canopies_mask_by_markers = segmentation.extract_canopy_contours(
            warped_image_by_markers)
        mse = cv_utils.calculate_image_similarity(
            baseline_canopies_mask,
            warped_canopies_mask_by_markers,
            method='mse')
        ssim = cv_utils.calculate_image_similarity(
            baseline_canopies_mask,
            warped_canopies_mask_by_markers,
            method='ssim')
        self.results['by_markers'] = {'mse': mse, 'ssim': ssim}

        # Align images by typical flow
        warped_image_by_orb, orb_matches_image = typical_image_alignment.orb_based_registration(
            image, baseline_image, transformation_type='affine')
        cv2.imwrite(os.path.join(self.repetition_dir, 'warped by orb.jpg'),
                    warped_image_by_orb)
        cv2.imwrite(os.path.join(self.repetition_dir, 'orb matches.jpg'),
                    orb_matches_image)
        _, warped_canopies_mask_by_orb = segmentation.extract_canopy_contours(
            warped_image_by_orb)
        mse = cv_utils.calculate_image_similarity(baseline_canopies_mask,
                                                  warped_canopies_mask_by_orb,
                                                  method='mse')
        ssim = cv_utils.calculate_image_similarity(baseline_canopies_mask,
                                                   warped_canopies_mask_by_orb,
                                                   method='ssim')
        self.results['by_orb'] = {'mse': mse, 'ssim': ssim}

        # Align images by trunks points
        trunks = self.data_sources['trunks_points']
        baseline_trunks = self.data_sources['baseline_trunks_points']
        warped_image_by_trunks, _ = cv_utils.warp_image(
            image=image,
            points_in_image=trunks,
            points_in_baseline=baseline_trunks)
        cv2.imwrite(os.path.join(self.repetition_dir, 'warped by trunks.jpg'),
                    warped_image_by_trunks)
        _, warped_canopies_mask_by_trunks = segmentation.extract_canopy_contours(
            warped_image_by_trunks)
        mse = cv_utils.calculate_image_similarity(
            baseline_canopies_mask,
            warped_canopies_mask_by_trunks,
            method='mse')
        ssim = cv_utils.calculate_image_similarity(
            baseline_canopies_mask,
            warped_canopies_mask_by_trunks,
            method='ssim')
        self.results['by_trunks'] = {'mse': mse, 'ssim': ssim}
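cv_utils.calculate_image_similarity is project code; its 'ssim' branch presumably wraps scikit-image. A minimal sketch of computing SSIM on two masks directly (assuming scikit-image >= 0.16, where the metric lives in skimage.metrics):

import numpy as np
from skimage.metrics import structural_similarity

mask_a = np.zeros((64, 64), dtype=np.uint8)
mask_b = mask_a.copy()
mask_b[20:40, 20:40] = 255
print(structural_similarity(mask_a, mask_b))  # < 1.0 since the masks differ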
    def task(self, **kwargs):

        viz_mode = kwargs.get('viz_mode')
        # Read image
        image = cv2.imread(self.data_sources)
        cv2.imwrite(os.path.join(self.repetition_dir, 'image.jpg'), image)
        if viz_mode:
            viz_utils.show_image('image', image)

        # Save contours mask
        _, contours_mask = segmentation.extract_canopy_contours(image)
        cv2.imwrite(os.path.join(self.repetition_dir, 'contours_mask.jpg'), contours_mask)

        # Crop central ROI
        cropped_image_size = np.min([image.shape[0], image.shape[1]]) * self.params['crop_ratio']
        cropped_image, crop_origin, _ = cv_utils.crop_region(image, x_center=image.shape[1] / 2, y_center=image.shape[0] / 2,
                                                             x_pixels=cropped_image_size, y_pixels=cropped_image_size)
        cv2.imwrite(os.path.join(self.repetition_dir, 'cropped_image.jpg'), cropped_image)
        if viz_mode:
            viz_utils.show_image('cropped image', cropped_image)

        # Estimate orchard orientation
        orientation = trunks_detection_old_cv.estimate_rows_orientation(cropped_image)
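        # Rotate by the negative of the estimated orientation so that the tree rows become vertical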
        rotation_mat = cv2.getRotationMatrix2D((cropped_image.shape[1] / 2, cropped_image.shape[0] / 2), orientation * (-1), scale=1.0)
        vertical_rows_image = cv2.warpAffine(cropped_image, rotation_mat, (cropped_image.shape[1], cropped_image.shape[0]))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows.jpg'), vertical_rows_image)
        if viz_mode:
            viz_utils.show_image('vertical rows', vertical_rows_image)

        # Get tree centroids
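        # Also returned: the detected aisle centers and per-row (slice, cumulative-sum) pairs, visualized below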
        centroids, rotated_centroids, aisle_centers, slices_and_cumsums = trunks_detection_old_cv.find_tree_centroids(cropped_image, correction_angle=orientation * (-1))
        vertical_rows_aisle_centers_image = cv_utils.draw_lines_on_image(vertical_rows_image, lines_list=[((center, 0), (center, vertical_rows_image.shape[0]))
                                                                         for center in aisle_centers], color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_aisle_centers.jpg'), vertical_rows_aisle_centers_image)
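        # Keep the middle row slice and its cumulative-sum vector for inspection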
        slice_image, cumsum_vector = slices_and_cumsums[len(slices_and_cumsums) / 2]
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_row_slice.jpg'), slice_image)
        fig = plt.figure()
        plt.plot(cumsum_vector)
        plt.savefig(os.path.join(self.repetition_dir, 'cumsum_vector.jpg'))
        plt.close(fig)
        vertical_rows_centroids_image = cv_utils.draw_points_on_image(vertical_rows_image, itertools.chain.from_iterable(rotated_centroids), color=(0, 0, 255))
        cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_centroids.jpg'), vertical_rows_centroids_image)
        if viz_mode:
            viz_utils.show_image('vertical rows aisle centers', vertical_rows_aisle_centers_image)
            viz_utils.show_image('vertical rows centroids', vertical_rows_centroids_image)

        # Estimate grid parameters
        grid_dim_x, grid_dim_y = trunks_detection_old_cv.estimate_grid_dimensions(rotated_centroids)
        shear, drift_vectors = trunks_detection_old_cv.estimate_shear(rotated_centroids)
        drift_vectors_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors, color=(255, 255, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors.jpg'), drift_vectors_image)
        if viz_mode:
            viz_utils.show_image('drift vectors', drift_vectors_image)

        # Get essential grid
        essential_grid = trunks_detection_old_cv.get_essential_grid(grid_dim_x, grid_dim_y, shear, orientation, n=self.params['grid_size_for_optimization'])
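        # Shift the grid into a black canvas with a 20% margin, purely for visualization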
        essential_grid_shape = np.max(essential_grid, axis=0) - np.min(essential_grid, axis=0)
        margin = essential_grid_shape * 0.2
        essential_grid_shifted = [tuple(elem) for elem in np.array(essential_grid) - np.min(essential_grid, axis=0) + margin / 2]
        estimated_grid_image = np.full((int(essential_grid_shape[1] + margin[1]), int(essential_grid_shape[0] + margin[0]), 3), 0, dtype=np.uint8)
        estimated_grid_image = cv_utils.draw_points_on_image(estimated_grid_image, essential_grid_shifted, color=(255, 0, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'estimated_grid.jpg'), estimated_grid_image)
        if viz_mode:
            viz_utils.show_image('estimated grid', estimated_grid_image)

        # Find translation of the grid
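        # A None result means no valid placement was found, so the repetition is aborted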
        positioned_grid, translation, drift_vectors = trunks_detection_old_cv.find_min_mse_position(centroids, essential_grid, cropped_image.shape[1], cropped_image.shape[0])
        if positioned_grid is None:
            raise ExperimentFailure
        positioned_grid_image = cv_utils.draw_points_on_image(cropped_image, positioned_grid, color=(255, 0, 0), radius=20)
        positioned_grid_image = cv_utils.draw_points_on_image(positioned_grid_image, centroids, color=(0, 0, 255), radius=10)
        positioned_grid_image = cv_utils.draw_lines_on_image(positioned_grid_image, drift_vectors, color=(255, 255, 0), thickness=3)
        cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid.jpg'), positioned_grid_image)
        if viz_mode:
            viz_utils.show_image('positioned grid', positioned_grid_image)

        # Estimate sigma as a portion of intra-row distance
        sigma = grid_dim_y * self.params['initial_sigma_to_dim_y_ratio']

        # Get a grid of gaussians
        grid = trunks_detection_old_cv.get_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, n=self.params['grid_size_for_optimization'])
        gaussians_filter = trunks_detection_old_cv.get_gaussians_grid_image(grid, sigma, cropped_image.shape[1], cropped_image.shape[0])
        cv2.imwrite(os.path.join(self.repetition_dir, 'gaussians_filter.jpg'), 255.0 * gaussians_filter)
        _, contours_mask = segmentation.extract_canopy_contours(cropped_image)
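        # Element-wise product: the filter responds strongly where Gaussian peaks coincide with canopy pixels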
        filter_output = np.multiply(gaussians_filter, contours_mask)
        cv2.imwrite(os.path.join(self.repetition_dir, 'filter_output.jpg'), filter_output)
        if viz_mode:
            viz_utils.show_image('gaussians filter', gaussians_filter)
            viz_utils.show_image('filter output', filter_output)

        # Optimize the grid
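        # The estimates above seed a joint optimization over grid dimensions, translation, orientation, shear and sigma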
        optimized_grid, optimized_grid_args = trunks_detection_old_cv.optimize_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, sigma, cropped_image, n=self.params['grid_size_for_optimization'])
        optimized_grid_dim_x, optimized_grid_dim_y, optimized_translation_x, optimized_translation_y, optimized_orientation, optimized_shear, optimized_sigma = optimized_grid_args
        self.results[self.repetition_id] = {'optimized_grid_dim_x': optimized_grid_dim_x,
                                            'optimized_grid_dim_y': optimized_grid_dim_y,
                                            'optimized_translation_x': optimized_translation_x,
                                            'optimized_translation_y': optimized_translation_y,
                                            'optimized_orientation': optimized_orientation,
                                            'optimized_shear': optimized_shear,
                                            'optimized_sigma': optimized_sigma}
        optimized_grid_image = cv_utils.draw_points_on_image(cropped_image, optimized_grid, color=(0, 255, 0))
        optimized_grid_image = cv_utils.draw_points_on_image(optimized_grid_image, positioned_grid, color=(255, 0, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'optimized_grid.jpg'), optimized_grid_image)
        if viz_mode:
            viz_utils.show_image('optimized grid', optimized_grid_image)

        # Extrapolate full grid on the entire image
        full_grid_np = trunks_detection_old_cv.extrapolate_full_grid(optimized_grid_dim_x, optimized_grid_dim_y, optimized_orientation, optimized_shear,
                                                                     base_grid_origin=np.array(optimized_grid[0]) + np.array(crop_origin),
                                                                     image_width=image.shape[1], image_height=image.shape[0])
        full_grid_image = cv_utils.draw_points_on_image(image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(255, 0, 0))
        cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid.jpg'), full_grid_image)
        if viz_mode:
            viz_utils.show_image('full grid', full_grid_image)

        # Match given orchard pattern to grid
        full_grid_scores_np = trunks_detection_old_cv.get_grid_scores_array(full_grid_np, image, optimized_sigma)
        orchard_pattern_np = self.params['orchard_pattern']
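        # fit_pattern_on_grid presumably slides the pattern over the score array and returns the best-matching origin (or None)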
        pattern_origin, pattern_match_score = trunks_detection_old_cv.fit_pattern_on_grid(full_grid_scores_np, orchard_pattern_np)
        if pattern_origin is None:
            raise ExperimentFailure
        self.results[self.repetition_id]['pattern_match_score'] = pattern_match_score
        trunk_coordinates_np = full_grid_np[pattern_origin[0] : pattern_origin[0] + orchard_pattern_np.shape[0],
                                            pattern_origin[1] : pattern_origin[1] + orchard_pattern_np.shape[1]]
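        # Pattern cells marked -1 denote positions without a tree; keep only real trunks and blank the rest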
        trunk_points_list = trunk_coordinates_np[orchard_pattern_np != -1]
        trunk_coordinates_np[orchard_pattern_np == -1] = np.nan
        semantic_trunks_image = cv_utils.draw_points_on_image(image, trunk_points_list, color=(255, 255, 255))
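        # Label each trunk as <column number>/<row letter>, with row 'A' at the bottom of the image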
        for i in range(trunk_coordinates_np.shape[0]):
            for j in range(trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(trunk_coordinates_np[(i, j)])):
                    continue
                label_coordinates = (int(trunk_coordinates_np[(i, j)][0]) + 15, int(trunk_coordinates_np[(i, j)][1]) + 15)
                tree_label = '%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))
                cv2.putText(semantic_trunks_image, tree_label, label_coordinates, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=2, color=(255, 255, 255), thickness=8, lineType=cv2.LINE_AA)
        cv2.imwrite(os.path.join(self.repetition_dir, 'semantic_trunks.jpg'), semantic_trunks_image)
        if viz_mode:
            viz_utils.show_image('semantic trunks', semantic_trunks_image)

        # Refine trunk locations
        refined_trunk_coordinates_np = trunks_detection_old_cv.refine_trunk_locations(image, trunk_coordinates_np, optimized_sigma,
                                                                                      optimized_grid_dim_x, optimized_grid_dim_y)
        refined_trunk_points_list = refined_trunk_coordinates_np[orchard_pattern_np != -1]
        refined_trunk_coordinates_np[orchard_pattern_np == -1] = np.nan
        refined_semantic_trunks_image = cv_utils.draw_points_on_image(image, refined_trunk_points_list, color=(255, 255, 255))
        semantic_trunks = {}
        for i in range(refined_trunk_coordinates_np.shape[0]):
            for j in range(refined_trunk_coordinates_np.shape[1]):
                if np.any(np.isnan(refined_trunk_coordinates_np[(i, j)])):
                    continue
                trunk_coordinates = (int(refined_trunk_coordinates_np[(i, j)][0]), int(refined_trunk_coordinates_np[(i, j)][1]))
                label_coordinates = tuple(np.array(trunk_coordinates) + np.array([15, 15]))
                tree_label = '%d/%s' % (j + 1, chr(65 + (refined_trunk_coordinates_np.shape[0] - 1 - i)))
                semantic_trunks[tree_label] = trunk_coordinates
                cv2.putText(refined_semantic_trunks_image, tree_label, label_coordinates, fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=2, color=(255, 255, 255), thickness=8, lineType=cv2.LINE_AA)
        cv2.imwrite(os.path.join(self.repetition_dir, 'refined_semantic_trunks.jpg'), refined_semantic_trunks_image)
        self.results[self.repetition_id]['semantic_trunks'] = semantic_trunks
        if viz_mode:
            viz_utils.show_image('refined semantic trunks', refined_semantic_trunks_image)
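
For orientation, here is a minimal, self-contained sketch of the kind of image a grid-of-gaussians filter such as the get_gaussians_grid_image call above might produce: one isotropic 2D Gaussian per grid point, accumulated and normalized to [0, 1]. This is an illustrative assumption inferred from the call site, not the actual trunks_detection_old_cv implementation; the function name gaussians_grid_image_sketch and its arguments are hypothetical.

import numpy as np


def gaussians_grid_image_sketch(grid_points, sigma, width, height):
    # Pixel coordinate grids: ys varies along rows, xs along columns
    ys, xs = np.mgrid[0:height, 0:width]
    canvas = np.zeros((height, width), dtype=np.float64)
    for x, y in grid_points:
        # Accumulate an isotropic Gaussian centered at each (hypothetical) trunk position
        canvas += np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * sigma ** 2))
    # Normalize to [0, 1] so the result can act as a multiplicative filter over a canopy mask
    if canvas.max() > 0:
        canvas /= canvas.max()
    return canvas

Multiplying such a filter with a canopy mask (as in the filter_output step above) yields high responses only where a Gaussian peak overlaps canopy pixels, which is plausibly the quantity the subsequent grid optimization targets.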