def plot_trajectory(td_summary, image_path, trajectory, waypoints_coordinates, start_waypoint_idx, output_file_path):
    """Draw a planned trajectory and its lettered waypoints on the orchard image.

    The image is cropped to the bounding box of the semantic trunks (as stored in
    the trunks-detection summary), the trajectory points are drawn in yellow, and
    each waypoint is circled and labeled with a capital letter starting from
    ``chr(start_waypoint_idx + 65)`` (i.e. 'A' for index 0).

    :param td_summary: trunks-detection experiment summary dict; only
        ``['results']['1']['semantic_trunks']`` is read.
    :param image_path: path of the source orchard image.
    :param trajectory: iterable of (x, y) points, already expressed in the
        cropped image's coordinate frame.
    :param waypoints_coordinates: iterable of (x, y) waypoints in the FULL
        image's coordinate frame (shifted by the crop origin before drawing).
    :param start_waypoint_idx: index of the first waypoint label letter.
    :param output_file_path: destination path for the rendered image.
    """
    semantic_trunks = td_summary['results']['1']['semantic_trunks']
    trunk_points_list = semantic_trunks.values()
    image = cv2.imread(image_path)
    upper_left, lower_right = cv_utils.get_bounding_box(
        image, trunk_points_list, expand_ratio=config.bounding_box_expand_ratio)
    cropped_image = image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    trajectory_image = cv_utils.draw_points_on_image(cropped_image, trajectory, color=(0, 255, 255), radius=5)
    # Waypoints are given in full-image coordinates; shift them by the crop origin
    trajectory_image = cv_utils.draw_points_on_image(
        trajectory_image,
        [tuple(np.array(coordinates) - np.array(upper_left)) for coordinates in waypoints_coordinates],
        color=(0, 255, 255), radius=30)
    # enumerate(..., start=...) replaces the manual label_idx counter
    for label_idx, coordinates in enumerate(waypoints_coordinates, start=start_waypoint_idx):
        trajectory_image = cv_utils.put_shaded_text_on_image(
            trajectory_image,
            label=chr(label_idx + 65),
            location=tuple(np.array(coordinates) - np.array(upper_left)),
            color=(0, 255, 255),
            offset=(-40, -70))
    # output_file_path is already a complete path; the previous os.path.join(...) wrapper was a no-op
    cv2.imwrite(output_file_path, trajectory_image)
def pose_callback(self, message):
    """Publish a visualization image of the laser scans around the current UGV pose.

    Handles only every ``downsample_rate``-th message; draws the canopies scan
    (red) and trunks scan (cyan) on the base visualization image, marks the
    vehicle position (magenta), crops a window around it and publishes the
    result as a ROS image message.
    """
    # Throttle: process only one message out of every downsample_rate
    self.pose_idx = (self.pose_idx + 1) % downsample_rate
    if self.pose_idx != 0:
        return
    stamp = message.header.stamp
    pose_x = message.point.x
    pose_y = message.point.y
    # Overlay both scan layers, in this order: canopies (red) then trunks (cyan)
    overlay = self.base_viz_image
    scan_layers = [(self.canopies_timestamp_to_scan, (0, 0, 255)),
                   (self.trunks_timestamp_to_scan, (255, 255, 0))]
    for timestamp_to_scan, layer_color in scan_layers:
        scan_points = cv_utils.get_coordinates_list_from_scan_ranges(
            timestamp_to_scan[stamp], pose_x, pose_y,
            self.min_angle, self.max_angle, self.resolution)
        overlay = cv_utils.draw_points_on_image(overlay, scan_points, color=layer_color, radius=3)
    # Vehicle position marker
    cv2.circle(overlay, (int(np.round(pose_x)), int(np.round(pose_y))),
               radius=5, color=(255, 0, 255), thickness=-1)
    # Crop a fixed-size window centered on the vehicle and publish
    overlay, _, _ = cv_utils.crop_region(overlay, pose_x, pose_y, self.window_size, self.window_size)
    self.image_pub.publish(self.bridge.cv2_to_imgmsg(overlay, encoding='bgr8'))
def task(self, **kwargs):
    """Run the full (legacy OpenCV) trunks-detection pipeline on one image repetition.

    Pipeline: read image -> canopy contours mask -> central crop -> estimate row
    orientation -> find per-row tree centroids -> estimate grid parameters
    (dimensions, shear) -> build and position an "essential" grid -> optimize the
    grid parameters -> extrapolate a full grid over the whole image -> fit the
    given orchard pattern -> label and refine trunk locations.

    Results (optimized grid args, pattern match score, semantic trunks) are
    stored in self.results[self.repetition_id]; intermediate images are written
    to self.repetition_dir. Raises ExperimentFailure when grid positioning or
    pattern fitting fails.

    NOTE(review): this code relies on Python 2 integer division in several
    places (e.g. list indexing with `len(...) / 2`) — under Python 3 these
    would produce floats and fail.
    """
    viz_mode = kwargs.get('viz_mode')

    # Read image
    image = cv2.imread(self.data_sources)
    cv2.imwrite(os.path.join(self.repetition_dir, 'image.jpg'), image)
    if viz_mode:
        viz_utils.show_image('image', image)

    # Save contours mask
    _, contours_mask = segmentation.extract_canopy_contours(image)
    cv2.imwrite(os.path.join(self.repetition_dir, 'contours_mask.jpg'), contours_mask)

    # Crop central ROI (square, sized as a ratio of the smaller image dimension)
    cropped_image_size = np.min([image.shape[0], image.shape[1]]) * self.params['crop_ratio']
    cropped_image, crop_origin, _ = cv_utils.crop_region(image,
                                                         x_center=image.shape[1] / 2,
                                                         y_center=image.shape[0] / 2,
                                                         x_pixels=cropped_image_size,
                                                         y_pixels=cropped_image_size)
    cv2.imwrite(os.path.join(self.repetition_dir, 'cropped_image.jpg'), cropped_image)
    if viz_mode:
        viz_utils.show_image('cropped image', cropped_image)

    # Estimate orchard orientation and rotate the crop so tree rows are vertical
    orientation = trunks_detection_old_cv.estimate_rows_orientation(cropped_image)
    rotation_mat = cv2.getRotationMatrix2D((cropped_image.shape[1] / 2, cropped_image.shape[0] / 2),
                                           orientation * (-1), scale=1.0)
    vertical_rows_image = cv2.warpAffine(cropped_image, rotation_mat,
                                         (cropped_image.shape[1], cropped_image.shape[0]))
    cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows.jpg'), vertical_rows_image)
    if viz_mode:
        viz_utils.show_image('vertical rows', vertical_rows_image)

    # Get tree centroids (both in original-crop and rotated coordinates)
    centroids, rotated_centroids, aisle_centers, slices_and_cumsums = \
        trunks_detection_old_cv.find_tree_centroids(cropped_image, correction_angle=orientation * (-1))
    vertical_rows_aisle_centers_image = cv_utils.draw_lines_on_image(
        vertical_rows_image,
        lines_list=[((center, 0), (center, vertical_rows_image.shape[0])) for center in aisle_centers],
        color=(0, 0, 255))
    cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_aisle_centers.jpg'),
                vertical_rows_aisle_centers_image)
    # Middle slice for visualization; Py2 integer division is intended here
    slice_image, cumsum_vector = slices_and_cumsums[len(slices_and_cumsums) / 2]
    cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_row_slice.jpg'), slice_image)
    fig = plt.figure()
    plt.plot(cumsum_vector)
    plt.savefig(os.path.join(self.repetition_dir, 'cumsum_vector.jpg'))
    vertical_rows_centroids_image = cv_utils.draw_points_on_image(
        vertical_rows_image, itertools.chain.from_iterable(rotated_centroids), color=(0, 0, 255))
    cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_centroids.jpg'),
                vertical_rows_centroids_image)
    if viz_mode:
        viz_utils.show_image('vertical rows aisle centers', vertical_rows_aisle_centers_image)
        viz_utils.show_image('vertical rows centroids', vertical_rows_centroids_image)

    # Estimate grid parameters (inter/intra-row spacing and shear)
    grid_dim_x, grid_dim_y = trunks_detection_old_cv.estimate_grid_dimensions(rotated_centroids)
    shear, drift_vectors = trunks_detection_old_cv.estimate_shear(rotated_centroids)
    drift_vectors_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image,
                                                       drift_vectors, color=(255, 255, 0))
    cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors.jpg'), drift_vectors_image)
    if viz_mode:
        viz_utils.show_image('drift vectors', drift_vectors_image)

    # Get essential grid (canonical n x n grid with the estimated parameters)
    essential_grid = trunks_detection_old_cv.get_essential_grid(
        grid_dim_x, grid_dim_y, shear, orientation, n=self.params['grid_size_for_optimization'])
    essential_grid_shape = np.max(essential_grid, axis=0) - np.min(essential_grid, axis=0)
    margin = essential_grid_shape * 0.2
    # Shift the grid into positive coordinates with a margin so it fits on a fresh canvas
    essential_grid_shifted = [tuple(elem) for elem in
                              np.array(essential_grid) - np.min(essential_grid, axis=0) + margin / 2]
    estimated_grid_image = np.full((int(essential_grid_shape[1] + margin[1]),
                                    int(essential_grid_shape[0] + margin[0]), 3), 0, dtype=np.uint8)
    estimated_grid_image = cv_utils.draw_points_on_image(estimated_grid_image,
                                                         essential_grid_shifted, color=(255, 0, 0))
    cv2.imwrite(os.path.join(self.repetition_dir, 'estimated_grid.jpg'), estimated_grid_image)
    if viz_mode:
        viz_utils.show_image('estimated grid', estimated_grid_image)

    # Find translation of the grid that best matches the detected centroids
    positioned_grid, translation, drift_vectors = trunks_detection_old_cv.find_min_mse_position(
        centroids, essential_grid, cropped_image.shape[1], cropped_image.shape[0])
    if positioned_grid is None:
        raise ExperimentFailure
    positioned_grid_image = cv_utils.draw_points_on_image(cropped_image, positioned_grid,
                                                          color=(255, 0, 0), radius=20)
    positioned_grid_image = cv_utils.draw_points_on_image(positioned_grid_image, centroids,
                                                          color=(0, 0, 255), radius=10)
    positioned_grid_image = cv_utils.draw_lines_on_image(positioned_grid_image, drift_vectors,
                                                         color=(255, 255, 0), thickness=3)
    cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid.jpg'), positioned_grid_image)
    if viz_mode:
        viz_utils.show_image('positioned grid', positioned_grid_image)

    # Estimate sigma as a portion of intra-row distance
    sigma = grid_dim_y * self.params['initial_sigma_to_dim_y_ratio']

    # Get a grid of gaussians and inspect its response against the canopy mask
    grid = trunks_detection_old_cv.get_grid(grid_dim_x, grid_dim_y, translation, orientation, shear,
                                            n=self.params['grid_size_for_optimization'])
    gaussians_filter = trunks_detection_old_cv.get_gaussians_grid_image(
        grid, sigma, cropped_image.shape[1], cropped_image.shape[0])
    cv2.imwrite(os.path.join(self.repetition_dir, 'gaussians_filter.jpg'), 255.0 * gaussians_filter)
    _, contours_mask = segmentation.extract_canopy_contours(cropped_image)
    filter_output = np.multiply(gaussians_filter, contours_mask)
    cv2.imwrite(os.path.join(self.repetition_dir, 'filter_output.jpg'), filter_output)
    if viz_mode:
        viz_utils.show_image('gaussians filter', gaussians_filter)
        viz_utils.show_image('filter output', filter_output)

    # Optimize the grid parameters (Nelder-Mead inside optimize_grid)
    optimized_grid, optimized_grid_args = trunks_detection_old_cv.optimize_grid(
        grid_dim_x, grid_dim_y, translation, orientation, shear, sigma, cropped_image,
        n=self.params['grid_size_for_optimization'])
    optimized_grid_dim_x, optimized_grid_dim_y, optimized_translation_x, optimized_translation_y, \
        optimized_orientation, optimized_shear, optimized_sigma = optimized_grid_args
    self.results[self.repetition_id] = {'optimized_grid_dim_x': optimized_grid_dim_x,
                                        'optimized_grid_dim_y': optimized_grid_dim_y,
                                        'optimized_translation_x': optimized_translation_x,
                                        'optimized_translation_y': optimized_translation_y,
                                        'optimized_orientation': optimized_orientation,
                                        'optimized_shear': optimized_shear,
                                        'optimized_sigma': optimized_sigma}
    optimized_grid_image = cv_utils.draw_points_on_image(cropped_image, optimized_grid, color=(0, 255, 0))
    optimized_grid_image = cv_utils.draw_points_on_image(optimized_grid_image, positioned_grid,
                                                         color=(255, 0, 0))
    cv2.imwrite(os.path.join(self.repetition_dir, 'optimized_grid.jpg'), optimized_grid_image)
    if viz_mode:
        viz_utils.show_image('optimized grid', optimized_grid_image)

    # Extrapolate full grid on the entire image (entries are tuples or a non-tuple filler)
    full_grid_np = trunks_detection_old_cv.extrapolate_full_grid(
        optimized_grid_dim_x, optimized_grid_dim_y, optimized_orientation, optimized_shear,
        base_grid_origin=np.array(optimized_grid[0]) + np.array(crop_origin),
        image_width=image.shape[1], image_height=image.shape[0])
    full_grid_image = cv_utils.draw_points_on_image(
        image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(255, 0, 0))
    cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid.jpg'), full_grid_image)
    if viz_mode:
        viz_utils.show_image('full grid', full_grid_image)

    # Match given orchard pattern to grid
    full_grid_scores_np = trunks_detection_old_cv.get_grid_scores_array(full_grid_np, image, optimized_sigma)
    orchard_pattern_np = self.params['orchard_pattern']
    pattern_origin, pattern_match_score = trunks_detection_old_cv.fit_pattern_on_grid(
        full_grid_scores_np, orchard_pattern_np)
    if pattern_origin is None:
        raise ExperimentFailure
    self.results[self.repetition_id]['pattern_match_score'] = pattern_match_score
    trunk_coordinates_np = full_grid_np[pattern_origin[0] : pattern_origin[0] + orchard_pattern_np.shape[0],
                                        pattern_origin[1] : pattern_origin[1] + orchard_pattern_np.shape[1]]
    # Pattern value -1 marks "no tree"; those grid cells are blanked to NaN below
    trunk_points_list = trunk_coordinates_np[orchard_pattern_np != -1]
    trunk_coordinates_np[orchard_pattern_np == -1] = np.nan
    semantic_trunks_image = cv_utils.draw_points_on_image(image, trunk_points_list, color=(255, 255, 255))
    for i in range(trunk_coordinates_np.shape[0]):
        for j in range(trunk_coordinates_np.shape[1]):
            if np.any(np.isnan(trunk_coordinates_np[(i, j)])):
                continue
            label_coordinates = (int(trunk_coordinates_np[(i, j)][0]) + 15,
                                 int(trunk_coordinates_np[(i, j)][1]) + 15)
            # Label format: "<column>/<row letter>", rows lettered bottom-up from 'A'
            tree_label = '%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))
            cv2.putText(semantic_trunks_image, tree_label, label_coordinates,
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(255, 255, 255),
                        thickness=8, lineType=cv2.LINE_AA)
    cv2.imwrite(os.path.join(self.repetition_dir, 'semantic_trunks.jpg'), semantic_trunks_image)
    if viz_mode:
        viz_utils.show_image('semantic trunks', semantic_trunks_image)

    # Refine trunk locations
    # NOTE(review): optimized_grid_dim_x is passed twice — the second argument
    # looks like it should be optimized_grid_dim_y; confirm against
    # trunks_detection_old_cv.refine_trunk_locations's signature.
    refined_trunk_coordinates_np = trunks_detection_old_cv.refine_trunk_locations(
        image, trunk_coordinates_np, optimized_sigma, optimized_grid_dim_x, optimized_grid_dim_x)
    refined_trunk_points_list = refined_trunk_coordinates_np[orchard_pattern_np != -1]
    refined_trunk_coordinates_np[orchard_pattern_np == -1] = np.nan
    refined_semantic_trunks_image = cv_utils.draw_points_on_image(image, refined_trunk_points_list,
                                                                  color=(255, 255, 255))
    semantic_trunks = {}
    for i in range(refined_trunk_coordinates_np.shape[0]):
        for j in range(refined_trunk_coordinates_np.shape[1]):
            if np.any(np.isnan(refined_trunk_coordinates_np[(i, j)])):
                continue
            trunk_coordinates = (int(refined_trunk_coordinates_np[(i, j)][0]),
                                 int(refined_trunk_coordinates_np[(i, j)][1]))
            label_coordinates = tuple(np.array(trunk_coordinates) + np.array([15, 15]))
            semantic_trunks['%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))] = \
                trunk_coordinates
            tree_label = '%d/%s' % (j + 1, chr(65 + (refined_trunk_coordinates_np.shape[0] - 1 - i)))
            cv2.putText(refined_semantic_trunks_image, tree_label, label_coordinates,
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(255, 255, 255),
                        thickness=8, lineType=cv2.LINE_AA)
    cv2.imwrite(os.path.join(self.repetition_dir, 'refined_semantic_trunks.jpg'),
                refined_semantic_trunks_image)
    self.results[self.repetition_id]['semantic_trunks'] = semantic_trunks
    if viz_mode:
        viz_utils.show_image('refined semantic trunks', refined_semantic_trunks_image)
rotation_mat = cv2.getRotationMatrix2D( (cropped_image.shape[1] / 2, cropped_image.shape[0] / 2), orientation * (-1), scale=1.0) # TODO: center point ? (+ coordinates order) vertical_rows_image = cv2.warpAffine( cropped_image, rotation_mat, (cropped_image.shape[1], cropped_image.shape[0])) viz_utils.show_image('vertical rows', vertical_rows_image) # Get tree centroids centroids, rotated_centroids, _, _ = trunks_detection_old_cv.find_tree_centroids( cropped_image, correction_angle=orientation * (-1)) if viz_mode: # TODO: visualize the cum sum graphs vertical_rows_centroids_image = cv_utils.draw_points_on_image( vertical_rows_image, itertools.chain.from_iterable(rotated_centroids), color=(0, 0, 255)) viz_utils.show_image('vertical rows centroids', vertical_rows_centroids_image) # centroids_image = cv_utils.draw_points_on_image(cropped_image, centroids, color=(0, 0, 255)) # viz_utils.show_image('centroids', centroids_image) # Estimate grid parameters grid_dim_x, grid_dim_y = trunks_detection_old_cv.estimate_grid_dimensions( rotated_centroids) shear, drift_vectors = trunks_detection_old_cv.estimate_shear( rotated_centroids) if viz_mode: drift_vectors_image = cv_utils.draw_lines_on_image( vertical_rows_centroids_image, drift_vectors,
}, working_dir=execution_dir, metadata=trunks_detection_summary['metadata']) experiment.run(repetitions=1) selected_trunks = { label: semantic_trunks[label] for label in [start_label_1, start_label_2, goal_label_1, goal_label_2] } trajectory_on_cost_map_image = cv2.imread( experiment.results[1]['trajectory_on_cost_map_path']) trajectory_on_cost_map_image = cv_utils.draw_points_on_image( trajectory_on_cost_map_image, [ np.array(selected_trunks[trunk_label]) - np.array(upper_left) for trunk_label in selected_trunks.keys() ], color=(0, 255, 0), radius=20) trajectory_on_image = cv2.imread( experiment.results[1]['trajectory_on_image_path']) trajectory_on_image = cv_utils.draw_points_on_image( trajectory_on_image, [ np.array(selected_trunks[trunk_label]) - np.array(upper_left) for trunk_label in selected_trunks.keys() ], color=(0, 255, 0), radius=20) for trunk_label in selected_trunks.keys(): label_location = tuple( np.array(semantic_trunks[trunk_label]) - np.array(upper_left))
max_distance=config.synthetic_scan_max_distance, resolution=dummy_resolution, r_primary_search_samples=config. synthetic_scan_r_primary_search_samples, r_secondary_search_step=config.synthetic_scan_r_secondary_search_step) if noise_sigma is not None: noise = np.random.normal(loc=0, scale=noise_sigma, size=len(scan_ranges)) scan_ranges += noise trunks_scan_points_list = cv_utils.get_coordinates_list_from_scan_ranges( scan_ranges, center_x, center_y, config.synthetic_scan_min_angle, config.synthetic_scan_max_angle, dummy_resolution) viz_image = image.copy() viz_image = cv_utils.draw_points_on_image(viz_image, trunks_scan_points_list, color=(0, 0, 255), radius=3) cv2.circle(viz_image, (center_x, center_y), radius=8, color=(255, 0, 255), thickness=-1) if draw_range_circle: cv2.circle(viz_image, (center_x, center_y), radius=config.synthetic_scan_max_distance, color=(120, 0, 0), thickness=2) viz_image, _, _ = cv_utils.crop_region( viz_image, center_x, center_y, config.synthetic_scan_max_distance * roi_expansion, config.synthetic_scan_max_distance * roi_expansion) cv2.imwrite(os.path.join(execution_dir, 'synthetic_scan.jpg'), viz_image)
break if not is_success: continue video_timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) * 1e-3 if video_timestamp not in scans_and_ugv_poses.keys(): continue laser_scan, vehicle_pose_point = scans_and_ugv_poses[video_timestamp] x = vehicle_pose_point.point.x y = vehicle_pose_point.point.y cv2.circle(frame, (x, y), radius=19, color=(255, 0, 255), thickness=4) coordinates_list = cv_utils.get_coordinates_list_from_scan_ranges( laser_scan.ranges, x, y, min_angle=-np.pi, max_angle=np.pi, resolution=resolution) frame = cv_utils.draw_points_on_image(frame, coordinates_list, color=(0, 0, 255), radius=4) out.write(frame) window_name = 'vid' cv2.imshow(window_name, frame) cv2.waitKey(1) # if frame_idx > 3000: # break cap.release() out.release() cv2.destroyAllWindows()
summary['metadata']['image_key'] = image_key summary['metadata']['altitude'] = altitude data_descriptor = snapshots[image_key] summary['data_sources'] = data_descriptor.path image = cv2.imread(data_descriptor.path) for i in range(plot_pattern.shape[0]): for j in range(plot_pattern.shape[1]): if plot_pattern[(i, j)] == -1: continue tree_label = '%d/%s' % (j + 1, chr(65 + (plot_pattern.shape[0] - 1 - i))) trunk_pose = cv_utils.sample_pixel_coordinates(image) summary['results'][0]['semantic_trunks'][ tree_label] = trunk_pose image = cv_utils.draw_points_on_image(image, [trunk_pose], color=(255, 255, 255)) cv2.putText(image, tree_label, trunk_pose, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2, color=(255, 255, 255), thickness=8, lineType=cv2.LINE_AA) # typical values summary['results'][0]['optimized_grid_dim_x'] = 270 summary['results'][0]['optimized_grid_dim_y'] = 230 summary['results'][0]['optimized_sigma'] = 80 with open(os.path.join(execution_dir, '%s_trunks.json' % image_key), 'w') as f: json.dump(summary, f, indent=4)
from content.data_pointers.lavi_november_18.dji import trunks_detection_results_dir as td_results_dir from content.data_pointers.lavi_november_18.dji import plot1_selected_trunks_detection_experiments as selected_td_experiments else: raise NotImplementedError if __name__ == '__main__': execution_dir = utils.create_new_execution_folder('canopy_contours_drawer') with open(os.path.join(td_results_dir, selected_td_experiments[source_image_index], 'experiment_summary.json')) as f: td_summary = json.load(f) image = cv2.imread(td_summary['data_sources']) contours, canopies_mask = segmentation.extract_canopy_contours(image, min_area=min_area) image_with_contours = image.copy() cv2.drawContours(image_with_contours, contours, contourIdx=-1, color=(0, 255, 0), thickness=5) canopies_mask_with_contours = cv2.cvtColor(canopies_mask.copy(), cv2.COLOR_GRAY2BGR) cv2.drawContours(canopies_mask_with_contours, contours, contourIdx=-1, color=(0, 255, 0), thickness=5) canopies_mask_with_trunks = cv2.cvtColor(canopies_mask.copy(), cv2.COLOR_GRAY2BGR) canopies_mask_with_trunks = cv_utils.draw_points_on_image(canopies_mask_with_trunks, td_summary['results']['1']['semantic_trunks'].values(), color=(0, 220, 0)) canopies_mask_with_labeled_trunks = canopies_mask_with_trunks.copy() for trunk_label, trunk_pose in td_summary['results']['1']['semantic_trunks'].items(): canopies_mask_with_labeled_trunks = cv_utils.put_shaded_text_on_image(canopies_mask_with_labeled_trunks, label=trunk_label, location=trunk_pose, color=(0, 220, 0), offset=(15, 15)) cv2.imwrite(os.path.join(execution_dir, 'image.jpg'), image) cv2.imwrite(os.path.join(execution_dir, 'canopies_mask.jpg'), canopies_mask) cv2.imwrite(os.path.join(execution_dir, 'image_with_contours.jpg'), image_with_contours) cv2.imwrite(os.path.join(execution_dir, 'canopies_mask_with_contours.jpg'), canopies_mask_with_contours) cv2.imwrite(os.path.join(execution_dir, 'canopies_mask_with_trunks.jpg'), canopies_mask_with_trunks) 
cv2.imwrite(os.path.join(execution_dir, 'canopies_mask_with_labeled_trunks.jpg'), canopies_mask_with_labeled_trunks)
def task(self, **kwargs):
    """Plan an A* trajectory between waypoints on the orchard cost map.

    Crops the map image to the configured region, builds a cost map, plans
    section-by-section between consecutive waypoints, and saves the trajectory
    plus three overlay images (cost map, canopies mask, plain image), recording
    their paths in self.results[self.repetition_id].
    """
    source_image = cv2.imread(self.data_sources['map_image_path'])
    waypoints = self.data_sources['waypoints']
    upper_left = self.data_sources['map_upper_left']
    lower_right = self.data_sources['map_lower_right']

    # Crop the working region and express waypoints in cropped coordinates
    cropped_image = source_image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]]
    waypoints = (np.array(waypoints) - np.array(upper_left)).tolist()

    # Build the traversal cost map
    cost_map = maps_generation.generate_cost_map(cropped_image)
    cv2.imwrite(os.path.join(self.repetition_dir, 'cost_map.jpg'), 255.0 * cost_map)

    # Plan each section between consecutive waypoints and concatenate
    planner = AstarPathPlanner(cost_map)
    trajectory = []
    for segment_start, segment_end in zip(waypoints[:-1], waypoints[1:]):
        trajectory += list(planner.astar(tuple(segment_start), tuple(segment_end)))

    # Save results
    self.results[self.repetition_id]['trajectory'] = trajectory

    def _save_trajectory_overlay(base_image, file_basename, result_key):
        # Draw the trajectory on base_image, write it out and record its path.
        overlay = cv_utils.draw_points_on_image(base_image, trajectory,
                                                color=(0, 255, 255), radius=5)
        output_path = os.path.join(self.repetition_dir, file_basename)
        cv2.imwrite(output_path, overlay)
        self.results[self.repetition_id][result_key] = output_path

    _save_trajectory_overlay(cv2.cvtColor(np.uint8(255.0 * cost_map), cv2.COLOR_GRAY2BGR),
                             'trajectory_on_cost_map.jpg', 'trajectory_on_cost_map_path')
    _, canopies_mask = segmentation.extract_canopy_contours(cropped_image)
    _save_trajectory_overlay(cv2.cvtColor(canopies_mask, cv2.COLOR_GRAY2BGR),
                             'trajectory_on_mask.jpg', 'trajectory_on_mask_path')
    _save_trajectory_overlay(cropped_image,
                             'trajectory_on_image.jpg', 'trajectory_on_image_path')
def task(self, **kwargs): viz_mode = kwargs.get('viz_mode') verbose_mode = kwargs.get('verbose') # Read image image = cv2.imread(self.data_sources) cv2.imwrite(os.path.join(self.repetition_dir, 'image.jpg'), image) if viz_mode: viz_utils.show_image('image', image) # Save contours mask _, canopies_mask = segmentation.extract_canopy_contours(image) cv2.imwrite(os.path.join(self.repetition_dir, 'canopies_mask.jpg'), canopies_mask) # Crop central ROI cropped_image_size = int(np.min([image.shape[0], image.shape[1]]) * self.params['crop_ratio']) cropped_image, crop_origin, _ = cv_utils.crop_region(image, x_center=image.shape[1] / 2, y_center=image.shape[0] / 2, x_pixels=cropped_image_size, y_pixels=cropped_image_size) _, cropped_canopies_mask = segmentation.extract_canopy_contours(cropped_image) crop_square_image = image.copy() cv2.rectangle(crop_square_image, crop_origin, (crop_origin[0] + cropped_image_size, crop_origin[1] + cropped_image_size), color=(120, 0, 0), thickness=20) cv2.imwrite(os.path.join(self.repetition_dir, 'crop_square_image.jpg'), crop_square_image) cv2.imwrite(os.path.join(self.repetition_dir, 'cropped_image.jpg'), cropped_image) if viz_mode: viz_utils.show_image('cropped image', cropped_image) # Estimate orchard orientation orientation, angle_to_minima_mean, angle_to_sum_vector = trunks_detection.estimate_rows_orientation(cropped_image) rotation_mat = cv2.getRotationMatrix2D((cropped_image.shape[1] / 2, cropped_image.shape[0] / 2), orientation * (-1), scale=1.0) vertical_rows_image = cv2.warpAffine(cropped_image, rotation_mat, (cropped_image.shape[1], cropped_image.shape[0])) cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows.jpg'), vertical_rows_image) if verbose_mode: angle_to_minima_mean_df = pd.DataFrame(angle_to_minima_mean.values(), index=angle_to_minima_mean.keys(), columns=['minima_mean']).sort_index() angle_to_minima_mean_df.to_csv(os.path.join(self.repetition_dir, 'angle_to_minima_mean.csv')) 
self.results[self.repetition_id]['angle_to_minima_mean_path'] = os.path.join(self.repetition_dir, 'angle_to_minima_mean.csv') max_sum_value = max(map(lambda vector: vector.max(), angle_to_sum_vector.values())) os.mkdir(os.path.join(self.repetition_dir, 'orientation_estimation')) for angle in angle_to_sum_vector: plt.figure() plt.plot(angle_to_sum_vector[angle], color='green') plt.xlabel('x') plt.ylabel('column sums') plt.ylim([(-0.05 * max_sum_value), int(max_sum_value * 1.05)]) plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4)) plt.autoscale(enable=True, axis='x', tight=True) plt.tight_layout() plt.savefig(os.path.join(self.repetition_dir, 'orientation_estimation', 'sums_vector_%.2f[deg].jpg' % angle)) rotation_mat = cv2.getRotationMatrix2D((cropped_canopies_mask.shape[1] / 2, cropped_canopies_mask.shape[0] / 2), angle, scale=1.0) rotated_canopies_mask = cv2.warpAffine(cropped_canopies_mask, rotation_mat, (cropped_canopies_mask.shape[1], cropped_canopies_mask.shape[0])) cv2.imwrite(os.path.join(self.repetition_dir, 'orientation_estimation', 'rotated_canopies_mask_%.2f[deg]_minima_mean=%.2f.jpg' % (angle, angle_to_minima_mean[angle])), rotated_canopies_mask) if viz_mode: viz_utils.show_image('vertical rows', vertical_rows_image) # Get tree centroids centroids, rotated_centroids, aisle_centers, slices_sum_vectors_and_trees, column_sums_vector = trunks_detection.find_tree_centroids(cropped_image, correction_angle=orientation * (-1)) _, vertical_rows_canopies_mask = segmentation.extract_canopy_contours(vertical_rows_image) vertical_rows_aisle_centers_image = cv_utils.draw_lines_on_image(cv2.cvtColor(vertical_rows_canopies_mask, cv2.COLOR_GRAY2BGR), lines_list=[((center, 0), (center, vertical_rows_image.shape[0])) for center in aisle_centers], color=(0, 0, 255)) slice_image, slice_row_sums_vector, tree_locations_in_row = slices_sum_vectors_and_trees[len(slices_sum_vectors_and_trees) / 2] tree_locations = [(slice_image.shape[1] / 2, vertical_location) for 
vertical_location in tree_locations_in_row] slice_image = cv_utils.draw_points_on_image(cv2.cvtColor(slice_image, cv2.COLOR_GRAY2BGR), tree_locations, color=(0, 0, 255)) cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_aisle_centers.jpg'), vertical_rows_aisle_centers_image) plt.figure() plt.plot(column_sums_vector, color='green') plt.xlabel('x') plt.ylabel('column sums') plt.ticklabel_format(axis='y', style='sci', scilimits=(0, 4)) plt.autoscale(enable=True, axis='x', tight=True) plt.tight_layout() plt.savefig(os.path.join(self.repetition_dir, 'vertical_rows_column_sums.jpg')) cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_row_slice.jpg'), slice_image) plt.figure(figsize=(4, 5)) plt.plot(slice_row_sums_vector[::-1], range(len(slice_row_sums_vector)), color='green') plt.xlabel('row sums') plt.ylabel('y') plt.axes().set_aspect(60) plt.ticklabel_format(axis='x', style='sci', scilimits=(0, 4)) plt.autoscale(enable=True, axis='y', tight=True) plt.tight_layout() plt.savefig(os.path.join(self.repetition_dir, 'slice_row_sums.jpg')) vertical_rows_centroids_image = cv_utils.draw_points_on_image(vertical_rows_image, itertools.chain.from_iterable(rotated_centroids), color=(0, 0, 255)) cv2.imwrite(os.path.join(self.repetition_dir, 'vertical_rows_centroids.jpg'), vertical_rows_centroids_image) centroids_image = cv_utils.draw_points_on_image(cropped_image, centroids, color=(0, 0, 255)) cv2.imwrite(os.path.join(self.repetition_dir, 'centroids.jpg'), centroids_image) if viz_mode: viz_utils.show_image('vertical rows aisle centers', vertical_rows_aisle_centers_image) viz_utils.show_image('vertical rows centroids', vertical_rows_centroids_image) # Estimate grid parameters grid_dim_x, grid_dim_y = trunks_detection.estimate_grid_dimensions(rotated_centroids) shear, drift_vectors, drift_vectors_filtered = trunks_detection.estimate_shear(rotated_centroids) drift_vectors_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors, color=(255, 255, 
0), arrowed=True) cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors.jpg'), drift_vectors_image) drift_vectors_filtered_image = cv_utils.draw_lines_on_image(vertical_rows_centroids_image, drift_vectors_filtered, color=(255, 255, 0), arrowed=True) cv2.imwrite(os.path.join(self.repetition_dir, 'drift_vectors_filtered.jpg'), drift_vectors_filtered_image) if viz_mode: viz_utils.show_image('drift vectors', drift_vectors_filtered_image) # Get essential grid essential_grid = trunks_detection.get_essential_grid(grid_dim_x, grid_dim_y, shear, orientation, n=self.params['grid_size_for_optimization']) essential_grid_shape = np.max(essential_grid, axis=0) - np.min(essential_grid, axis=0) margin = essential_grid_shape * 0.2 essential_grid_shifted = [tuple(elem) for elem in np.array(essential_grid) - np.min(essential_grid, axis=0) + margin / 2] estimated_grid_image = np.full((int(essential_grid_shape[1] + margin[1]), int(essential_grid_shape[0] + margin[0]), 3), 255, dtype=np.uint8) estimated_grid_image = cv_utils.draw_points_on_image(estimated_grid_image, essential_grid_shifted, color=(255, 90, 0), radius=25) cv2.imwrite(os.path.join(self.repetition_dir, 'estimated_grid.png'), estimated_grid_image) if viz_mode: viz_utils.show_image('estimated grid', estimated_grid_image) # Find translation of the grid positioned_grid, translation, drift_vectors = trunks_detection.find_min_mse_position(centroids, essential_grid, cropped_image.shape[1], cropped_image.shape[0]) if positioned_grid is None: raise ExperimentFailure positioned_grid_image = cv_utils.draw_points_on_image(cropped_image, positioned_grid, color=(255, 90, 0), radius=25) cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid_only.jpg'), positioned_grid_image) positioned_grid_image = cv_utils.draw_points_on_image(positioned_grid_image, centroids, color=(0, 0, 255)) positioned_grid_image = cv_utils.draw_lines_on_image(positioned_grid_image, drift_vectors, color=(255, 255, 0), thickness=3) 
cv2.imwrite(os.path.join(self.repetition_dir, 'positioned_grid.jpg'), positioned_grid_image) if viz_mode: viz_utils.show_image('positioned grid', positioned_grid_image) # Estimate sigma as a portion of intra-row distance sigma = grid_dim_y * self.params['initial_sigma_to_dim_y_ratio'] # Get a grid of gaussians grid = trunks_detection.get_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, n=self.params['grid_size_for_optimization']) gaussians_filter = trunks_detection.get_gaussians_grid_image(grid, sigma, cropped_image.shape[1], cropped_image.shape[0]) cv2.imwrite(os.path.join(self.repetition_dir, 'gaussians_filter.jpg'), 255.0 * gaussians_filter) filter_output = np.multiply(gaussians_filter, cropped_canopies_mask) cv2.imwrite(os.path.join(self.repetition_dir, 'filter_output.jpg'), filter_output) if viz_mode: viz_utils.show_image('gaussians filter', gaussians_filter) viz_utils.show_image('filter output', filter_output) # Optimize the squared grid optimized_grid, optimized_grid_args, optimization_steps = trunks_detection.optimize_grid(grid_dim_x, grid_dim_y, translation, orientation, shear, sigma, cropped_image, pattern=np.ones([self.params['grid_size_for_optimization'],self.params['grid_size_for_optimization']])) optimized_grid_dim_x, optimized_grid_dim_y, optimized_translation_x, optimized_translation_y, optimized_orientation, optimized_shear, optimized_sigma = optimized_grid_args self.results[self.repetition_id] = {'optimized_grid_dim_x': optimized_grid_dim_x, 'optimized_grid_dim_y': optimized_grid_dim_y, 'optimized_translation_x': optimized_translation_x, 'optimized_translation_y': optimized_translation_y, 'optimized_orientation': optimized_orientation, 'optimized_shear': optimized_shear, 'optimized_sigma': optimized_sigma} optimized_grid_image = cv_utils.draw_points_on_image(cropped_image, optimized_grid, color=(0, 255, 0)) optimized_grid_image = cv_utils.draw_points_on_image(optimized_grid_image, positioned_grid, color=(255, 90, 0)) 
cv2.imwrite(os.path.join(self.repetition_dir, 'optimized_square_grid.jpg'), optimized_grid_image) if verbose_mode: os.mkdir(os.path.join(self.repetition_dir, 'nelder_mead_steps')) self.results[self.repetition_id]['optimization_steps_scores'] = {} for step_idx, (step_grid, step_score, step_sigma) in enumerate(optimization_steps): self.results[self.repetition_id]['optimization_steps_scores'][step_idx] = step_score step_image = cropped_image.copy() step_gaussians_filter = trunks_detection.get_gaussians_grid_image(step_grid, step_sigma, cropped_image.shape[1], cropped_image.shape[0]) step_gaussians_filter = cv2.cvtColor((255.0 * step_gaussians_filter).astype(np.uint8), cv2.COLOR_GRAY2BGR) alpha = 0.5 weighted = cv2.addWeighted(step_image, alpha, step_gaussians_filter, 1 - alpha, gamma=0) update_indices = np.where(step_gaussians_filter != 0) step_image[update_indices] = weighted[update_indices] step_image = cv_utils.draw_points_on_image(step_image, step_grid, color=(0, 255, 0)) cv2.imwrite(os.path.join(self.repetition_dir, 'nelder_mead_steps', 'optimization_step_%d_[%.2f].jpg' % (step_idx, step_score)), step_image) if viz_mode: viz_utils.show_image('optimized square grid', optimized_grid_image) # Extrapolate full grid on the entire image full_grid_np = trunks_detection.extrapolate_full_grid(optimized_grid_dim_x, optimized_grid_dim_y, optimized_orientation, optimized_shear, base_grid_origin=np.array(optimized_grid[0]) + np.array(crop_origin), image_width=image.shape[1], image_height=image.shape[0]) full_grid_image = cv_utils.draw_points_on_image(image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(0, 255, 0)) cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid.jpg'), full_grid_image) if viz_mode: viz_utils.show_image('full grid', full_grid_image) # Match given orchard pattern to grid full_grid_scores_np, full_grid_pose_to_score = trunks_detection.get_grid_scores_array(full_grid_np, image, sigma) full_grid_with_scores_image = 
full_grid_image.copy() top_bottom_margin_size = int(0.05 * full_grid_with_scores_image.shape[0]) left_right_marign_size = int(0.05 * full_grid_with_scores_image.shape[1]) full_grid_with_scores_image = cv2.copyMakeBorder(full_grid_with_scores_image, top_bottom_margin_size, top_bottom_margin_size, left_right_marign_size, left_right_marign_size, cv2.BORDER_CONSTANT, dst=None, value=(255, 255, 255)) for pose, score in full_grid_pose_to_score.items(): pose = tuple(np.array(pose) + np.array([left_right_marign_size, top_bottom_margin_size])) full_grid_with_scores_image = cv_utils.put_shaded_text_on_image(full_grid_with_scores_image, '%.2f' % score, pose, color=(0, 255, 0), offset=(15, 15)) cv2.imwrite(os.path.join(self.repetition_dir, 'full_grid_with_scores.jpg'), full_grid_with_scores_image) orchard_pattern_np = self.params['orchard_pattern'] pattern_origin, origin_to_sub_scores_array = trunks_detection.fit_pattern_on_grid(full_grid_scores_np, orchard_pattern_np) if pattern_origin is None: raise ExperimentFailure if verbose_mode: os.mkdir(os.path.join(self.repetition_dir, 'pattern_matching')) for step_origin, step_sub_score_array in origin_to_sub_scores_array.items(): pattern_matching_image = image.copy() step_trunk_coordinates_np = full_grid_np[step_origin[0] : step_origin[0] + orchard_pattern_np.shape[0], step_origin[1] : step_origin[1] + orchard_pattern_np.shape[1]] step_trunk_points_list = step_trunk_coordinates_np.flatten().tolist() pattern_matching_image = cv_utils.draw_points_on_image(pattern_matching_image, step_trunk_points_list, color=(255, 255, 255), radius=25) for i in range(step_trunk_coordinates_np.shape[0]): for j in range(step_trunk_coordinates_np.shape[1]): step_trunk_coordinates = (int(step_trunk_coordinates_np[(i, j)][0]), int(step_trunk_coordinates_np[(i, j)][1])) pattern_matching_image = cv_utils.put_shaded_text_on_image(pattern_matching_image, '%.2f' % step_sub_score_array[(i, j)], step_trunk_coordinates, color=(255, 255, 255), offset=(20, 20)) 
pattern_matching_image = cv_utils.draw_points_on_image(pattern_matching_image, [elem for elem in full_grid_np.flatten() if type(elem) is tuple], color=(0, 255, 0)) mean_score = float(np.mean(step_sub_score_array)) cv2.imwrite(os.path.join(self.repetition_dir, 'pattern_matching', 'origin=%d_%d_score=%.2f.jpg' % (step_origin[0], step_origin[1], mean_score)), pattern_matching_image) trunk_coordinates_np = full_grid_np[pattern_origin[0] : pattern_origin[0] + orchard_pattern_np.shape[0], pattern_origin[1] : pattern_origin[1] + orchard_pattern_np.shape[1]] trunk_points_list = trunk_coordinates_np[orchard_pattern_np == 1] trunk_coordinates_orig_np = trunk_coordinates_np.copy() trunk_coordinates_np[orchard_pattern_np != 1] = np.nan semantic_trunks_image = cv_utils.draw_points_on_image(image, trunk_points_list, color=(255, 255, 255)) for i in range(trunk_coordinates_np.shape[0]): for j in range(trunk_coordinates_np.shape[1]): if np.any(np.isnan(trunk_coordinates_np[(i, j)])): continue trunk_coordinates = (int(trunk_coordinates_np[(i, j)][0]), int(trunk_coordinates_np[(i, j)][1])) tree_label = '%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i))) semantic_trunks_image = cv_utils.put_shaded_text_on_image(semantic_trunks_image, tree_label, trunk_coordinates, color=(255, 255, 255), offset=(15, 15)) cv2.imwrite(os.path.join(self.repetition_dir, 'semantic_trunks.jpg'), semantic_trunks_image) if viz_mode: viz_utils.show_image('semantic trunks', semantic_trunks_image) # Refine trunk locations refined_trunk_coordinates_np = trunks_detection.refine_trunk_locations(image, trunk_coordinates_np, optimized_sigma, optimized_grid_dim_x, optimized_grid_dim_x) confidence = trunks_detection.get_trees_confidence(canopies_mask, refined_trunk_coordinates_np[orchard_pattern_np == 1], trunk_coordinates_orig_np[orchard_pattern_np == -1], optimized_sigma) refined_trunk_points_list = refined_trunk_coordinates_np[orchard_pattern_np == 1] refined_trunk_coordinates_np[orchard_pattern_np 
!= 1] = np.nan refined_semantic_trunks_image = cv_utils.draw_points_on_image(image, refined_trunk_points_list, color=(255, 255, 255)) semantic_trunks = {} for i in range(refined_trunk_coordinates_np.shape[0]): for j in range(refined_trunk_coordinates_np.shape[1]): if np.any(np.isnan(refined_trunk_coordinates_np[(i, j)])): continue trunk_coordinates = (int(refined_trunk_coordinates_np[(i, j)][0]), int(refined_trunk_coordinates_np[(i, j)][1])) semantic_trunks['%d/%s' % (j + 1, chr(65 + (trunk_coordinates_np.shape[0] - 1 - i)))] = trunk_coordinates tree_label = '%d/%s' % (j + 1, chr(65 + (refined_trunk_coordinates_np.shape[0] - 1 - i))) refined_semantic_trunks_image = cv_utils.put_shaded_text_on_image(refined_semantic_trunks_image, tree_label, trunk_coordinates, color=(255, 255, 255), offset=(15, 15)) tree_scores_stats = trunks_detection.get_tree_scores_stats(canopies_mask, trunk_points_list, optimized_sigma) self.results[self.repetition_id]['semantic_trunks'] = semantic_trunks self.results[self.repetition_id]['tree_scores_stats'] = tree_scores_stats self.results[self.repetition_id]['confidence'] = confidence cv2.imwrite(os.path.join(self.repetition_dir, 'refined_semantic_trunks[%.2f].jpg' % confidence), refined_semantic_trunks_image) if viz_mode: viz_utils.show_image('refined semantic trunks', refined_semantic_trunks_image)
trunks, expand_ratio=0.1) cropped_image = image[upper_left[1]:lower_right[1], upper_left[0]:lower_right[0]] trunks = np.array(trunks) - np.array(upper_left) if viz_mode: viz_utils.show_image('cropped image', cropped_image) gaussians = trunks_detection_old_cv.get_gaussians_grid_image( trunks, optimized_sigma, cropped_image.shape[1], cropped_image.shape[0], scale_factor=0.7, square_size_to_sigma_ratio=3, circle_radius_to_sigma_ratio=3) if viz_mode: viz_utils.show_image('gaussians', gaussians) contours, contours_mask = segmentation.extract_canopy_contours( cropped_image) cost_map = cv2.bitwise_and(gaussians, gaussians, mask=contours_mask) cost_map = cv_utils.draw_points_on_image(cost_map, trunks, color=255, radius=20) # cost_map = cv2.drawContours(cost_map, contours, contourIdx=-1, color=255, thickness=3) if viz_mode: viz_utils.show_image('cost_map', cost_map)
#################################################################################################
#                                           CONFIG                                              #
#################################################################################################
# Selects which dataset's trunks-detection experiments to tag.
setup = 'nov2'  # apr / nov1 / nov2
#################################################################################################

if setup == 'apr':
    from content.data_pointers.lavi_april_18.dji import trunks_detection_results_dir
    from content.data_pointers.lavi_april_18.dji import selected_trunks_detection_experiments
elif setup == 'nov1':
    from content.data_pointers.lavi_november_18.dji import trunks_detection_results_dir
    from content.data_pointers.lavi_november_18.dji import plot1_selected_trunks_detection_experiments as selected_trunks_detection_experiments
elif setup == 'nov2':
    from content.data_pointers.lavi_november_18.dji import trunks_detection_results_dir
    from content.data_pointers.lavi_november_18.dji import plot2_selected_trunks_detection_experiments as selected_trunks_detection_experiments
else:
    # Fail fast on a misconfigured setup value; previously a typo here left
    # trunks_detection_results_dir / selected_trunks_detection_experiments
    # undefined and the script died later with an unhelpful NameError.
    raise ValueError("unknown setup: %r (expected 'apr', 'nov1' or 'nov2')" % setup)


if __name__ == '__main__':
    # Manual tagging tool: for each selected trunks-detection experiment, show the
    # experiment's source image, let the user click on "external" trunks, and save
    # the clicked pixel coordinates to external_trunks.json next to the experiment
    # results.
    execution_dir = utils.create_new_execution_folder('external_trunks_tagging')  # NOTE(review): return value unused; presumably kept for its side effect — confirm
    for experiment_name in selected_trunks_detection_experiments:
        # Read the experiment summary to locate the image the experiment ran on.
        with open(os.path.join(trunks_detection_results_dir, experiment_name, 'experiment_summary.json')) as f:
            experiment_summary = json.load(f)
        image = cv2.imread(experiment_summary['data_sources'])
        # Interactive step: the user clicks every external trunk in the image.
        external_trunk_poses = cv_utils.sample_pixel_coordinates(image, multiple=True)
        # Show the tagged points back to the user for visual confirmation.
        image_with_points = cv_utils.draw_points_on_image(image, external_trunk_poses, color=(255, 255, 255))
        viz_utils.show_image('external_trunks', image_with_points)
        # Persist the tagged coordinates alongside the experiment's other outputs.
        with open(os.path.join(trunks_detection_results_dir, experiment_name, 'external_trunks.json'), 'w') as f:
            json.dump(external_trunk_poses, f, indent=4)