def approximate_environment(environment_points, step=0.1, min_number_of_approximated_points=30):
    """Approximate an environment cloud with RANSAC shapes snapped to a grid.

    Args:
        environment_points: (N, 3) array of environment point coordinates.
        step: grid cell size used to quantize the found shape points.
        min_number_of_approximated_points: kept for interface compatibility;
            not used by the current implementation.

    Returns:
        Tuple (points, normals): grid-snapped shape points and their
        estimated normals, each an (M, 3) array.
    """
    source = PointsObject()
    source.add_points(environment_points)
    source_normals = source.get_normals()

    found_shapes = shape_recognition.RANSAC(
        environment_points, source_normals,
        number_of_points_threshold=environment_points.shape[0] * 0.1,
        number_of_iterations=10,
        min_pc_number=environment_points.shape[0] * 0.3,
        number_of_subsets=10,
        use_planes=True, use_box=False, use_sphere=False,
        use_cylinder=True, use_cone=False)

    snapped_points = np.empty((0, 3))
    snapped_normals = np.empty((0, 3))
    for shape_points in found_shapes:
        # Snap each found shape onto a regular grid with cell size `step`.
        snapped = np.round(shape_points / step) * step
        holder = PointsObject()
        holder.add_points(snapped)
        snapped_points = np.append(snapped_points, snapped, axis=0)
        snapped_normals = np.append(snapped_normals, holder.get_normals(), axis=0)
    return snapped_points, snapped_normals
def check_normals_estimation():
    """Visually compare normals of the full room cloud and the reduced cloud."""
    room = download_point_cloud.download_to_object("3d_map/room.pcd")
    room_points = room.get_points()[0]
    # Shorten normals (divide by 100) so the tip cloud hugs the surface.
    normal_tips = PointsObject(room_points + room.get_normals() / 100)
    visualization.visualize([room, normal_tips])

    d_x = 0.1
    reduced_points, reduced_normals, _ = data_generation.reduce_environment_points(
        room.get_points()[0], room.get_normals(), d_x)
    reduced_cloud = PointsObject(reduced_points, np.full(reduced_points.shape, 0.3))
    reduced_tips = PointsObject(reduced_points + reduced_normals / 100)
    visualization.visualize([reduced_cloud, reduced_tips])
def temp():
    """Rotate a marker vector, then rotate it back via the normal-angle axis."""
    ground_truth_vector = [0, 1, 0]
    rotated_marker = PointsObject()
    rotated_marker.add_points(np.asarray([ground_truth_vector]),
                              np.asarray([[1, 0, 0]]))
    reference_marker = PointsObject()
    reference_marker.add_points(np.asarray([ground_truth_vector]))

    rotated_marker.rotate([1, 1, 1], math.radians(60))
    normal = rotated_marker.get_points()[0][0]
    # Recover the rotation that maps the ground-truth vector onto `normal`.
    angle = shape_recognition.angle_between_normals(ground_truth_vector, normal)
    axis = np.cross(ground_truth_vector, normal)
    rotated_marker.rotate(axis, angle)
    visualization.visualize_object([rotated_marker, reference_marker])
def save_point_cloud_from_VREP():
    """Grab one RGB-D frame from V-REP and store it as a point cloud."""
    depth_im, rgb_im = save_images_from_VREP("preDiploma_PC/")
    xyz, colors = image_processing.calculate_point_cloud(rgb_im, depth_im)
    cloud = PointsObject()
    cloud.add_points(xyz, colors)
    cloud.save_all_points("preDiploma_PC/", "box")
def generate_color_shapes(found_points, probabilities):
    """Build a cloud whose colors encode probability (blue=low, red=high).

    Args:
        found_points: (N, 3) point coordinates.
        probabilities: (N,) probabilities used to derive each point's hue.

    Returns:
        PointsObject holding the points with their probability colors.
    """
    blue = 0.7
    hsv = np.ones([probabilities.shape[0], 3])
    # Hue runs from `blue` (lowest probability) down to 0 (highest).
    hsv[:, 0] = blue - probabilities / np.max(probabilities) * blue
    rgb = matplotlib.colors.hsv_to_rgb(hsv)
    colored_cloud = PointsObject()
    colored_cloud.add_points(found_points, rgb)
    return colored_cloud
def save_point_cloud_from_images():
    """Build a point cloud from saved RGB and depth images and store it."""
    rgb = image_processing.load_image("preDiploma_PC/", "rgb_box_0.png")
    depth = image_processing.load_image("preDiploma_PC/", "depth_box_0.png", "depth")
    # Normalize 8-bit images to [0, 1] before reprojection.
    xyz, colors = image_processing.calculate_point_cloud(rgb / 255, depth / 255)
    cloud = PointsObject()
    cloud.add_points(xyz, colors)
    cloud.save_all_points("preDiploma_PC/", "box")
def object_from_point_cloud(path, number_of_points):
    """Load a .ply file into a PointsObject scaled by 0.5.

    Args:
        path: path to the .ply file.
        number_of_points: requested number of active points; clamped to the
            actual cloud size (may be None to keep all).

    Returns:
        The scaled PointsObject.
    """
    xyz_points, rgb_points = download_point_cloud.download_ply(path)
    available = xyz_points.shape[0]
    if number_of_points is not None and number_of_points > available:
        number_of_points = available
    loaded = PointsObject()
    loaded.set_points(xyz_points, rgb_points, number_of_points)
    loaded.scale(0.5)
    return loaded
def object_from_picture():
    """Segment one moving object from an RGB-D frame and display its cloud."""
    mask, depth, rgb = get_moving_mask()
    object_mask = get_one_object_mask(mask / 255, depth / 255,
                                      depth_threshold=0.05, number_of_object=1)
    # Zero out depth everywhere except the selected object before projecting.
    xyz_points, rgb_points = image_processing.calculate_point_cloud(
        rgb / 255, depth * object_mask / 255)
    cloud = PointsObject()
    cloud.set_points(xyz_points, rgb_points)
    visualization.visualize_object([cloud])
def fill_the_shape_part():
    """Run RANSAC on the saved box cloud and visualize the recognized shapes.

    Loads "preDiploma_PC/box.pcd", prints the elapsed RANSAC time, and shows
    the source model together with each found shape in a random color.

    Fixes: the original created `ball = PointsObject()` and immediately
    overwrote it (dead assignment), and carried large blocks of
    commented-out experiment code, both removed here.
    """
    full_model = download_point_cloud.download_to_object("preDiploma_PC/box.pcd")

    start = time.time()
    found_shapes = shape_recognition.RANSAC(full_model.get_points()[0],
                                            full_model.get_normals())
    print(time.time() - start)

    shapes = [full_model]
    for s in found_shapes:
        new_shape = PointsObject()
        # Paint every point of a found shape with one random color.
        random_color = [random.random(), random.random(), random.random()]
        new_shape.add_points(s, np.asarray([random_color] * s.shape[0]))
        shapes.append(new_shape)
    visualization.visualize_object(shapes)
def generate_trajectory(points, trajectory_fun, trajectory_param, ttime):
    """Create shifted copies of a point cloud along a trajectory.

    Args:
        points: PointsObject whose points are moved.
        trajectory_fun: callable mapping (trajectory_param, ttime) to shifts.
        trajectory_param: parameters forwarded to trajectory_fun.
        ttime: time stamps of the trajectory samples.

    Returns:
        Tuple (shapes, centers): one PointsObject per time step and an array
        of shifted center positions.
    """
    base_points = points.get_points()[0]
    base_center = np.mean(base_points, axis=0)
    shifts = trajectory_fun(trajectory_param, ttime)

    shapes_to_return = []
    center_trajectory = []
    for shift in shifts:
        moved = PointsObject()
        moved.add_points(base_points + shift)
        shapes_to_return.append(moved)
        center_trajectory.append(base_center + shift)
    return shapes_to_return, np.asarray(center_trajectory)
def check_data_generation():
    """Capture a room frame via V-REP, save the resulting cloud, and show it."""
    data_generation.save_images_from_VREP()
    depth = image_processing.load_image("3d_map/", "room_depth0.png", "depth")
    rgb = image_processing.load_image("3d_map/", "room_rgb0.png")
    xyz, colors = image_processing.calculate_point_cloud(rgb / 255, depth / 255)
    room = PointsObject()
    room.add_points(xyz, colors)
    room.save_all_points("3d_map/", "room")
    visualization.visualize([room])
def check_RANSAC():
    """Recognize shapes in the saved box cloud and visualize them."""
    full_model = download_point_cloud.download_to_object("preDiploma_PC/box.pcd")
    found_shapes = shape_recognition.RANSAC(full_model.get_points()[0],
                                            full_model.get_normals())
    shapes = [full_model]
    for shape_points in found_shapes:
        colored = PointsObject()
        # One random color per recognized shape.
        one_color = [[random.random(), random.random(), random.random()]]
        colored.add_points(shape_points,
                           np.asarray(one_color * shape_points.shape[0]))
        shapes.append(colored)
    visualization.visualize_object(shapes)
def download_to_object(path, number_of_points=None):
    """Download a point-cloud file and make an instance of PointsObject.

    Args:
        path (str): Path to a point-cloud file readable by open3d
            (e.g. .ply or .pcd).
        number_of_points (int): number of active points which will be added
            to the object; None keeps all points.

    Returns:
        PointsObject built from the file's points and colors.
    """
    from points_object import PointsObject
    pcd = open3d.io.read_point_cloud(path)
    # Renamed from `object` to avoid shadowing the builtin.
    points_object = PointsObject(np.asarray(pcd.points),
                                 np.asarray(pcd.colors),
                                 camera_position=np.asarray([0, 0, 0]),
                                 number=number_of_points)
    return points_object
def create_movement_path(object, angles_params, shift_params, observation_time):
    """Generate rotations, shifts and moved copies of an object over time.

    Args:
        object: PointsObject to move (parameter name kept for caller
            compatibility although it shadows the builtin).
        angles_params: polynomial coefficients for the rotation angles;
            flipped so generate_func receives them in its expected order.
        shift_params: polynomial coefficients for the translation.
        observation_time: 1-D array of observation time stamps.

    Returns:
        Tuple (rotations, centers, points): per-step rotation angles,
        absolute center positions, and moved PointsObject copies.

    Fixes: removed the unused local `number_of_points` and the unused loop
    variable `t` from the original.
    """
    angles_params = np.flip(angles_params, axis=1)
    shift_params = np.flip(shift_params, axis=1)
    shifts = generate_func(shift_params, observation_time)
    rotations = generate_func(angles_params, observation_time)
    points = []
    zero_t_points = object.get_points()[0]
    for i in range(observation_time.shape[0]):
        moved = PointsObject()
        moved.add_points(zero_t_points)
        moved.rotate(rotations[i])
        moved.shift(shifts[i])
        points.append(moved)
    return rotations, shifts + object.get_center(), points
def try_vrep_connection():
    """Check that vrep_functions and PointsObject cooperate correctly."""
    client_id = vrep_functions.vrep_connection()
    vrep_functions.vrep_start_sim(client_id)
    kinect_rgb_id = vrep_functions.get_object_id(client_id, 'kinect_rgb')
    kinect_depth_id = vrep_functions.get_object_id(client_id, 'kinect_depth')
    depth_im, rgb_im = vrep_functions.vrep_get_kinect_images(
        client_id, kinect_rgb_id, kinect_depth_id)
    print(depth_im.shape, rgb_im.shape)
    vrep_functions.vrep_stop_sim(client_id)

    # NOTE(review): cam_angle, near/far clipping planes and
    # number_of_active_points appear to be module-level constants — confirm.
    depth, rgb = vrep_functions.calculate_point_cloud(
        rgb_im, depth_im, cam_angle, near_clipping_plane, far_clipping_plane)
    background = PointsObject()
    background.set_points(depth, rgb, number_of_active_points)
    xyz = np.asarray(background.return_n_last_points(number_of_active_points))
    print(xyz[0].shape)
def point_cloud_from_VREP():
    """Capture a kinect frame from V-REP, save the images and the cloud."""
    import vrep_functions
    import image_processing

    client_id = vrep_functions.vrep_connection()
    vrep_functions.vrep_start_sim(client_id)
    kinect_rgb_id = vrep_functions.get_object_id(client_id, 'kinect_rgb')
    kinect_depth_id = vrep_functions.get_object_id(client_id, 'kinect_depth')
    depth_im, rgb_im = vrep_functions.vrep_get_kinect_images(
        client_id, kinect_rgb_id, kinect_depth_id)
    image_processing.save_image(rgb_im, "preDiploma_PC/", 0, "rgb_box_")
    image_processing.save_image(depth_im, "preDiploma_PC/", 0, "depth_box_")
    print(depth_im.shape, rgb_im.shape)
    vrep_functions.vrep_stop_sim(client_id)

    xyz, colors = image_processing.calculate_point_cloud(rgb_im, depth_im)
    cloud = PointsObject()
    cloud.add_points(xyz, colors)
    cloud.save_all_points("preDiploma_PC/", "box")
def generate_found_shapes(object, found_centers, probabilities_of_centers, number_of_points=100):
    """Create colored copies of an object placed at each found center.

    Color encodes probability: hue runs from blue (least probable) towards
    red (most probable).

    Args:
        object: PointsObject used as the shape template (name kept for
            caller compatibility although it shadows the builtin).
        found_centers: iterable of candidate center positions.
        probabilities_of_centers: probability per candidate center.
        number_of_points: number of active points in each generated shape.

    Returns:
        List of PointsObject, one per found center.

    Fixes: the original did `points -= center`, shifting the array returned
    by object.get_points()[0] in place and thereby mutating the source
    object; this version re-centers into a fresh array instead.
    """
    found_shapes = []
    blue = 0.7
    hsv = np.ones([probabilities_of_centers.shape[0], 3])
    hsv[:, 0] = blue - probabilities_of_centers / np.max(
        probabilities_of_centers) * blue
    rgb = matplotlib.colors.hsv_to_rgb(hsv)
    center = object.get_center()
    # `-` allocates a new array, leaving the object's own points untouched.
    points = object.get_points()[0] - center
    for f, f_center in enumerate(found_centers):
        current_shape = PointsObject()
        current_rgb = np.zeros([points.shape[0], 3]) + rgb[f]
        current_shape.add_points(points + f_center, current_rgb,
                                 number_of_points)
        found_shapes.append(current_shape)
    return found_shapes
def objects_test_moving_figures_local():
    """Track moving objects across frames using local covariance descriptors.

    Builds an RGB-D mixture-of-Gaussians background model from frame 0 of
    "falling balls and cylinder", then for frames 1..4 segments moving
    regions, describes each region with a CovarianceDescriptor, matches it
    against previously seen objects and writes a per-frame color mask to
    "tracking_results".

    NOTE(review): the nesting below is reconstructed from a collapsed
    source — confirm against the original file before relying on it.
    """
    number_of_comparing_points = 100
    # Maps CovarianceDescriptor (one per discovered object) -> RGB color.
    classes = {}
    rgb_im = image_processing.load_image("falling balls and cylinder",
                                         "rgb_" + str(0) + ".png")
    depth_im = image_processing.load_image("falling balls and cylinder",
                                           "depth_" + str(0) + ".png", "depth")
    mog = RGBD_MoG(rgb_im, depth_im, number_of_gaussians=3)
    for number_of_frame in range(1, 5):
        color_mask = np.zeros([rgb_im.shape[0], rgb_im.shape[1], 3])
        rgb_im = image_processing.load_image(
            "falling balls and cylinder",
            "rgb_" + str(number_of_frame) + ".png")
        depth_im = image_processing.load_image(
            "falling balls and cylinder",
            "depth_" + str(number_of_frame) + ".png", "depth")
        mask = mog.set_mask(rgb_im, depth_im)
        # Keep depth only where the background model flagged motion.
        depth_im = depth_im * (mask / 255).astype(int)
        masks = region_growing(mask / 255, depth_im / 255,
                               depth_threshold=0.01,
                               significant_number_of_points=10)
        if len(masks) == 0:
            print("No moving objects in the frame")
        else:
            for mask in masks:
                xyz_points, rgb_points = image_processing.calculate_point_cloud(
                    rgb_im / 255, depth_im * mask / 255)
                current_object = PointsObject()
                current_object.set_points(xyz_points, rgb_points)
                norms = current_object.get_normals()
                compared_object_descriptor = CovarianceDescriptor(
                    xyz_points, rgb_points, norms,
                    k_nearest_neighbours=None, relevant_distance=0.1,
                    use_alpha=True, use_beta=True, use_ro=True,
                    use_theta=True, use_psi=True, use_rgb=True)
                match_found = False
                # One row of per-point distances per known class.
                lengths = np.zeros([
                    len(classes),
                    np.amin([number_of_comparing_points, xyz_points.shape[0]])
                ])
                if number_of_frame == 1:
                    # First frame after background modelling: everything is new.
                    match_found = False
                else:
                    # NOTE(review): match_found is set True for every frame > 1
                    # regardless of the comparison outcome — looks suspicious,
                    # confirm intended behavior.
                    match_found = True
                    for object_number, object_class in enumerate(classes):
                        lengths[object_number] = object_class.compare_descriptors(
                            compared_object_descriptor.object_descriptor,
                            number_of_comparing_points)
                    print(np.sum(mask))
                    # Per point: the best-matching class, kept only where the
                    # smallest distance is below 0.1.
                    min_args = np.argmin(lengths, axis=0)[
                        np.amin(lengths, axis=0) < 0.1]
                    unique, counts = np.unique(min_args, return_counts=True)
                    # Majority vote over the points' best classes.
                    # NOTE(review): fails with an IndexError if no point passes
                    # the 0.1 threshold (unique is empty) — confirm.
                    best_match = unique[np.argmax(counts)]
                    for object_number, object_class in enumerate(classes):
                        if object_number == best_match:
                            color_mask[:, :, 0] += mask * classes[object_class][0]
                            color_mask[:, :, 1] += mask * classes[object_class][1]
                            color_mask[:, :, 2] += mask * classes[object_class][2]
                if not match_found:
                    # Register a new object with a random color.
                    classes[compared_object_descriptor] = np.random.rand(3)
                    color_mask[:, :, 0] += mask * classes[
                        compared_object_descriptor][0]
                    color_mask[:, :, 1] += mask * classes[
                        compared_object_descriptor][1]
                    color_mask[:, :, 2] += mask * classes[
                        compared_object_descriptor][2]
        image_processing.save_image(color_mask, "tracking_results",
                                    frame_number=number_of_frame,
                                    image_name="local_two_same")
def objects_test_moving_figures_global():
    """Track moving objects across frames using global covariance descriptors.

    Same pipeline as the local variant: build an RGB-D MoG background model
    from frame 0, then for frames 1..4 segment moving regions, describe each
    with a GlobalCovarianceDescriptor (one distance per known class instead
    of per-point distances), pick the closest class, and save a per-frame
    color mask to "tracking_results".

    NOTE(review): the nesting below is reconstructed from a collapsed
    source — confirm against the original file before relying on it.
    """
    # Maps GlobalCovarianceDescriptor (one per discovered object) -> color.
    classes = {}
    rgb_im = image_processing.load_image("falling balls and cylinder",
                                         "rgb_" + str(0) + ".png")
    depth_im = image_processing.load_image("falling balls and cylinder",
                                           "depth_" + str(0) + ".png", "depth")
    mog = RGBD_MoG(rgb_im, depth_im, number_of_gaussians=3)
    for number_of_frame in range(1, 5):
        color_mask = np.zeros([rgb_im.shape[0], rgb_im.shape[1], 3])
        rgb_im = image_processing.load_image(
            "falling balls and cylinder",
            "rgb_" + str(number_of_frame) + ".png")
        depth_im = image_processing.load_image(
            "falling balls and cylinder",
            "depth_" + str(number_of_frame) + ".png", "depth")
        mask = mog.set_mask(rgb_im, depth_im)
        # Keep depth only where the background model flagged motion.
        depth_im = depth_im * (mask / 255).astype(int)
        masks = region_growing(mask / 255, depth_im / 255,
                               depth_threshold=0.01,
                               significant_number_of_points=10)
        if len(masks) == 0:
            print("No moving objects in the frame")
        else:
            for mask in masks:
                xyz_points, rgb_points = image_processing.calculate_point_cloud(
                    rgb_im / 255, depth_im * mask / 255)
                current_object = PointsObject()
                current_object.set_points(xyz_points, rgb_points)
                norms = current_object.get_normals()
                compared_object_descriptor = GlobalCovarianceDescriptor(
                    xyz_points, rgb_points, norms, depth_im, rgb_im, mask,
                    use_xyz=True, use_rgb=True, use_normals=True)
                match_found = False
                # One scalar distance per known class.
                lengths = np.zeros([len(classes)])
                if number_of_frame == 1:
                    # First frame after background modelling: everything is new.
                    match_found = False
                else:
                    # NOTE(review): match_found is set True for every frame > 1
                    # regardless of the comparison outcome — confirm intended.
                    match_found = True
                    for object_number, object_class in enumerate(classes):
                        lengths[object_number] = object_class.compare_descriptors(
                            compared_object_descriptor.object_descriptor)
                    min_arg = np.argmin(lengths)
                    print(lengths)
                    for object_number, object_class in enumerate(classes):
                        if object_number == min_arg:
                            color_mask[:, :, 0] += mask * classes[object_class][0]
                            color_mask[:, :, 1] += mask * classes[object_class][1]
                            color_mask[:, :, 2] += mask * classes[object_class][2]
                if not match_found:
                    # Register a new object with a random color.
                    classes[compared_object_descriptor] = np.random.rand(3)
                    color_mask[:, :, 0] += mask * classes[
                        compared_object_descriptor][0]
                    color_mask[:, :, 1] += mask * classes[
                        compared_object_descriptor][1]
                    color_mask[:, :, 2] += mask * classes[
                        compared_object_descriptor][2]
        image_processing.save_image(color_mask, "tracking_results",
                                    frame_number=number_of_frame,
                                    image_name="global_two_same")
def observation_momental():
    """End-to-end motion-prediction experiment on a synthetic falling cube.

    Generates a ground-truth trajectory for a cube, observes it through
    RANSAC shape recognition, fits trajectory/angle functions to the
    observations, predicts the probability of the cube's position at a
    future moment, and visualizes prediction vs. ground truth.
    """
    # load the model
    stable_object = download_point_cloud.download_to_object(
        "models/grey plane.ply", 3000)
    stable_object.scale(0.3)
    stable_object.rotate([90, 0, 0])

    falling_object = download_point_cloud.download_to_object(
        "models/red cube.ply", 3000)
    falling_object.scale(0.3)
    falling_object.shift([0, 3, 0])
    shapes = [falling_object]
    center = falling_object.get_center()

    # temp_1()

    # generate observation data: per-axis polynomial coefficients for
    # rotation (degrees) and translation.
    rotation_params = np.asarray([[0, 70], [0, 50], [0, 80]])
    moving_params = np.asarray([[0, 0.1, -0.3], [0, -1.5, 0.1], [0, 1, 0]])
    observation_step_time = 0.2
    number_of_observations = 5
    observation_moments = np.arange(
        0, round(number_of_observations * observation_step_time, 3),
        observation_step_time)

    rotation_angles_gt, center_position_gt, moving_objects = create_movement_path(
        falling_object, rotation_params, moving_params, observation_moments)

    # Replace each observed cloud by its last RANSAC-recognized shape.
    for i, m in enumerate(moving_objects):
        found_shapes = shape_recognition.RANSAC(m.get_points()[0],
                                                m.get_normals())
        moving_objects[i].set_points(found_shapes[-1])

    found_rotation, found_center_positions = find_observations(
        moving_objects, falling_object.get_center())

    print(center_position_gt)
    print(found_center_positions)

    shapes = []
    shapes += moving_objects
    # visualization.visualize(shapes)

    # find functions for xyz trajectory and per-axis rotation angles
    start = time.time()
    trajectory_functions_x = moving_prediction.find_functions(
        observation_moments, found_center_positions[:, 0])
    trajectory_functions_y = moving_prediction.find_functions(
        observation_moments, found_center_positions[:, 1])
    trajectory_functions_z = moving_prediction.find_functions(
        observation_moments, found_center_positions[:, 2])

    angle_functions_x = moving_prediction.find_functions(
        observation_moments, found_rotation[:, 0])
    angle_functions_y = moving_prediction.find_functions(
        observation_moments, found_rotation[:, 1])
    angle_functions_z = moving_prediction.find_functions(
        observation_moments, found_rotation[:, 2])
    print(time.time() - start)

    # Extend the time axis (x6) to compare fits against future ground truth.
    future_time = np.arange(
        0, round(number_of_observations * observation_step_time * 6, 3),
        observation_step_time)
    future_angles_gt, future_center_gt, _ = create_movement_path(
        falling_object, rotation_params, moving_params, future_time)
    visualization.show_found_functions(trajectory_functions_x,
                                       observation_moments,
                                       found_center_positions[:, 0],
                                       future_time, future_center_gt[:, 0],
                                       't, s', 'x, m',
                                       'x coordinate of center')
    visualization.show_found_functions(trajectory_functions_y,
                                       observation_moments,
                                       found_center_positions[:, 1],
                                       future_time, future_center_gt[:, 1],
                                       't, s', 'y, m',
                                       'y coordinate of center')
    visualization.show_found_functions(trajectory_functions_z,
                                       observation_moments,
                                       found_center_positions[:, 2],
                                       future_time, future_center_gt[:, 2],
                                       't, s', 'z, m',
                                       'z coordinate of center')
    visualization.show_found_functions(angle_functions_x, observation_moments,
                                       found_rotation[:, 0], future_time,
                                       future_angles_gt[:, 0], 't, s',
                                       'angle, deg', 'x axis angle')
    visualization.show_found_functions(angle_functions_y, observation_moments,
                                       found_rotation[:, 1], future_time,
                                       future_angles_gt[:, 1], 't, s',
                                       'angle, deg', 'y axis angle')
    visualization.show_found_functions(angle_functions_z, observation_moments,
                                       found_rotation[:, 2], future_time,
                                       future_angles_gt[:, 2], 't, s',
                                       'angle, deg', 'z axis angle')

    # prediction part
    time_of_probability = 2.
    d_x = 0.1
    d_angle = 1
    threshold_p = 0.5

    prob_x, x = moving_prediction.probability_of_being_in_point(
        trajectory_functions_x, time_of_probability, d_x, True)
    prob_y, y = moving_prediction.probability_of_being_in_point(
        trajectory_functions_y, time_of_probability, d_x, True)
    prob_z, z = moving_prediction.probability_of_being_in_point(
        trajectory_functions_z, time_of_probability, d_x, True)

    prob_x_angle, x_angle = moving_prediction.probability_of_being_in_point(
        angle_functions_x, time_of_probability, d_angle, True)
    prob_y_angle, y_angle = moving_prediction.probability_of_being_in_point(
        angle_functions_y, time_of_probability, d_angle, True)
    prob_z_angle, z_angle = moving_prediction.probability_of_being_in_point(
        angle_functions_z, time_of_probability, d_angle, True)

    prediction_object = download_point_cloud.download_to_object(
        "models/red cube.ply", 3000)
    prediction_object.scale(0.3)
    prediction_points = prediction_object.get_points()[0]

    xyz_dict = moving_prediction.get_xyz_probabilities_from_angles_probabilities(
        prediction_points, x_angle, prob_x_angle, y_angle, prob_y_angle,
        z_angle, prob_z_angle, d_x, threshold_p)

    points, probabilities = moving_prediction.probability_of_all_points(
        xyz_dict, prob_x, x, prob_y, y, prob_z, z, threshold_p)

    # NOTE(review): the -1 sentinel is checked only AFTER xyz_dict has been
    # passed to probability_of_all_points — confirm this ordering is intended.
    if xyz_dict == -1:
        print("всё сломалось")
        sys.exit(0)

    # Keep only points whose probability reaches the threshold.
    high_probabilities = np.where(probabilities >= threshold_p, True, False)
    high_probable_points, high_probable_points_probabilities = points[
        high_probabilities], probabilities[high_probabilities]

    shapes.append(
        generate_color_shapes(high_probable_points,
                              high_probable_points_probabilities))

    # generate ground truth at the prediction moment
    observation_moment = np.asarray([time_of_probability])

    _, _, moving_objects = create_movement_path(falling_object,
                                                rotation_params,
                                                moving_params,
                                                observation_moment)
    points = moving_objects[0].get_points()[0]
    gt_object = PointsObject()
    gt_object.add_points(points, falling_object.get_points()[1])
    shapes += [gt_object]

    visualization.visualize_object(shapes)
    get_histogram(high_probable_points, high_probable_points_probabilities)
def save_points_cloud():
    """Build the test ball point cloud and save it under "Test".

    Fixes: the local variable was named `object`, shadowing the builtin.
    """
    points_cloud, points_color = create_points_cloud()
    cloud = PointsObject()
    cloud.set_points(points_cloud, points_color)
    cloud.save_all_points("Test", "ball")