def approximate_environment(environment_points, step=0.1, min_number_of_approximated_points=30):
    # note: min_number_of_approximated_points is currently unused
    temp = PointsObject()
    temp.add_points(environment_points)
    normals = temp.get_normals()
    found_shapes = shape_recognition.RANSAC(
        environment_points, normals,
        number_of_points_threshold=environment_points.shape[0] * 0.1,
        number_of_iterations=10,
        min_pc_number=environment_points.shape[0] * 0.3,
        number_of_subsets=10,
        use_planes=True, use_box=False, use_sphere=False, use_cylinder=True, use_cone=False)

    points = np.empty((0, 3))
    normals = np.empty((0, 3))
    for s in found_shapes:
        # snap the points of each recognized shape to a regular grid with the given step
        s = np.round(s / step) * step
        temp = PointsObject()
        temp.add_points(s)
        points = np.append(points, s, axis=0)
        normals = np.append(normals, temp.get_normals(), axis=0)
    return points, normals

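# A minimal usage sketch for approximate_environment. The synthetic noisy plane below is an
# illustrative assumption; it only relies on PointsObject/RANSAC behaving as used above.
def _example_approximate_environment():
    # flat 2 m x 2 m plane with slight noise along y
    plane = np.column_stack([np.random.uniform(-1, 1, 1000),
                             np.random.normal(0, 0.005, 1000),
                             np.random.uniform(-1, 1, 1000)])
    approx_points, approx_normals = approximate_environment(plane, step=0.1)
    print(approx_points.shape, approx_normals.shape)
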
def generate_color_shapes(found_points, probabilities):
    # map probability to hue: the most probable points become red (hue 0), the least probable blue (hue 0.7)
    blue = 0.7
    hsv = np.ones([probabilities.shape[0], 3])
    hsv[:, 0] = blue - probabilities / np.max(probabilities) * blue
    color = matplotlib.colors.hsv_to_rgb(hsv)
    object_to_return = PointsObject()
    object_to_return.add_points(found_points, color)
    return object_to_return

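# A small illustrative call to generate_color_shapes; the random points and probabilities are
# made-up demo data, assumed only for the sake of the example.
def _example_generate_color_shapes():
    demo_points = np.random.rand(200, 3)
    demo_probabilities = np.random.rand(200)
    colored = generate_color_shapes(demo_points, demo_probabilities)
    visualization.visualize_object([colored])
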
def save_point_cloud_from_VREP():
    depth_im, rgb_im = save_images_from_VREP("preDiploma_PC/")
    points, color = image_processing.calculate_point_cloud(rgb_im, depth_im)
    current_object = PointsObject()
    current_object.add_points(points, color)
    current_object.save_all_points("preDiploma_PC/", "box")

def save_point_cloud_from_images():
    rgb_im = image_processing.load_image("preDiploma_PC/", "rgb_box_0.png")
    depth_im = image_processing.load_image("preDiploma_PC/", "depth_box_0.png", "depth")
    # images are normalized to [0, 1] before building the point cloud
    points, color = image_processing.calculate_point_cloud(rgb_im / 255, depth_im / 255)
    current_object = PointsObject()
    current_object.add_points(points, color)
    current_object.save_all_points("preDiploma_PC/", "box")

def generate_trajectory(points, trajectory_fun, trajectory_param, ttime):
    current_points = points.get_points()[0]
    center = np.mean(current_points, axis=0)
    shapes_to_return = []
    center_trajectory = []
    # trajectory_fun returns one xyz shift per requested time moment
    shifts = trajectory_fun(trajectory_param, ttime)
    for shift in shifts:
        current_shape = PointsObject()
        current_shape.add_points(current_points + shift)
        shapes_to_return.append(current_shape)
        center_trajectory.append(center + shift)
    return shapes_to_return, np.asarray(center_trajectory)

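# Hedged example for generate_trajectory: a toy linear-shift trajectory function that returns
# one xyz offset per time step. The velocity value and the helper name are assumptions; the
# cube model path is the one used elsewhere in this module.
def _example_generate_trajectory():
    def linear_shifts(velocity, times):
        # shape (len(times), 3): each row is the shift at that time moment
        return np.outer(times, velocity)

    cube = download_point_cloud.download_to_object("models/red cube.ply", 3000)
    times = np.arange(0, 1.0, 0.2)
    moved_shapes, centers = generate_trajectory(cube, linear_shifts, np.asarray([0, -1, 0]), times)
    print(centers)
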
def check_data_generation():
    data_generation.save_images_from_VREP()
    depth_im = image_processing.load_image("3d_map/", "room_depth0.png", "depth")
    rgb_im = image_processing.load_image("3d_map/", "room_rgb0.png")
    xyz, rgb = image_processing.calculate_point_cloud(rgb_im / 255, depth_im / 255)
    temp = PointsObject()
    temp.add_points(xyz, rgb)
    temp.save_all_points("3d_map/", "room")
    visualization.visualize([temp])

def temp():
    # rotate a vector away from the ground-truth direction, then rotate it back using the
    # angle/axis recovered from the normals
    ground_truth_vector = [0, 1, 0]
    vector_model = PointsObject()
    vector_model.add_points(np.asarray([ground_truth_vector]), np.asarray([[1, 0, 0]]))
    vector_model_2 = PointsObject()
    vector_model_2.add_points(np.asarray([ground_truth_vector]))

    vector_model.rotate([1, 1, 1], math.radians(60))
    normal = vector_model.get_points()[0][0]
    angle = shape_recognition.angle_between_normals(ground_truth_vector, normal)
    axis = np.cross(ground_truth_vector, normal)
    vector_model.rotate(axis, angle)
    visualization.visualize_object([vector_model, vector_model_2])

def check_RANSAC():
    ball = download_point_cloud.download_to_object("preDiploma_PC/box.pcd")
    full_model = ball
    found_shapes = shape_recognition.RANSAC(full_model.get_points()[0], full_model.get_normals())

    shapes = [full_model]
    for s in found_shapes:
        new_shape = PointsObject()
        # give each found shape a uniform random color
        new_shape.add_points(s, np.asarray([[random.random(), random.random(), random.random()]] * s.shape[0]))
        shapes.append(new_shape)
    visualization.visualize_object(shapes)

def fill_the_shape_part():
    # save_points_cloud()
    ball = download_point_cloud.download_to_object("preDiploma_PC/box.pcd")
    # visualization.visualize_object([ball])
    full_model = ball

    # full_model = download_point_cloud.download_to_object("models/blue conus.ply", 3000)
    # full_model.scale(0.1)
    # full_model.shift([0.01, 0.05, 0.01])
    # full_model.rotate([1, 1, 1], math.radians(35))
    #
    # full_model_2 = download_point_cloud.download_to_object("models/orange sphere.ply", 3000)
    # full_model_2.scale(0.1)
    # full_model_2.shift([-0.1, -0.1, 0.1])
    # full_model.rotate([1, 1, 1], math.radians(60))
    # full_model.add_points(full_model_2.get_points()[0], full_model_2.get_points()[1])
    #
    # full_model_2 = download_point_cloud.download_to_object("models/orange sphere.ply", 3000)
    # full_model_2.scale(0.1)
    # full_model_2.shift([-0.01, 0.1, 0.3])
    # full_model.rotate([1, 0, 1], math.radians(30))
    # full_model.add_points(full_model_2.get_points()[0], full_model_2.get_points()[1])

    # visualization.visualize_object([full_model])
    # temp()
    # temp_2()

    start = time.time()
    found_shapes = shape_recognition.RANSAC(full_model.get_points()[0], full_model.get_normals())
    print(time.time() - start)

    shapes = [full_model]
    for s in found_shapes:
        new_shape = PointsObject()
        new_shape.add_points(s, np.asarray([[random.random(), random.random(), random.random()]] * s.shape[0]))
        shapes.append(new_shape)
    visualization.visualize_object(shapes)

def point_cloud_from_VREP():
    """Check that vrep_functions and PointsObject work together correctly."""
    import vrep_functions
    import image_processing

    client_id = vrep_functions.vrep_connection()
    vrep_functions.vrep_start_sim(client_id)
    kinect_rgb_id = vrep_functions.get_object_id(client_id, 'kinect_rgb')
    kinect_depth_id = vrep_functions.get_object_id(client_id, 'kinect_depth')
    depth_im, rgb_im = vrep_functions.vrep_get_kinect_images(client_id, kinect_rgb_id, kinect_depth_id)
    image_processing.save_image(rgb_im, "preDiploma_PC/", 0, "rgb_box_")
    image_processing.save_image(depth_im, "preDiploma_PC/", 0, "depth_box_")
    print(depth_im.shape, rgb_im.shape)
    vrep_functions.vrep_stop_sim(client_id)

    points, color = image_processing.calculate_point_cloud(rgb_im, depth_im)
    current_object = PointsObject()
    current_object.add_points(points, color)
    current_object.save_all_points("preDiploma_PC/", "box")

def generate_found_shapes(object, found_centers, probabilities_of_centers, number_of_points=100):
    found_shapes = []
    # map each center's probability to a hue between blue (low) and red (high)
    blue = 0.7
    hsv = np.ones([probabilities_of_centers.shape[0], 3])
    hsv[:, 0] = blue - probabilities_of_centers / np.max(probabilities_of_centers) * blue
    rgb = matplotlib.colors.hsv_to_rgb(hsv)

    center = object.get_center()
    points = object.get_points()[0]
    points -= center
    for f, f_center in enumerate(found_centers):
        current_shape = PointsObject()
        current_rgb = np.zeros([points.shape[0], 3]) + rgb[f]
        current_shape.add_points(points + f_center, current_rgb, number_of_points)
        found_shapes.append(current_shape)
    return found_shapes

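# Illustrative call to generate_found_shapes with assumed candidate centers and probabilities:
# the most probable copy of the model is drawn red, the least probable blue.
def _example_generate_found_shapes():
    cube = download_point_cloud.download_to_object("models/red cube.ply", 3000)
    candidate_centers = np.asarray([[0., 0., 0.], [0.2, 0.1, 0.], [0.4, 0.2, 0.1]])
    candidate_probabilities = np.asarray([0.9, 0.5, 0.2])
    candidates = generate_found_shapes(cube, candidate_centers, candidate_probabilities)
    visualization.visualize_object(candidates)
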
def observation_momental():
    # load the models
    stable_object = download_point_cloud.download_to_object("models/grey plane.ply", 3000)
    stable_object.scale(0.3)
    stable_object.rotate([90, 0, 0])

    falling_object = download_point_cloud.download_to_object("models/red cube.ply", 3000)
    falling_object.scale(0.3)
    falling_object.shift([0, 3, 0])
    shapes = [falling_object]
    center = falling_object.get_center()

    # temp_1()

    # generate observation data
    rotation_params = np.asarray([[0, 70], [0, 50], [0, 80]])
    moving_params = np.asarray([[0, 0.1, -0.3], [0, -1.5, 0.1], [0, 1, 0]])
    observation_step_time = 0.2
    number_of_observations = 5
    observation_moments = np.arange(0, round(number_of_observations * observation_step_time, 3),
                                    observation_step_time)

    rotation_angles_gt, center_position_gt, moving_objects = create_movement_path(
        falling_object, rotation_params, moving_params, observation_moments)

    for i, m in enumerate(moving_objects):
        found_shapes = shape_recognition.RANSAC(m.get_points()[0], m.get_normals())
        moving_objects[i].set_points(found_shapes[-1])

    found_rotation, found_center_positions = find_observations(moving_objects, falling_object.get_center())

    print(center_position_gt)
    print(found_center_positions)

    shapes = []
    shapes += moving_objects
    # visualization.visualize(shapes)

    # find functions for the xyz trajectory and the rotation angles
    start = time.time()
    trajectory_functions_x = moving_prediction.find_functions(observation_moments, found_center_positions[:, 0])
    trajectory_functions_y = moving_prediction.find_functions(observation_moments, found_center_positions[:, 1])
    trajectory_functions_z = moving_prediction.find_functions(observation_moments, found_center_positions[:, 2])

    angle_functions_x = moving_prediction.find_functions(observation_moments, found_rotation[:, 0])
    angle_functions_y = moving_prediction.find_functions(observation_moments, found_rotation[:, 1])
    angle_functions_z = moving_prediction.find_functions(observation_moments, found_rotation[:, 2])
    print(time.time() - start)

    future_time = np.arange(0, round(number_of_observations * observation_step_time * 6, 3), observation_step_time)
    future_angles_gt, future_center_gt, _ = create_movement_path(falling_object, rotation_params, moving_params,
                                                                 future_time)

    visualization.show_found_functions(trajectory_functions_x, observation_moments, found_center_positions[:, 0],
                                       future_time, future_center_gt[:, 0], 't, s', 'x, m', 'x coordinate of center')
    visualization.show_found_functions(trajectory_functions_y, observation_moments, found_center_positions[:, 1],
                                       future_time, future_center_gt[:, 1], 't, s', 'y, m', 'y coordinate of center')
    visualization.show_found_functions(trajectory_functions_z, observation_moments, found_center_positions[:, 2],
                                       future_time, future_center_gt[:, 2], 't, s', 'z, m', 'z coordinate of center')
    visualization.show_found_functions(angle_functions_x, observation_moments, found_rotation[:, 0], future_time,
                                       future_angles_gt[:, 0], 't, s', 'angle, deg', 'x axis angle')
    visualization.show_found_functions(angle_functions_y, observation_moments, found_rotation[:, 1], future_time,
                                       future_angles_gt[:, 1], 't, s', 'angle, deg', 'y axis angle')
    visualization.show_found_functions(angle_functions_z, observation_moments, found_rotation[:, 2], future_time,
                                       future_angles_gt[:, 2], 't, s', 'angle, deg', 'z axis angle')

    # prediction part
    time_of_probability = 2.
    d_x = 0.1
    d_angle = 1
    threshold_p = 0.5

    prob_x, x = moving_prediction.probability_of_being_in_point(trajectory_functions_x, time_of_probability, d_x, True)
    prob_y, y = moving_prediction.probability_of_being_in_point(trajectory_functions_y, time_of_probability, d_x, True)
    prob_z, z = moving_prediction.probability_of_being_in_point(trajectory_functions_z, time_of_probability, d_x, True)

    prob_x_angle, x_angle = moving_prediction.probability_of_being_in_point(angle_functions_x, time_of_probability,
                                                                            d_angle, True)
    prob_y_angle, y_angle = moving_prediction.probability_of_being_in_point(angle_functions_y, time_of_probability,
                                                                            d_angle, True)
    prob_z_angle, z_angle = moving_prediction.probability_of_being_in_point(angle_functions_z, time_of_probability,
                                                                            d_angle, True)

    prediction_object = download_point_cloud.download_to_object("models/red cube.ply", 3000)
    prediction_object.scale(0.3)
    prediction_points = prediction_object.get_points()[0]

    xyz_dict = moving_prediction.get_xyz_probabilities_from_angles_probabilities(
        prediction_points, x_angle, prob_x_angle, y_angle, prob_y_angle, z_angle, prob_z_angle, d_x, threshold_p)
    if xyz_dict == -1:
        print("something went wrong")
        sys.exit(0)
    points, probabilities = moving_prediction.probability_of_all_points(xyz_dict, prob_x, x, prob_y, y, prob_z, z,
                                                                        threshold_p)

    high_probabilities = np.where(probabilities >= threshold_p, True, False)
    high_probable_points = points[high_probabilities]
    high_probable_points_probabilities = probabilities[high_probabilities]

    shapes.append(generate_color_shapes(high_probable_points, high_probable_points_probabilities))

    # generate ground truth
    observation_moment = np.asarray([time_of_probability])
    _, _, moving_objects = create_movement_path(falling_object, rotation_params, moving_params, observation_moment)
    points = moving_objects[0].get_points()[0]
    gt_object = PointsObject()
    gt_object.add_points(points, falling_object.get_points()[1])
    shapes += [gt_object]

    visualization.visualize_object(shapes)
    get_histogram(high_probable_points, high_probable_points_probabilities)