def test_euler():
    # Identity check, then a round-trip Euler -> quaternion -> Euler over a grid of angles
    q = Quaternion.from_euler(0, 0, 0)
    assert q == Quaternion(1, 0, 0, 0), f"{q} != Quaternion(1,0,0,0)"
    assert q.magnitude == 1.0, f"{q.magnitude} magnitude != 1.0"

    delta = 10
    for i in range(-179, 180, delta):
        for j in range(-89, 90, delta):
            for k in range(-179, 180, delta):
                q = Quaternion.from_euler(i, j, k, degrees=True)
                e = q.to_euler(degrees=True)
                for a, b in zip((i, j, k), e):
                    diff = abs(a - b)
                    assert diff < 0.001, "{}: {} {} {}".format(diff, i, j, k)
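# Why the pitch loop above stays inside (-89, 89): the Euler round-trip is only
# unique while pitch is within (-90, 90) degrees; at exactly +/-90 degrees, roll
# and yaw become coupled (gimbal lock) and to_euler() may legitimately return a
# different triple describing the same rotation. A minimal sketch of that
# degenerate case, assuming the same Quaternion API used in test_euler():
def _gimbal_lock_sketch():
    q = Quaternion.from_euler(30, 90, 0, degrees=True)
    e = q.to_euler(degrees=True)
    # e is not guaranteed to equal (30, 90, 0), only to describe the same
    # rotation, so the strict per-angle assert above would be too strong here.
    return e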
def move_camera(self, t):
    """
    Moves the camera in a circle around the table. Every 1000 steps the camera
    is lowered and tilted slightly, to capture a different angle and hopefully
    more varied data.

    TODO: Maybe other camera movement?

    :param t: current step
    :return: None (updates the camera pose, self.angle, and self.radius)
    """
    cam_px, cam_py, cam_pz = self.camera.getPosition()
    cam_q1, cam_q2, cam_q3, cam_q4 = self.camera.getQuaternion()
    q = Quaternion(cam_q1, cam_q2, cam_q3, cam_q4)
    e1, e2, e3 = q.to_euler(degrees=True)

    cam_px_new = cam_px + np.cos(self.angle) * self.radius
    cam_py_new = cam_py + np.sin(self.angle) * self.radius

    # Every 1000 steps, lower the camera and tilt it a little
    if t % 1000 == 0:
        cam_pz = cam_pz - 0.05
        e1 = e1 + 2.5

    quat_new = Quaternion.from_euler(e1, e2, e3 + 5, degrees=True)
    self.camera.setPosition([cam_px_new, cam_py_new, cam_pz])
    self.camera.setQuaternion([quat_new[0], quat_new[1], quat_new[2], quat_new[3]])
    self.angle += 0.0872665  # ~5 degrees per step, in radians

    # Reset the camera pose after 12000 steps
    if t == 12000:
        self.camera.setPosition([0.6, -.75, 1.85])
        self.camera.setQuaternion(self.cam_quat)
        self.angle = 0
        self.radius = 0.075
def set_goal_orientation(self, n0, n1):
    # Sets the goal orientation to the heading of the vector from the prior
    # node to the current node. Currently used as a proxy to keep the robot
    # relatively in line with the path.
    p0 = self.select_point_in_room(new_room=n0)
    p1 = self.select_point_in_room(new_room=n1)
    del_x = p1[0] - p0[0]
    del_y = p1[1] - p0[1]
    new_yaw = arctan2(del_y, del_x)
    self.goal_orientation = Quaternion.from_euler(0, 0, new_yaw)
    rospy.loginfo('New orientation: {}'.format(self.goal_orientation))
def compensate(self, accel, mag):
    """
    Tilt-compensated compass: computes roll, pitch, and heading from
    accelerometer and magnetometer readings.
    """
    try:
        mx, my, mz = normalize3(*mag)
        ax, ay, az = normalize3(*accel)

        pitch = asin(-ax)

        if abs(pitch) >= pi / 2:
            roll = 0.0
        else:
            roll = asin(ay / cos(pitch))

        # Project the magnetometer reading onto the horizontal plane
        x = mx * cos(pitch) + mz * sin(pitch)
        y = mx * sin(roll) * sin(pitch) + my * cos(roll) - mz * sin(roll) * cos(pitch)
        heading = atan2(y, x)

        # wrap heading to [0, 2*pi)
        if heading > 2 * pi:
            heading -= 2 * pi
        elif heading < 0:
            heading += 2 * pi

        if self.angle_units == Angle.degrees:
            roll *= rad2deg
            pitch *= rad2deg
            heading *= rad2deg
        elif self.angle_units == Angle.quaternion:
            return Quaternion.from_euler(roll, pitch, heading)

        return (roll, pitch, heading)

    except ZeroDivisionError as e:
        print('Error', e)
        if self.angle_units == Angle.quaternion:
            return Quaternion(1, 0, 0, 0)
        else:
            return (0.0, 0.0, 0.0)
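# A minimal usage sketch for compensate() (assumptions: an instance named 'imu'
# exposing angle_units, with Angle and normalize3 coming from the same module
# as above). With the device level (accel along +z) and the horizontal magnetic
# field along +x, the computation above yields roll, pitch, and heading of ~0:
#
#   imu.angle_units = Angle.degrees
#   roll, pitch, heading = imu.compensate(accel=(0, 0, 1), mag=(0.2, 0.0, 0.4))
#   # roll ~= 0.0, pitch ~= 0.0, heading ~= 0.0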
def update_transform(self):
    """
    Updates the transform from the map frame to the encoder_baselink frame
    according to the current estimate.
    """
    self.transform_msg.header.stamp = self.latest_timestamp
    self.transform_msg.transform.translation.x = self.x
    self.transform_msg.transform.translation.y = self.y
    q = Quaternion.from_euler(0.0, 0.0, self.theta)  # inputs: roll, pitch, yaw
    # q is indexed (w, x, y, z); the ROS message stores x, y, z, w
    self.transform_msg.transform.rotation.x = q[1]
    self.transform_msg.transform.rotation.y = q[2]
    self.transform_msg.transform.rotation.z = q[3]
    self.transform_msg.transform.rotation.w = q[0]
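# The field assignments above rely on the quaternion being indexed (w, x, y, z)
# while the ROS message stores x, y, z, w. A small helper (a sketch, not part
# of the original node) makes that convention explicit:
from geometry_msgs.msg import Quaternion as QuaternionMsg

def to_ros_quaternion(q):
    """Convert a (w, x, y, z)-indexed quaternion into a geometry_msgs/Quaternion."""
    return QuaternionMsg(x=q[1], y=q[2], z=q[3], w=q[0])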
def static_start(self):
    state_msg = ModelState()
    state_msg.model_name = 'robot'
    quaternion = Quaternion.from_euler(0, 0, 0)
    state_msg.pose.position.x = -9.0
    state_msg.pose.position.y = -9.0
    state_msg.pose.orientation.z = quaternion[3]
    state_msg.pose.orientation.w = quaternion[0]

    rospy.wait_for_service('/gazebo/set_model_state')
    try:
        set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        resp = set_state(state_msg)
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
def set_goal_orientation(self, orientation):
    if orientation == 'up':
        del_y = 1
        del_x = 0
    elif orientation == "down":
        del_y = -1
        del_x = 0
    elif orientation == "left":
        del_y = 0
        del_x = -1
    elif orientation == "right":
        del_y = 0
        del_x = 1
    else:
        raise ValueError("Unknown orientation: {}".format(orientation))
    new_yaw = arctan2(del_y, del_x)
    self.goal_orientation = Quaternion.from_euler(0, 0, new_yaw)
    rospy.loginfo('New orientation: {}'.format(self.goal_orientation))
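# A quick sanity check of the four discrete headings (a sketch, assuming the
# w-first Quaternion indexing used elsewhere in this file; not part of the
# original node). For a pure yaw rotation, w = cos(yaw/2) and z = sin(yaw/2):
def _check_cardinal_yaws():
    import math
    expected = {'right': 0.0, 'up': math.pi / 2, 'left': math.pi, 'down': -math.pi / 2}
    for name, yaw in expected.items():
        q = Quaternion.from_euler(0, 0, yaw)
        assert abs(q[0] - math.cos(yaw / 2)) < 1e-9, name
        assert abs(q[3] - math.sin(yaw / 2)) < 1e-9, name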
def f(frame_ids):
    # headless - no window
    # verbose - output number of frames rendered, etc.
    visii.initialize(headless=True, verbose=False)

    # Use a neural network to denoise the ray-traced image
    visii.enable_denoiser()

    # Set up the dome background
    negatives = list(glob.glob("negatives/*.jpg"))
    visii.set_dome_light_intensity(1)

    # Create an entity that will serve as our camera.
    camera = visii.entity.create(name="camera")

    # To place the camera into our scene, we'll add a "transform" component.
    # (All visii objects have a "name" that can be used for easy lookup later.)
    camera.set_transform(visii.transform.create(name="camera_transform"))

    # To make our camera entity act like a "camera", we'll add a camera component
    camera.set_camera(
        visii.camera.create_from_fov(
            name="camera_camera",
            field_of_view=1.4,  # note, this is in radians
            aspect=opt.width / float(opt.height)))

    # Finally, we'll select this entity to be the current camera entity.
    # (visii can only use one camera at a time)
    visii.set_camera_entity(camera)

    # Store the camera look-at information so we can export it
    camera_struct_look_at = {
        'at': [0, 0, 0],
        'up': [0, 0, 1],
        'eye': [-1, 0, 0]
    }

    # Set the camera to look at an object by editing the transform component.
    camera.get_transform().look_at(at=camera_struct_look_at['at'],
                                   up=camera_struct_look_at['up'],
                                   eye=camera_struct_look_at['eye'])

    # This function loads a mesh ignoring .mtl
    mesh = visii.mesh.create_from_file(opt.entity, opt.model)

    # Create a visii entity using the loaded mesh
    obj_entity = visii.entity.create(
        name=opt.entity + "_entity",
        mesh=mesh,
        transform=visii.transform.create(opt.entity + "_entity"),
        material=visii.material.create(opt.entity + "_entity"),
    )

    # obj_entity.get_light().set_intensity(0.05)
    # you can also set the light color manually
    # obj_entity.get_light().set_color((1,0,0))

    # Add a texture to the material
    material = visii.material.get(opt.entity + "_entity")
    texture = visii.texture.create_from_file(opt.entity, "./models/Cutie.PNG")
    material.set_base_color_texture(texture)

    # Add the cuboid to the object we want to export
    add_cuboid(opt.entity + "_entity", opt.debug)

    # Keep track of the entities we want to export
    entities_to_export = [opt.entity + "_entity"]

    # Loop where we change and render each frame
    for i in tqdm(frame_ids):

        # Load a random negative onto the dome
        negative = cv2.imread(random.choice(negatives))

        # Skip overly dark backgrounds (mean intensity below 20/255)
        if np.mean(negative) < 20:
            continue

        # Fix the lighting of the background and keep it small within the FOV
        background = make_background(negative)
        cv2.imwrite("test" + str(i) + ".png", background)
        dome = visii.texture.create_from_file("dome", "test" + str(i) + ".png")
        visii.set_dome_light_texture(dome)
        visii.set_dome_light_rotation(
            visii.angleAxis(visii.pi() * .5, visii.vec3(0, 0, 1)))

        stretch_factor = 2
        scale = [
            random.uniform(1, stretch_factor),  # width
            random.uniform(1, stretch_factor),  # length
            random.uniform(1, stretch_factor)   # height
        ]
        obj_entity.get_transform().set_scale(scale)

        # Create a random rotation while making sure the entity faces forward in each frame
        rot = [
            random.uniform(-10, 10),   # roll
            random.uniform(-15, 15),   # pitch
            random.uniform(-45, 45)    # yaw
        ]
        q = Quaternion.from_euler(rot[0], rot[1], rot[2], degrees=True)

        position = [
            random.uniform(0, 4),    # X (depth)
            random.uniform(-1, 1),   # Y
            random.uniform(-1, 1)    # Z
        ]

        # Scale the position by the depth into the image so the object stays in frame
        position[1] *= position[0]
        position[2] *= position[0]
        obj_entity.get_transform().set_position(tuple(position))
        obj_entity.get_transform().set_rotation((q.x, q.y, q.z, q.w))

        # Split frames randomly: roughly opt.test_percent % go to the test
        # folder, the rest go to the training folder
        if random.randint(0, 100) < opt.test_percent:
            folder = opt.entity + '_test/'
        else:
            folder = opt.entity + '_training/'

        # Render the scene
        visii.render_to_file(width=opt.width,
                             height=opt.height,
                             samples_per_pixel=opt.spp,
                             file_path=opt.out + folder + opt.entity + str(i) + '.png')

        # Export the scene annotations as JSON
        export_to_ndds_file(filename=opt.out + folder + opt.entity + str(i) + '.json',
                            obj_names=entities_to_export,
                            width=opt.width,
                            height=opt.height,
                            camera_struct=camera_struct_look_at)

        # Remove the current negative from the dome
        visii.clear_dome_light_texture()
        visii.texture.remove("dome")
        os.remove("test" + str(i) + ".png")

    visii.deinitialize()
def test_e2q():
    q = Quaternion.from_euler(0, 0, 0)
    assert q == Quaternion(1, 0, 0, 0), f"{q} != Quaternion(1,0,0,0)"
    assert q.magnitude == 1.0, f"{q.magnitude} magnitude != 1.0"