def motion_model(particle_poses, speed_command, odom_pose, odom_pose_prev, dt):
    """Apply motion model and return updated array of particle_poses.

    Parameters
    ----------
    particle_poses: an M x 3 array of particle_poses where M is the number of
        particles. Each pose is (x, y, theta) where x and y are in metres and
        theta is in radians.
    speed_command: a two element array of the current commanded speed vector,
        (v, omega), where v is the forward speed in m/s and omega is the
        angular speed in rad/s.
    odom_pose: the current local odometry pose (x, y, theta).
    odom_pose_prev: the previous local odometry pose (x, y, theta).
    dt: the time step (s).

    Returns
    -------
    An M x 3 array of updated particle_poses.
    """
    # Note: speed_command and dt are not used by this odometry-based model.
    M = particle_poses.shape[0]

    # Heading of the straight-line trajectory between the two odometry poses.
    trajectory = arctan2(odom_pose[1] - odom_pose_prev[1],
                         odom_pose[0] - odom_pose_prev[0])

    # Odometry model parameters: distance travelled and the two turn angles.
    d = sqrt((odom_pose[1] - odom_pose_prev[1]) ** 2 +
             (odom_pose[0] - odom_pose_prev[0]) ** 2)
    phi_1_local = angle_difference(odom_pose_prev[2], trajectory)
    phi_2_local = angle_difference(odom_pose[2], trajectory)

    # Net heading change between the odometry poses.
    difference_theta = wraptopi(phi_1_local + phi_2_local)

    # Gaussian noise parameters (mean, standard deviation).
    mu_x, sigma_x = 0, 0.01
    mu_y, sigma_y = 0, 0.008
    mu_theta, sigma_theta = 0, 0.0007

    # Update particle poses. Each particle is translated along its own heading
    # (plus the initial turn angle), so heading error propagates into position.
    for m in range(M):
        heading = particle_poses[m, 2] + phi_1_local
        particle_poses[m, 0] += d * cos(heading) + mu_x + randn() * sigma_x  # x
        particle_poses[m, 1] += d * sin(heading) + mu_y + randn() * sigma_y  # y
        particle_poses[m, 2] = wraptopi(particle_poses[m, 2] + difference_theta
                                        + mu_theta + randn() * sigma_theta)  # theta

    return particle_poses
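# Illustrative usage sketch (not part of the original module): drives one
# prediction step of the particle filter. The particle count, the start at the
# origin, and the odometry/command values below are hypothetical, and the
# module-level helpers (arctan2, sqrt, cos, sin, randn, wraptopi,
# angle_difference) are assumed to be imported as in the original code.
def _example_motion_model_step():
    import numpy as np
    particles = np.zeros((100, 3))           # 100 particles at the origin, heading 0
    odom_prev = np.array([0.0, 0.0, 0.0])    # previous odometry pose
    odom_now = np.array([0.1, 0.02, 0.05])   # current odometry pose after one step
    command = np.array([1.0, 0.5])           # (v, omega); unused by the odometry model
    dt = 0.1
    return motion_model(particles, command, odom_now, odom_prev, dt)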
def predict_angles(pdb_path, resolution, A):
    """
    Predict the tilt angle at which each reflection will be observed. Here a
    positive tilt angle corresponds to images with a +y coordinate. All
    predicted reflections are retained; no missing-wedge exclusion is
    performed here.

    Inputs:
    -------
    pdb_path: path to reference PDB file
    resolution: high-resolution limit of structure factors
    A: crystal setting matrix

    Outputs:
    --------
    hkl_t: dict with keys as Millers and values as tilt angles
    """
    # predict coordinates of all reflections in reciprocal pixels
    sg_symbol, sg_no, cell, cs = cctbx_utils.unit_cell_info(pdb_path)
    hkl = np.array(
        miller.build_set(crystal_symmetry=cs,
                         anomalous_flag=True,
                         d_min=resolution).expand_to_p1().indices())
    qvecs = np.inner(A, np.squeeze(hkl)).T

    # predict tilt angle from associated coordinates
    t = np.rad2deg(np.arctan2(qvecs[:, 1], qvecs[:, 0]))
    q2 = (t > 90) & (t <= 180)
    t[q2] = utils.wraptopi(t[q2] + 180.0)  # shift q2 to q4
    t[(t >= -180) & (t <= -90)] += 180.0  # shift q3 to q1

    # generate a dict with keys as Millers and values as tilt angles
    hkl_t = OrderedDict((tuple(key), val) for key, val in zip(hkl, t))

    return hkl_t
def add_phase_errors(hklp, sigma, friedels_same=True):
    """
    Add phase errors drawn from a normal distribution, N(mu, sigma), to the
    data in hklp. Parameters and data should be in degrees. Note that mu is
    set to 0, since a non-zero mu would only correspond to a global phase
    shift.

    Inputs:
    -------
    hklp: dict whose keys are Millers and values are phases in degrees
    sigma: standard deviation of error normal distribution
    friedels_same: force phase relationship between Friedel mates, boolean

    Outputs:
    --------
    hklp_error: dict whose keys are Millers and values are phases with errors
    """
    # draw errors from a normal distribution and add to phases
    errors = sigma * np.random.randn(len(hklp))
    p_errors = utils.wraptopi(np.array(hklp.values()) + errors)
    hklp_error = OrderedDict(
        (key, val) for key, val in zip(hklp.keys(), p_errors))

    # force phase relationship between Friedel mates: phi(-h) = -phi(h)
    if friedels_same is True:
        for key in hklp_error:
            fkey = (-1 * key[0], -1 * key[1], -1 * key[2])
            if fkey in hklp_error:
                hklp_error[fkey] = -1 * hklp_error[key]

    return hklp_error
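# Illustrative usage sketch (not part of the original module): perturbs a toy
# set of phases and checks that Friedel mates keep phi(-h) = -phi(h). The
# Miller indices and phase values are made up.
def _example_add_phase_errors():
    import numpy as np
    from collections import OrderedDict
    hklp = OrderedDict([((1, 0, 0), 30.0), ((-1, 0, 0), -30.0), ((0, 1, 1), 100.0)])
    hklp_err = add_phase_errors(hklp, sigma=5.0, friedels_same=True)
    assert np.isclose(hklp_err[(1, 0, 0)], -hklp_err[(-1, 0, 0)])
    return hklp_err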
def initial_errors(hklp, refp, shifts):
    """
    Compute the starting phase errors to reference; output metrics are the
    sigma of the normal distribution fit to the residuals and the mean of the
    phase errors to reference.

    Inputs:
    -------
    hklp: dict with Millers as keys and values as phases
    refp: dict with Millers as keys and values as reference phases
    shifts: fractional shifts that reposition hklp on reference phase origin

    Outputs:
    --------
    sigma: standard deviation of normal distribution fit to phase residuals to reference
    m_error: mean of phase residuals to reference
    """
    # shift phases back to reference origin
    hkl, p = np.array(hklp.keys()), np.array(hklp.values())
    p_unshift = utils.wraptopi(p + 360 * np.dot(hkl, shifts).ravel())

    # remove Friedel mates from the shifted phases and compute error metrics
    hkl_sel = utils.remove_Friedels(hklp.keys())
    hklp_unshift = OrderedDict((key, val)
                               for key, val in zip(hklp.keys(), p_unshift)
                               if key in hkl_sel)
    sigma, m_error = utils.residual_phase_distribution(refp, hklp_unshift)

    return sigma, m_error
def transition(self, v, omega, dt=0.1):
    """Propagate the pose by forward speed v (m/s) and turn rate omega (rad/s) over dt seconds."""
    from numpy import sin, cos

    hp = self.heading
    if omega == 0.0:
        # Straight-line motion.
        self.x += v * cos(hp) * dt
        self.y += v * sin(hp) * dt
    else:
        # Motion along a circular arc of radius v / omega.
        self.x += -v / omega * sin(hp) + v / omega * sin(hp + omega * dt)
        self.y += v / omega * cos(hp) - v / omega * cos(hp + omega * dt)
    self.heading = wraptopi(hp + omega * dt)
def test_wraptopi():
    """
    Confirm that utils.wraptopi yields the same results as Matlab's wrapTo180
    function, except that the former wraps to the domain [-180, 180) rather
    than [-180, 180] as in Matlab. See:
    https://www.mathworks.com/help/map/ref/wrapto180.html for more details.
    """
    x = np.array([-400, -190, -180, -175, 175, 180, 190, 380]).astype(float)
    xw = np.array([-40, 170, -180, -175, 175, 180, -170, 20]).astype(float)
    xw[xw == 180] = -180  # adjust Matlab's 180 to the [-180, 180) convention
    yw = utils.wraptopi(x)
    np.testing.assert_array_almost_equal(yw, xw)
    return
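# Minimal sketch of a degree-based wrap to [-180, 180), consistent with the
# convention exercised by test_wraptopi above. This is an illustration only,
# not the actual utils.wraptopi implementation; np is the module-level numpy
# import.
def _wraptopi_sketch(angles):
    # e.g. -400 -> -40, 190 -> -170, 180 -> -180
    return (np.asarray(angles) + 180.0) % 360.0 - 180.0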
def motion_model(poses, command_prev, odom_pose, odom_pose_prev):
    """Apply motion model and return updated array of poses.

    Parameters
    ----------
    poses: an M x 3 array of robot poses where M is the number of particles.
        Each pose is (x, y, theta) where x and y are in metres and theta is
        in radians.
    command_prev: a two element array of the previous commanded speed vector,
        (v, omega), where v is the forward speed in m/s and omega is the
        angular speed in rad/s.
    odom_pose: the current local odometry pose (x, y, theta).
    odom_pose_prev: the previous local odometry pose (x, y, theta).

    Returns
    -------
    An M x 3 array of updated poses.
    """
    M = poses.shape[0]

    # For each particle calculate its predicted pose plus some additive
    # error to represent the process noise.

    # Odometry model pose change parameterisation.
    phi_1 = wraptopi(np.arctan2(odom_pose[1] - odom_pose_prev[1],
                                odom_pose[0] - odom_pose_prev[0])
                     - odom_pose_prev[2])
    phi_2 = wraptopi(odom_pose[2] - odom_pose_prev[2] - phi_1)
    distance = np.sqrt((odom_pose[1] - odom_pose_prev[1])**2 +
                       (odom_pose[0] - odom_pose_prev[0])**2)

    # Additive process noise, drawn independently for each state component.
    mu, sigma = 0, 0.02  # mean and standard deviation
    s = np.random.normal(mu, sigma, (M, 3))

    # Updating poses: translate along each particle's own heading, then rotate.
    poses[:, 0] = poses[:, 0] + distance * np.cos(poses[:, 2] + phi_1) + s[:, 0]
    poses[:, 1] = poses[:, 1] + distance * np.sin(poses[:, 2] + phi_1) + s[:, 1]
    poses[:, 2] = poses[:, 2] + phi_1 + phi_2 + s[:, 2]

    return poses
def test_average_phases():
    """
    Confirming that mean of circular data is correctly computed by
    utils.average_phases. Comparison is to the scipy.stats.circmean function,
    but this doesn't have the option of computing the weighted mean.
    """
    p_vals = (np.random.rand(10) - 0.5) * 360.0
    p_ref = scipy.stats.circmean(np.deg2rad(p_vals),
                                 low=-1 * np.pi,
                                 high=np.pi)
    p_ref = utils.wraptopi(np.array(np.rad2deg(p_ref)))
    p_est = utils.average_phases(p_vals)
    np.testing.assert_allclose(p_ref, p_est)
    return
def test_std_phases():
    """
    Compare utils.std_phases to the output of scipy.stats.circstd. Note that
    the results diverge when the phases are randomly distributed over the
    unit circle, but match well when the range is not too large. Here it's
    checked that for phases randomly drawn from a domain of 45 degrees, the
    two functions match to within 1%.
    """
    p_domain = 45.0
    p_vals = (np.random.rand(10) - 0.5) * p_domain + np.random.randint(-180, 180)
    p_vals = utils.wraptopi(p_vals)
    p_ref = np.rad2deg(scipy.stats.circstd(np.deg2rad(p_vals)))
    p_est = utils.std_phases(p_vals)
    assert np.abs(p_est - p_ref) / p_ref * 100.0 < 1.0
    return
def test_reduce_crystals():
    import ProcessCrystals as proc

    # set up paths and default values
    pdb_path, res = "./reference/pdb_files/4bfh.pdb", 3.0
    sg_symbol, sg_no, cell, cs = cctbx_utils.unit_cell_info(pdb_path)
    refI, refp = cctbx_utils.reference_sf(pdb_path,
                                          res,
                                          expand_to_p1=True,
                                          table='electron')
    refI_mod = OrderedDict((key, np.array([val])) for key, val in refI.items())
    refp_mod = OrderedDict((key, np.array([val])) for key, val in refp.items())

    # check that reduced phases are internally consistent
    rc = proc.ReduceCrystals(refI_mod, refp_mod, cell, sg_symbol)
    p_asu = rc.reduce_phases(weighted=True)
    rc.reduce_intensities()
    eq = np.array([np.allclose(v, v[0]) for v in p_asu.values()])
    assert all([
        np.allclose(np.around(p_asu.values()[i]) % 180, 0)
        for i in np.where(eq == False)[0]
    ])

    # check that reduced phases and intensities match reference
    hkl_asu = list(
        cs.build_miller_set(anomalous_flag=False, d_min=res).indices())
    assert np.allclose(
        utils.wraptopi(
            np.array([rc.data['PHIB'][hkl] - refp[hkl] for hkl in hkl_asu])),
        0)
    assert np.allclose(
        np.array([rc.data['IMEAN'][hkl] - refI[hkl] for hkl in hkl_asu]), 0)

    # check that shifting phases from the origin leads to loss of
    # symmetry-expected relationships
    rc.shift_phases(np.random.random(3))
    p_asu = rc.reduce_phases(weighted=True)
    eq = np.array([np.allclose(v, v[0]) for v in p_asu.values()])
    assert not all([
        np.allclose(np.around(p_asu.values()[i]) % 180, 0)
        for i in np.where(eq == False)[0]
    ])
    return
def predict_angles(specs_path, pdb_path, resolution, A):
    """
    Predict the tilt angle at which each reflection will be observed. Here a
    positive tilt angle corresponds to images with a +y coordinate.
    Reflections that lie in the missing wedge are excluded.

    Inputs:
    -------
    specs_path: path to pickled dict specifying details of data collection strategy
    pdb_path: path to reference PDB file
    resolution: high-resolution limit of structure factors
    A: crystal setting matrix

    Outputs:
    --------
    hkl_t: dict with keys as Millers and values as tilt angles
    """
    # load information about collection strategy
    specs = pickle.load(open(specs_path))

    # predict coordinates of all reflections in reciprocal pixels
    sg_symbol, sg_no, cell, cs = cctbx_utils.unit_cell_info(pdb_path)
    hkl = np.array(
        miller.build_set(crystal_symmetry=cs,
                         anomalous_flag=True,
                         d_min=resolution).expand_to_p1().indices())
    qvecs = np.inner(A, np.squeeze(hkl)).T
    xyz = qvecs * 1.0 / specs['mag'] * specs['px_size']

    # predict tilt angle from associated coordinates
    t = np.rad2deg(np.arctan2(xyz[:, 1], xyz[:, 0]))
    q2 = (t > 90) & (t <= 180)
    t[q2] = utils.wraptopi(t[q2] + 180.0)  # shift q2 to q4
    t[(t >= -180) & (t <= -90)] += 180.0  # shift q3 to q1

    # retain reflections not in missing wedge region
    max_angle = specs['angle'] + 0.5 * specs['increment']
    valid_idx = np.where((t > -1 * max_angle) & (t < max_angle))[0]
    t, hkl = t[valid_idx], hkl[valid_idx]
    print "Retained %i reflections outside missing wedge" % (len(hkl))

    # generate a dict with keys as Millers and values as tilt angles
    hkl_t = OrderedDict((tuple(key), val) for key, val in zip(hkl, t))

    return hkl_t
def add_random_phase_shift(hklp):
    """
    Introduce a random phase shift, at most one unit cell length along each
    axis.

    Inputs:
    -------
    hklp: dict whose keys are Millers and values are phases in degrees

    Outputs:
    --------
    hklp_shifted: dict whose keys are Millers and values are shifted phases
    fshifts: fractional shifts by which origin was translated
    """
    fshifts = np.array([random.random() for i in range(3)])
    hkl, p = np.array(hklp.keys()), np.array(hklp.values())
    p_shifted = utils.wraptopi(p - 360 * np.dot(hkl, fshifts).ravel())
    hklp_shifted = OrderedDict(
        (tuple(key), val) for key, val in zip(hkl, p_shifted))

    return hklp_shifted, fshifts
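# Illustrative round-trip sketch (not part of the original module): applies a
# random origin shift to toy phases and then undoes it with the returned
# fshifts, mirroring how initial_errors above re-shifts phases back to the
# reference origin. The Miller indices and phase values are made up, and np,
# utils, and OrderedDict are the module-level imports already used above.
def _example_phase_shift_roundtrip():
    hklp = OrderedDict([((1, 0, 0), 45.0), ((0, 2, 1), -120.0), ((1, 1, 3), 10.0)])
    hklp_shifted, fshifts = add_random_phase_shift(hklp)
    hkl = np.array(hklp_shifted.keys())
    p_recovered = utils.wraptopi(
        np.array(hklp_shifted.values()) + 360 * np.dot(hkl, fshifts).ravel())
    # undoing the shift should recover the original phases (modulo 360 degrees)
    assert np.allclose(utils.wraptopi(p_recovered - np.array(hklp.values())), 0)
    return p_recovered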
def __init__(self, x=0, y=0, heading=pi / 2):
    """Initialise the pose; x and y are in metres, heading is wrapped to [-pi, pi)."""
    self.x = x
    self.y = y
    self.heading = wraptopi(heading)
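# Illustrative usage sketch (not part of the original module): the owning
# class for __init__ and transition above is not shown here, so it is passed
# in as robot_cls rather than named. pi and wraptopi are assumed to be the
# module-level imports already used by the constructor. The sketch simply
# drives the unicycle transition model for a few dead-reckoning steps.
def _example_dead_reckoning(robot_cls):
    # e.g. _example_dead_reckoning(Robot) for whatever the class is named
    robot = robot_cls(x=0.0, y=0.0, heading=pi / 2)
    for _ in range(10):
        robot.transition(v=0.5, omega=0.1, dt=0.1)  # gentle left-hand arc
    return robot.x, robot.y, robot.heading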