def test_atan2_f64():
    grad_test(lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x),
              default_fp=ti.f64)
    grad_test(lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4),
              default_fp=ti.f64)
def get_angles(sin_theta, cos_theta):
    theta = np.arctan2(sin_theta, cos_theta)
    if theta > 0:
        alpha = -np.pi + theta
    else:
        alpha = np.pi + theta
    return alpha, theta
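# A short check of get_angles (hypothetical inputs, not from the source):
# alpha re-expresses the recovered heading from the opposite side of the
# circle, so alpha and theta always differ by exactly pi.
import numpy as np

alpha, theta = get_angles(np.sin(2.0), np.cos(2.0))
print(theta)               # 2.0 (arctan2 recovers the original angle)
print(alpha)               # 2.0 - pi ~ -1.1416
print(abs(theta - alpha))  # pi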
def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
    x = np.concatenate(datas)
    weights = np.concatenate([Ez for Ez, _, _ in expectations])  # T x D
    assert x.shape[0] == weights.shape[0]

    # convert angles to 2D representation and employ closed form solutions
    x_k = np.stack((np.sin(x), np.cos(x)), axis=1)      # T x 2 x D
    r_k = np.tensordot(weights.T, x_k, axes=1)          # K x 2 x D
    r_norm = np.sqrt(np.sum(np.power(r_k, 2), axis=1))  # K x D

    mus_k = np.divide(r_k, r_norm[:, None])                 # K x 2 x D
    r_bar = np.divide(r_norm, np.sum(weights, 0)[:, None])  # K x D

    mask = (r_norm.sum(1) == 0)
    mus_k[mask] = 0
    r_bar[mask] = 0

    # Approximation
    kappa0 = r_bar * (self.D + 1 - np.power(r_bar, 2)) / \
        (1 - np.power(r_bar, 2))  # K, D

    kappa0[kappa0 == 0] += 1e-6

    for k in range(self.K):
        self.mus[k] = np.arctan2(*mus_k[k])
        self.log_kappas[k] = np.log(kappa0[k])  # K, D
def generate_candidate_edge(cinfo_a, cinfo_b):
    # we want a left of b (so a's successor will be b and b's
    # predecessor will be a) make sure right endpoint of b is to the
    # right of left endpoint of a.
    if cinfo_a.point0[0] > cinfo_b.point1[0]:
        cinfo_a, cinfo_b = cinfo_b, cinfo_a

    x_overlap_a = cinfo_a.local_overlap(cinfo_b)
    x_overlap_b = cinfo_b.local_overlap(cinfo_a)

    overall_tangent = cinfo_b.center - cinfo_a.center
    overall_angle = np.arctan2(overall_tangent[1], overall_tangent[0])

    delta_angle = max(angle_dist(cinfo_a.angle, overall_angle),
                      angle_dist(cinfo_b.angle, overall_angle)) * 180 / np.pi

    # we want the largest overlap in x to be small
    x_overlap = max(x_overlap_a, x_overlap_b)

    dist = np.linalg.norm(cinfo_b.point0 - cinfo_a.point1)

    if (dist > EDGE_MAX_LENGTH or
            x_overlap > EDGE_MAX_OVERLAP or
            delta_angle > EDGE_MAX_ANGLE):
        return None
    else:
        score = dist + delta_angle * EDGE_ANGLE_COST
        return (score, cinfo_a, cinfo_b)
def avg_angle(th1, th2):
    x = np.cos(th1) + np.cos(th2)
    y = np.sin(th1) + np.sin(th2)
    if (x == 0 and y == 0):
        return 0
    else:
        return np.arctan2(y, x)
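# A minimal sketch (not from the source) of why avg_angle sums unit
# vectors instead of averaging the raw angles: the arithmetic mean fails
# across the +/-pi wrap, while arctan2 of the summed components does not.
import numpy as np

th1, th2 = np.pi - 0.1, -np.pi + 0.1  # hypothetical angles straddling the wrap
print(avg_angle(th1, th2))   # ~3.14159: correctly near pi
print((th1 + th2) / 2.0)     # 0.0: naive mean lands on the wrong side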
def vInfRA(self, r, v, mu):
    """ right ascension of v infinity """
    s = self.sVector(r, v, mu)
    RA = np.arctan2(s[1], s[0])
    return RA
def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
    from autograd.scipy.special import i0, i1

    x = np.concatenate(datas)
    weights = np.concatenate([Ez for Ez, _, _ in expectations])

    # convert angles to 2D representation and employ closed form solutions
    x_k = np.stack((np.sin(x), np.cos(x)), axis=1)
    r_k = np.tensordot(weights.T, x_k, (-1, 0))
    r_norm = np.sqrt(np.sum(r_k**2, 1))
    mus_k = r_k / r_norm[:, None]
    r_bar = r_norm / weights.sum(0)[:, None]

    # truncated newton approximation with 2 iterations
    kappa_0 = r_bar * (2 - r_bar**2) / (1 - r_bar**2)
    kappa_1 = kappa_0 - ((i1(kappa_0) / i0(kappa_0)) - r_bar) / \
        (1 - (i1(kappa_0) / i0(kappa_0))**2 - (i1(kappa_0) / i0(kappa_0)) / kappa_0)
    kappa_2 = kappa_1 - ((i1(kappa_1) / i0(kappa_1)) - r_bar) / \
        (1 - (i1(kappa_1) / i0(kappa_1))**2 - (i1(kappa_1) / i0(kappa_1)) / kappa_1)

    for k in range(self.K):
        self.mus[k] = np.arctan2(*mus_k[k])
        self.log_kappas[k] = np.log(kappa_2[k])
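# A small sanity check (a sketch, not part of the source): the Newton
# updates above solve A(kappa) = I1(kappa)/I0(kappa) = r_bar for the von
# Mises concentration. Two iterations from the Banerjee-style initializer
# already track a brute-force root closely for moderate r_bar.
import numpy as np
from scipy.special import i0, i1
from scipy.optimize import brentq

r_bar = 0.7  # hypothetical mean resultant length
kappa = r_bar * (2 - r_bar**2) / (1 - r_bar**2)
for _ in range(2):  # the same truncated Newton refinement as in m_step
    A = i1(kappa) / i0(kappa)
    kappa -= (A - r_bar) / (1 - A**2 - A / kappa)
exact = brentq(lambda k: i1(k) / i0(k) - r_bar, 1e-6, 1e3)
print(kappa, exact)  # the two agree closely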
def ur5fk(thetas):
    thetas_0 = anp.array([0, pi / 2, 0, pi / 2, pi])
    thetas = thetas + thetas_0
    # thetas = thetas._value

    d0 = 0.3
    d1 = 8.92e-2
    d2 = 0.11
    d5 = 9.475e-2
    # d6 = 7.495e-2
    d6 = 1.1495e-1
    a2 = 4.251e-1
    a3 = 3.9215e-1

    # All = np.zeros((6, 4, 4))
    # All[:, 3, 3] = 1

    A1 = anp.array([[anp.cos(thetas[0]), -anp.sin(thetas[0]), 0, 0],
                    [anp.sin(thetas[0]), anp.cos(thetas[0]), 0, 0],
                    [0, 0, 1, d1],
                    [0, 0, 0, 1]])
    A2 = anp.array([[anp.cos(thetas[1]), -anp.sin(thetas[1]), 0, 0],
                    [0, 0, -1, -d2],
                    [anp.sin(thetas[1]), anp.cos(thetas[1]), 0, 0],
                    [0, 0, 0, 1]])
    A3 = anp.array([[anp.cos(thetas[2]), -anp.sin(thetas[2]), 0, a2],
                    [anp.sin(thetas[2]), anp.cos(thetas[2]), 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
    A4 = anp.array([[anp.cos(thetas[3]), -anp.sin(thetas[3]), 0, a3],
                    [anp.sin(thetas[3]), anp.cos(thetas[3]), 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
    A5 = anp.array([[anp.cos(thetas[4]), -anp.sin(thetas[4]), 0, 0],
                    [0, 0, -1, -d5],
                    [anp.sin(thetas[4]), anp.cos(thetas[4]), 0, 0],
                    [0, 0, 0, 1]])
    A6 = anp.array([[1, 0, 0, 0],
                    [0, 1, 0, -d6],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])

    A0 = anp.zeros((4, 4))
    A0[0, 1] = 1
    A0[1, 0] = -1
    A0[2, 2] = 1
    A0[2, 3] = d0
    A0[3, 3] = 1
    # A0[2, 3] = 0

    A = A0 @ A1 @ A2 @ A3 @ A4 @ A5 @ A6

    euler = anp.array([
        anp.arctan2(A[2, 1], A[2, 2]),
        anp.arctan2(-A[2, 0], anp.sqrt(A[2, 1]**2 + A[2, 2]**2)),
        anp.arctan2(A[1, 0], A[0, 0])
    ])
    return anp.concatenate([A[:3, 3], euler])
def bTheta(self, r, v, mu):
    """ b plane theta (clock angle) """
    BR = self.bDotR(r, v, mu)
    BT = self.bDotT(r, v, mu)
    theta = np.arctan2(BR, BT)
    return theta
def TE_angle(self):
    # Returns the trailing edge angle of the airfoil, in degrees
    upper_TE_vec = self.coordinates[0, :] - self.coordinates[1, :]
    lower_TE_vec = self.coordinates[-1, :] - self.coordinates[-2, :]

    # Signed angle between the two vectors via arctan2(cross, dot)
    return np.degrees(np.arctan2(
        upper_TE_vec[0] * lower_TE_vec[1] - upper_TE_vec[1] * lower_TE_vec[0],
        upper_TE_vec[0] * lower_TE_vec[0] + upper_TE_vec[1] * lower_TE_vec[1]
    ))
def _g(self, z, i):
    xydiff = self.beacons[i] - z[:2]
    if np.linalg.norm(xydiff) <= self.max_radar_range:
        range_ = np.linalg.norm(xydiff)
        bearing = modpi2pi(np.arctan2(xydiff[1], xydiff[0]) - z[2])
        self.logger.info(
            f"DiffDrive {self.name} observations: range {range_} / bearing {bearing}"
        )
        return np.array([range_, bearing])
    return None
def angle_between_2_vectors(self, x, y):
    """Calculate the angle between two 3D vectors, in radians."""
    c = np.dot(np.transpose(x), y)       # cosine is the dot product
    xcy = np.dot(self.crossmat(x), y)    # cross product via the skew matrix
    s = self.column_vector_norm2(xcy)    # sine is the norm of the cross product
    if (s == 0. and c == 0.):
        angle = 0.  # degenerate case
    else:
        angle = np.arctan2(s, c[0, 0])
    return angle
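# A sketch (not from the source) of why arctan2(|cross|, dot) is preferred
# over arccos(dot / (|x||y|)): near 0 or pi the arccos form loses precision,
# collapsing tiny angles to exactly zero through rounding.
import numpy as np

x = np.array([1.0, 0.0, 0.0])
y = np.array([1.0, 1e-8, 0.0])  # hypothetical nearly parallel vectors
dot = np.dot(x, y)
cross = np.linalg.norm(np.cross(x, y))
print(np.arctan2(cross, dot))  # ~1e-8, full precision retained
print(np.arccos(np.clip(dot / (np.linalg.norm(x) * np.linalg.norm(y)), -1, 1)))  # 0.0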
def evaluate(variable_values, parameters):
    ax = variable_values[parameters["ax"]]
    bx = variable_values[parameters["bx"]]
    dx = bx - ax
    ay = variable_values[parameters["ay"]]
    by = variable_values[parameters["by"]]
    dy = by - ay
    angle = numpy.arctan2(dy, dx)
    error1 = angle - parameters["angle"]
    # Normalize the angular difference using
    #   (a + 180°) % 360° - 180°
    # https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
    return numpy.remainder(error1 + math.pi, 2 * math.pi) - math.pi
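# A quick worked example (not from the source) of the normalization above:
# a raw difference of 350° should count as -10°, and (a + 180°) % 360° - 180°
# delivers exactly that in radians.
import math
import numpy

raw = math.radians(350.0)  # hypothetical raw angular error
wrapped = numpy.remainder(raw + math.pi, 2 * math.pi) - math.pi
print(math.degrees(wrapped))  # -10.0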
def obstaclePathCallback(self, msg):
    '''
    Stores the obstacle avoidance path generated by the path planner for
    determining where to update the final point of the path in order to
    accommodate the robot's pose at the end of the obstacle path and the
    start of the maneuver's first segment.

    Args:
    -----
    msg: nav_msgs/Path object

    Returns:
    --------
    None
    '''
    if (self.update_obstacle_end_pose):
        # Calculate the pose from the last two points in the path
        x_1 = msg.poses[-2].pose.position.x
        y_1 = msg.poses[-2].pose.position.y
        x_2 = msg.poses[-1].pose.position.x
        y_2 = msg.poses[-1].pose.position.y
        theta_obstacle_path = np.arctan2((y_2 - y_1), (x_2 - x_1))

        # Find the heading error for the first maneuver segment
        theta_error = self.pose_s.theta - theta_obstacle_path
        direction = np.sign(theta_error)

        # Place the end point at the appropriate position along the minimum
        # turning radius based on the heading error
        center_point = np.array([self.pose_s.x, self.pose_s.y])
        R_s = rotZ2D(self.pose_s.theta)
        new_target = center_point + R_s.dot(
            np.array([
                self.min_radius * np.cos(-direction * (np.pi / 2) - theta_error),
                self.min_radius * np.sin(-direction * (np.pi / 2) - theta_error)
            ]))

        # Update target position for obstacle avoidance path
        rospy.set_param("/control_panel_node/goal_x", float(new_target[0]))
        rospy.set_param("/control_panel_node/goal_y", float(new_target[1]))

        self.update_obstacle_end_pose = False
def __init__(self, contour, rect, mask):
    self.contour = contour
    self.rect = rect
    self.mask = mask

    self.center, self.tangent = blob_mean_and_tangent(contour)
    self.angle = np.arctan2(self.tangent[1], self.tangent[0])

    clx = [self.proj_x(point) for point in contour]
    lxmin = min(clx)
    lxmax = max(clx)

    self.local_xrng = (lxmin, lxmax)
    self.point0 = self.center + self.tangent * lxmin
    self.point1 = self.center + self.tangent * lxmax

    self.pred = None
    self.succ = None
def inverse_dynamics(self, x, u):
    u = np.clip(u, -self._umax, self._umax)

    g, m, l = 9.80665, 1., 1.

    # transfer to th/thd space
    sth, cth, dth = x
    _x = np.hstack((np.arctan2(sth, cth), dth))

    def f(x, u):
        th, dth = x
        return np.hstack((dth, -3. * g / (2. * l) * np.sin(th + np.pi)
                          + 3. / (m * l**2) * (u - self._k * dth)))

    k1 = f(_x, u)
    k2 = f(_x - 0.5 * self.dt * k1, u)
    k3 = f(_x - 0.5 * self.dt * k2, u)
    k4 = f(_x - self.dt * k3, u)

    _xn = _x - self.dt / 6. * (k1 + 2. * k2 + 2. * k3 + k4)

    xn = np.array([np.cos(_xn[0]), np.sin(_xn[0]), _xn[1]])
    xn = np.clip(xn, -self._xmax, self._xmax)

    return xn
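# A sketch (toy dynamics, not from the source) of what the minus signs in
# inverse_dynamics do: they are a classical RK4 step taken with a negative
# time step, so the function approximately undoes one forward RK4 step.
import numpy as np

def rk4_step(f, x, dt):
    k1 = f(x)
    k2 = f(x + 0.5 * dt * k1)
    k3 = f(x + 0.5 * dt * k2)
    k4 = f(x + dt * k3)
    return x + dt / 6. * (k1 + 2. * k2 + 2. * k3 + k4)

f = lambda x: np.array([x[1], -np.sin(x[0])])  # toy pendulum ODE
x0 = np.array([0.3, 0.0])
x1 = rk4_step(f, x0, 0.01)                  # forward step
x0_back = rk4_step(f, x1, -0.01)            # backward step, as above
print(np.allclose(x0, x0_back, atol=1e-9))  # True to integrator accuracy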
def d_z_d_t_numpy_batchs(x, y, z, t, params, ode_params):
    a_p, a_q, a_r, a_s, a_t = params[0], params[3], params[6], params[9], params[12]
    b_p, b_q, b_r, b_s, b_t = params[1], params[4], params[7], params[10], params[13]
    theta_p, theta_q, theta_r, theta_s, theta_t = \
        params[2], params[5], params[8], params[11], params[14]

    theta = np.arctan2(y, x)

    delta_theta_p = np.fmod(theta - theta_p, 2 * math.pi)
    delta_theta_q = np.fmod(theta - theta_q, 2 * math.pi)
    delta_theta_r = np.fmod(theta - theta_r, 2 * math.pi)
    delta_theta_s = np.fmod(theta - theta_s, 2 * math.pi)
    delta_theta_t = np.fmod(theta - theta_t, 2 * math.pi)

    z_p = a_p * delta_theta_p * np.exp(-delta_theta_p * delta_theta_p / (2 * b_p * b_p))
    z_q = a_q * delta_theta_q * np.exp(-delta_theta_q * delta_theta_q / (2 * b_q * b_q))
    z_r = a_r * delta_theta_r * np.exp(-delta_theta_r * delta_theta_r / (2 * b_r * b_r))
    z_s = a_s * delta_theta_s * np.exp(-delta_theta_s * delta_theta_s / (2 * b_s * b_s))
    z_t = a_t * delta_theta_t * np.exp(-delta_theta_t * delta_theta_t / (2 * b_t * b_t))

    z_0_t = ode_params.A * np.sin(2 * math.pi * ode_params.f2 * t)

    f_z = -1 * (z_p + z_q + z_r + z_s + z_t) - (z - z_0_t)
    return f_z
def maneuverIneqConstraints(self, x, params):
    '''
    Optimization inequality constraint function. scipy.optimize.minimize
    defines the constraints as c_j >= 0.

    subject to:
        1) -obstaclesInManeuver() >= 0
        2) abs(r_1) - min_radius >= 0

    Args:
    -----
    x: [x_s, y_s]
        x_s: forklift starting 'x' position of maneuver
        y_s: forklift starting 'y' position of maneuver
    params: {"obstacles" : obstacles, "min_radius" : min_radius}
        "obstacles" : obstacles: mx2 numpy array of obstacle locations (x,y)
        "min_radius" : min_radius: the minimum allowable turning radius

    Returns:
    --------
    C: number of obstacles in the bounding box
    '''
    # Unpack variables and parameters
    x_s = x[0]
    y_s = x[1]

    # Point the pose towards the center of the roll
    theta_s = np.arctan2(self.target_y - y_s, self.target_x - x_s)

    # Convert states to poses
    pose_s = Pose2D(x_s, y_s, theta_s)

    # First constraint
    C = -self.obstaclesInManeuver(pose_s, params["obstacles"])

    return C
octave, layer, scale = unpackSIFTOctave(keypoints[0][j].octave)
theta = keypoints[0][j].angle
dx_, dy_ = np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta))
x_0, y_0, _ = tuple(h_apply(H[0, i], (x0 + s * dx_, y0 + s * dy_, 1)))
x_1, y_1, _ = tuple(h_apply(H[0, i], (x0 - s * dy_, y0 + s * dx_, 1)))
x_2, y_2, _ = tuple(h_apply(H[0, i], (x0 + s * dy_, y0 - s * dx_, 1)))
x_3, y_3, _ = tuple(h_apply(H[0, i], (x0 - s * dx_, y0 - s * dy_, 1)))
s_new = np.mean([
    np.linalg.norm((x_0 - x_3, y_0 - y_3)) / 2,
    np.linalg.norm((x_1 - x_2, y_1 - y_2)) / 2
])
angle_new = np.arctan2(-y_0 + y_3, x_0 - x_3)
angle_new = np.rad2deg(angle_new + 2 * np.pi * (angle_new < 0))
myplot.append((s_new, s, octave, layer, scale, theta, angle_new))

# Angles
a_orig = np.array([x[5] for x in myplot])
a_new = np.array([x[6] for x in myplot])
plt.scatter(a_orig, a_new)
plt.title('Angle differences')
plt.xlabel('Original')
plt.ylabel('New')
plt.show()
sys.exit()

# Plot sizes and octaves
z = np.array([0 for x in myplot])
sorig = np.array([x[1] for x in myplot])
snew = np.array([x[0] for x in myplot])
def test_bfgs_linear_speed(self):
    for _ in range(10):
        h = 0.1
        x0, y0 = np.random.uniform(-1, 1 - h, (2,))
        x1, y1 = x0 + h, y0 + h
        x, y = x0 - h, y0
        vx, vy = np.random.uniform(-0.05, 0.05, (2,))
        s_gt = get_linear_speed_s(vx, vy)
        slow = sjs.get_linear_speed_field2(vx, vy)
        tau_Omega = get_linear_speed_tau(vx, vy)
        tau = lambda lam, mu: tau_Omega(
            (1 - lam) * x0 + lam * x1,
            (1 - mu) * y0 + mu * y1
        )
        grad_tau = autograd.grad(lambda args: tau(args[0], args[1]))
        hess_tau = autograd.hessian(lambda args: tau(args[0], args[1]))
        tau_x = lambda lam, mu: grad_tau(np.array([lam, mu]))[0]
        tau_y = lambda lam, mu: grad_tau(np.array([lam, mu]))[1]
        tau_xy = lambda lam, mu: hess_tau(np.array([lam, mu]))[1][0]
        data = np.array([
            [  tau(0., 0.),   tau(0., 1.),   tau_y(0., 0.),   tau_y(0., 1.)],
            [  tau(1., 0.),   tau(1., 1.),   tau_y(1., 0.),   tau_y(1., 1.)],
            [tau_x(0., 0.), tau_x(0., 1.), tau_xy(0., 0.), tau_xy(0., 1.)],
            [tau_x(1., 0.), tau_x(1., 1.), tau_xy(1., 0.), tau_xy(1., 1.)],
        ])
        bicubic = sjs.Bicubic(data)
        T = bicubic.get_f_on_edge(sjs.BicubicVariable.Lambda, 0)
        Tx = bicubic.get_fx_on_edge(sjs.BicubicVariable.Lambda, 0)
        Ty = bicubic.get_fy_on_edge(sjs.BicubicVariable.Lambda, 0)
        a_T = np.array([T.a[i] for i in range(4)])
        a_Tx = np.array([Tx.a[i] for i in range(4)])
        a_Ty = np.array([Ty.a[i] for i in range(4)])
        p, p0, p1 = np.array([x, y]), np.array([x0, y0]), np.array([x0, y1])
        context_gt = F4(s_gt, a_T, a_Tx, a_Ty, p, p0, p1)
        xy = sjs.Dvec2(*p)
        xy0 = sjs.Dvec2(*p0)
        xy1 = sjs.Dvec2(*p1)
        context = sjs.F4Context(T, Tx, Ty, xy, xy0, xy1, slow)
        context3 = sjs.F3Context(T, xy, xy0, xy1, slow)

        def F3(eta):
            context3.compute(eta)
            return context3.F3

        def F3_eta(eta):
            context3.compute(eta)
            return context3.F3_eta

        if np.sign(F3_eta(0.)) == np.sign(F3_eta(1.)):
            argeta3 = 0. if F3(0.) < F3(1.) else 1.
        else:
            argeta3 = brentq(F3_eta, 0, 1)

        lp = (p - p0 - argeta3 * (p1 - p0))
        lp /= np.linalg.norm(lp)
        argth3 = np.arctan2(*reversed(lp))
        xk_gt = np.array([argeta3, argth3])

        eps = 1e-7
        hess_gt = context_gt.hess_F4(xk_gt)
        hess_fd = context.hess_fd(argeta3, argth3, eps)
        self.assertTrue(abs(hess_gt[0, 0] - hess_fd[0, 0]) < eps)
        self.assertTrue(abs(hess_gt[1, 0] - hess_fd[1, 0]) < eps)
        self.assertTrue(abs(hess_gt[0, 1] - hess_fd[0, 1]) < eps)
        self.assertTrue(abs(hess_gt[1, 1] - hess_fd[1, 1]) < eps)

        gk_gt = context_gt.grad_F4(xk_gt)
        Hk_gt = np.linalg.inv(hess_gt)
        xk, gk, Hk = context.bfgs_init(*xk_gt)

        eps = 1e-6
        self.assertTrue(abs(Hk_gt[0, 0] - Hk[0, 0]) < eps)
        self.assertTrue(abs(Hk_gt[1, 0] - Hk[1, 0]) < eps)
        self.assertTrue(abs(Hk_gt[0, 1] - Hk[0, 1]) < eps)
        self.assertTrue(abs(Hk_gt[1, 1] - Hk[1, 1]) < eps)
def rayIntersectsSegment(point_in, segment):
    '''
    Determines whether a given point with a ray shooting in the positive X
    direction intersects a line segment specified by two points. Returns 0
    if it does not intersect, 1 if it does intersect, and 2 if the point is
    on the segment.

    Args:
    -----
    point_in: 2x1 numpy array containing an (x,y) position
    segment: 2x2 numpy array containing 2 (x,y) points that define a line
        segment

    Returns:
    --------
    intersect: 0 if no intersection, 1 if it intersects but the point is
        not on the segment, 2 if the point is on the segment

    (The original docstring contains ASCII diagrams, garbled here, that
    sketch the "Good"/"Bad"/"?" regions around the segment and the point
    angle vs. segment angle comparison used below.)
    '''
    # If the point is equal to one of the vertices, return 2 for "on the segment"
    point = copy.copy(point_in)  # this keeps "point_in" from being altered
    if ((point[0] == segment[0, 0] and point[1] == segment[0, 1]) or
            (point[0] == segment[1, 0] and point[1] == segment[1, 1])):
        return 2

    # If the point is equal with one of the points in the Y dimension,
    # increase its Y position by a small amount
    if (point[1] == segment[0, 1] or point[1] == segment[1, 1]):
        point[1] = point[1] + 0.000001

    # Find the top and bottom points in the Y direction
    if (segment[0, 1] > segment[1, 1]):
        top = segment[0, :]
        bottom = segment[1, :]
    else:
        top = segment[1, :]
        bottom = segment[0, :]

    # Get min and max values
    x_max = max(segment[0, 0], segment[1, 0])
    x_min = min(segment[0, 0], segment[1, 0])
    y_max = top[1]
    y_min = bottom[1]

    # Check if the point is in the "Bad" region
    if (point[0] > x_max or point[1] < y_min or point[1] > y_max):
        return 0
    # Check if the point is in the "Good" region
    elif (point[0] < x_min):
        return 1
    # Check if the point is in the "?" region
    else:
        # Calculate the angle of the segment and the angle of the point with
        # the segment's bottom point. If the point angle is larger than the
        # segment angle, then the ray crosses the segment. If they are equal,
        # the point is on the segment. If the point angle is smaller, the ray
        # does not cross.
        #
        # NOTE: use these equations for normal operation
        # segment_angle = np.arctan2(top[1] - bottom[1], top[0] - bottom[0])
        # point_angle = np.arctan2(point[1] - bottom[1], point[0] - bottom[0])
        # NOTE: use these equations for autograd automatic differentiation
        segment_angle = anp.arctan2(top[1] - bottom[1], top[0] - bottom[0])
        point_angle = anp.arctan2(point[1] - bottom[1], point[0] - bottom[0])

        # Check the angle between the point, the bottom of the segment, and
        # the positive X direction
        if (point_angle > segment_angle):
            return 1
        elif (point_angle < segment_angle):
            return 0
        else:
            return 2
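# A quick usage sketch (hypothetical values, not from the source; assumes
# the module imports used by rayIntersectsSegment, i.e. copy and
# autograd.numpy as anp, are in scope). Ray casting like this is the core
# of point-in-polygon tests, where the parity of the crossing count
# decides containment.
import numpy as np

seg = np.array([[2.0, 0.0], [2.0, 2.0]])                # vertical segment at x = 2
print(rayIntersectsSegment(np.array([0.0, 1.0]), seg))  # 1: ray crosses
print(rayIntersectsSegment(np.array([3.0, 1.0]), seg))  # 0: point right of segment
print(rayIntersectsSegment(np.array([2.0, 1.0]), seg))  # 2: point on segment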
def test_atan2():
    grad_test(lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x))
    grad_test(lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4))
def reconstruct_ptychography(
        # ______________________________________
        # |Raw data and experimental parameters|________________________________
        fname, probe_pos, probe_size, obj_size, theta_st=0, theta_end=PI,
        n_theta=None, theta_downsample=None, energy_ev=5000, psize_cm=1e-7,
        free_prop_cm=None,
        # ___________________________
        # |Reconstruction parameters|___________________________________________
        n_epochs='auto', crit_conv_rate=0.03, max_nepochs=200,
        alpha_d=None, alpha_b=None, gamma=1e-6, learning_rate=1.0,
        minibatch_size=None, multiscale_level=1, n_epoch_final_pass=None,
        initial_guess=None, n_batch_per_update=1, reweighted_l1=False,
        interpolation='bilinear',
        # ___________________________
        # |Finite support constraint|___________________________________________
        finite_support_mask_path=None, shrink_cycle=None, shrink_threshold=1e-9,
        # ____________________
        # |Object constraints|
        object_type='normal',
        # _______________
        # |Forward model|_______________________________________________________
        forward_algorithm='fresnel', binning=1, fresnel_approx=False,
        pure_projection=False, two_d_mode=False, probe_type='gaussian',
        probe_initial=None,
        # _____
        # |I/O|_________________________________________________________________
        save_path='.', output_folder=None, save_intermediate=False,
        full_intermediate=False, use_checkpoint=True, save_stdout=False,
        # _____________
        # |Performance|_________________________________________________________
        cpu_only=False, core_parallelization=True, shared_file_object=True,
        n_dp_batch=20,
        # __________________________
        # |Object optimizer options|____________________________________________
        optimizer='adam',
        # _________________________
        # |Other optimizer options|_____________________________________________
        probe_learning_rate=1e-3,
        optimize_probe_defocusing=False, probe_defocusing_learning_rate=1e-5,
        optimize_probe_pos_offset=False,
        # ________________
        # |Other settings|______________________________________________________
        dynamic_rate=True, pupil_function=None, probe_circ_mask=0.9,
        dynamic_dropping=False, dropping_threshold=8e-5,
        **kwargs,):
    # ______________________________________________________________________
    """
    Notes:
        1. Input data are assumed to be contained in an HDF5 under
           'exchange/data', as a 4D dataset of shape
           [n_theta, n_spots, detector_size_y, detector_size_x].
        2. Full-field reconstruction is treated as ptychography. If the image
           is not divided, the program runs as if it is dealing with
           ptychography with only 1 spot per angle.
        3. Full-field reconstruction with minibatch_size > 1 but without image
           dividing is not supported. In this case, minibatch_size will be
           forced to be 1, so that each rank processes only one rotation
           angle's image at a time. To perform large full-field reconstruction
           efficiently, divide the data into sub-chunks.
        4. Full-field reconstruction using shared_file_mode but without image
           dividing is not recommended even if minibatch_size is 1. In
           shared_file_mode, all ranks process data from the same rotation
           angle in each synchronized batch. Doing this will cause all ranks
           to process the same data. To perform large full-field
           reconstruction efficiently, divide the data into sub-chunks.
""" def calculate_loss(obj_delta, obj_beta, probe_real, probe_imag, probe_defocus_mm, probe_pos_offset, this_i_theta, this_pos_batch, this_prj_batch): if optimize_probe_defocusing: h_probe = get_kernel(probe_defocus_mm * 1e6, lmbda_nm, voxel_nm, probe_size, fresnel_approx=fresnel_approx) probe_complex = probe_real + 1j * probe_imag probe_complex = np.fft.ifft2(np.fft.ifftshift(np.fft.fftshift(np.fft.fft2(probe_complex)) * h_probe)) probe_real = np.real(probe_complex) probe_imag = np.imag(probe_complex) if optimize_probe_pos_offset: this_pos_batch = this_pos_batch + probe_pos_offset[this_i_theta] if not shared_file_object: obj_stack = np.stack([obj_delta, obj_beta], axis=3) if not two_d_mode: obj_rot = apply_rotation(obj_stack, coord_ls[this_i_theta]) # obj_rot = sp_rotate(obj_stack, theta, axes=(1, 2), reshape=False) else: obj_rot = obj_stack probe_pos_batch_ls = [] exiting_ls = [] i_dp = 0 while i_dp < minibatch_size: probe_pos_batch_ls.append(this_pos_batch[i_dp:min([i_dp + n_dp_batch, minibatch_size])]) i_dp += n_dp_batch # Pad if needed obj_rot, pad_arr = pad_object(obj_rot, this_obj_size, probe_pos, probe_size) for k, pos_batch in enumerate(probe_pos_batch_ls): subobj_ls = [] for j in range(len(pos_batch)): pos = pos_batch[j] pos = [int(x) for x in pos] pos[0] = pos[0] + pad_arr[0, 0] pos[1] = pos[1] + pad_arr[1, 0] subobj = obj_rot[pos[0]:pos[0] + probe_size[0], pos[1]:pos[1] + probe_size[1], :, :] subobj_ls.append(subobj) subobj_ls = np.stack(subobj_ls) exiting = multislice_propagate_batch_numpy(subobj_ls[:, :, :, :, 0], subobj_ls[:, :, :, :, 1], probe_real, probe_imag, energy_ev, psize_cm * ds_level, kernel=h, free_prop_cm=free_prop_cm, obj_batch_shape=[len(pos_batch), *probe_size, this_obj_size[-1]], fresnel_approx=fresnel_approx, pure_projection=pure_projection) exiting_ls.append(exiting) exiting_ls = np.concatenate(exiting_ls, 0) loss = np.mean((np.abs(exiting_ls) - np.abs(this_prj_batch)) ** 2) else: probe_pos_batch_ls = [] exiting_ls = [] i_dp = 0 while i_dp < minibatch_size: probe_pos_batch_ls.append(this_pos_batch[i_dp:min([i_dp + n_dp_batch, minibatch_size])]) i_dp += n_dp_batch pos_ind = 0 for k, pos_batch in enumerate(probe_pos_batch_ls): subobj_ls_delta = obj_delta[pos_ind:pos_ind + len(pos_batch), :, :, :] subobj_ls_beta = obj_beta[pos_ind:pos_ind + len(pos_batch), :, :, :] exiting = multislice_propagate_batch_numpy(subobj_ls_delta, subobj_ls_beta, probe_real, probe_imag, energy_ev, psize_cm * ds_level, kernel=h, free_prop_cm=free_prop_cm, obj_batch_shape=[len(pos_batch), *probe_size, this_obj_size[-1]], fresnel_approx=fresnel_approx, pure_projection=pure_projection) exiting_ls.append(exiting) pos_ind += len(pos_batch) exiting_ls = np.concatenate(exiting_ls, 0) loss = np.mean((np.abs(exiting_ls) - np.abs(this_prj_batch)) ** 2) # dxchange.write_tiff(abs(exiting_ls._value[0]), output_folder + '/det/det', dtype='float32', overwrite=True) # raise # Regularization if reweighted_l1: if alpha_d not in [None, 0]: loss = loss + alpha_d * np.mean(weight_l1 * np.abs(obj_delta)) if alpha_b not in [None, 0]: loss = loss + alpha_b * np.mean(weight_l1 * np.abs(obj_beta)) else: if alpha_d not in [None, 0]: loss = loss + alpha_d * np.mean(np.abs(obj_delta)) if alpha_b not in [None, 0]: loss = loss + alpha_b * np.mean(np.abs(obj_beta)) if gamma not in [None, 0]: if shared_file_object: loss = loss + gamma * total_variation_3d(obj_delta, axis_offset=1) else: loss = loss + gamma * total_variation_3d(obj_delta, axis_offset=0) # Write convergence data global current_loss current_loss = 
loss._value f_conv.write('{},{},{},'.format(i_epoch, i_batch, current_loss)) f_conv.flush() return loss comm = MPI.COMM_WORLD n_ranks = comm.Get_size() rank = comm.Get_rank() t_zero = time.time() timestr = str(datetime.datetime.today()) timestr = timestr[:timestr.find('.')] for i in [':', '-', ' ']: if i == ' ': timestr = timestr.replace(i, '_') else: timestr = timestr.replace(i, '') # ================================================================================ # Create pointer for raw data. # ================================================================================ t0 = time.time() print_flush('Reading data...', 0, rank) f = h5py.File(os.path.join(save_path, fname), 'r') prj = f['exchange/data'] if n_theta is None: n_theta = prj.shape[0] if two_d_mode: n_theta = 1 prj_theta_ind = np.arange(n_theta, dtype=int) theta = -np.linspace(theta_st, theta_end, n_theta, dtype='float32') if theta_downsample is not None: theta = theta[::theta_downsample] prj_theta_ind = prj_theta_ind[::theta_downsample] n_theta = len(theta) original_shape = [n_theta, *prj.shape[1:]] print_flush('Data reading: {} s'.format(time.time() - t0), 0, rank) print_flush('Data shape: {}'.format(original_shape), 0, rank) comm.Barrier() not_first_level = False stdout_options = {'save_stdout': save_stdout, 'output_folder': output_folder, 'timestamp': timestr} n_pos = len(probe_pos) probe_pos = np.array(probe_pos) # ================================================================================ # Batching check. # ================================================================================ if minibatch_size > 1 and n_pos == 1: warnings.warn('It seems that you are processing undivided fullfield data with' 'minibatch > 1. A rank can only process data from the same rotation' 'angle at a time. I am setting minibatch_size to 1.') minibatch_size = 1 if shared_file_object and n_pos == 1: warnings.warn('It seems that you are processing undivided fullfield data with' 'shared_file_object=True. In shared-file mode, all ranks must' 'process data from the same rotation angle in each synchronized' 'batch.') # ================================================================================ # Set output folder name if not specified. # ================================================================================ if output_folder is None: output_folder = 'recon_{}'.format(timestr) if abs(PI - theta_end) < 1e-3: output_folder += '_180' print_flush('Output folder is {}'.format(output_folder), 0, rank) if save_path != '.': output_folder = os.path.join(save_path, output_folder) for ds_level in range(multiscale_level - 1, -1, -1): # ================================================================================ # Set metadata. # ================================================================================ ds_level = 2 ** ds_level print_flush('Multiscale downsampling level: {}'.format(ds_level), 0, rank, **stdout_options) comm.Barrier() prj_shape = original_shape if ds_level > 1: this_obj_size = [int(x / ds_level) for x in obj_size] else: this_obj_size = obj_size dim_y, dim_x = prj_shape[-2:] if minibatch_size is None: minibatch_size = n_pos comm.Barrier() # ================================================================================ # Create output directory. 
        # ================================================================================
        if rank == 0:
            try:
                os.makedirs(os.path.join(output_folder))
            except:
                print('Target folder {} exists.'.format(output_folder))
        comm.Barrier()

        # ================================================================================
        # Create object function optimizer.
        # ================================================================================
        if optimizer == 'adam':
            opt = AdamOptimizer([*this_obj_size, 2], output_folder=output_folder)
            optimizer_options_obj = {'step_size': learning_rate,
                                     'shared_file_object': shared_file_object}
        elif optimizer == 'gd':
            opt = GDOptimizer([*this_obj_size, 2], output_folder=output_folder)
            optimizer_options_obj = {'step_size': learning_rate,
                                     'dynamic_rate': True,
                                     'first_downrate_iteration': 20 * max([ceil(n_pos / (minibatch_size * n_ranks)), 1])}
        if shared_file_object:
            opt.create_file_objects(use_checkpoint=use_checkpoint)
        else:
            if use_checkpoint:
                try:
                    opt.restore_param_arrays_from_checkpoint()
                except:
                    opt.create_param_arrays()
            else:
                opt.create_param_arrays()

        # ================================================================================
        # Read rotation data.
        # ================================================================================
        try:
            coord_ls = read_all_origin_coords('arrsize_{}_{}_{}_ntheta_{}'.format(*this_obj_size, n_theta),
                                              n_theta)
        except:
            if rank == 0:
                print_flush('Saving rotation coordinates...', 0, rank, **stdout_options)
                save_rotation_lookup(this_obj_size, n_theta)
            comm.Barrier()
            coord_ls = read_all_origin_coords('arrsize_{}_{}_{}_ntheta_{}'.format(*this_obj_size, n_theta),
                                              n_theta)

        # ================================================================================
        # Unify random seed for all threads.
        # ================================================================================
        comm.Barrier()
        seed = int(time.time() / 60)
        np.random.seed(seed)
        comm.Barrier()

        # ================================================================================
        # Get checkpointed parameters.
        # ================================================================================
        starting_epoch, starting_batch = (0, 0)
        needs_initialize = False if use_checkpoint else True
        if use_checkpoint and shared_file_object:
            try:
                starting_epoch, starting_batch = restore_checkpoint(output_folder, shared_file_object)
            except:
                needs_initialize = True
        elif use_checkpoint and (not shared_file_object):
            try:
                starting_epoch, starting_batch, obj_delta, obj_beta = \
                    restore_checkpoint(output_folder, shared_file_object, opt)
            except:
                needs_initialize = True

        # ================================================================================
        # Create object class.
        # ================================================================================
        obj = ObjectFunction([*this_obj_size, 2], shared_file_object=shared_file_object,
                             output_folder=output_folder, ds_level=ds_level, object_type=object_type)
        if shared_file_object:
            obj.create_file_object(use_checkpoint)
            obj.create_temporary_file_object()
            if needs_initialize:
                obj.initialize_file_object(save_stdout=save_stdout, timestr=timestr,
                                           not_first_level=not_first_level, initial_guess=initial_guess)
        else:
            if needs_initialize:
                obj.initialize_array(save_stdout=save_stdout, timestr=timestr,
                                     not_first_level=not_first_level, initial_guess=initial_guess)
            else:
                obj.delta = obj_delta
                obj.beta = obj_beta

        # ================================================================================
        # Create gradient class.
        # ================================================================================
        gradient = Gradient(obj)
        if shared_file_object:
            gradient.create_file_object()
            gradient.initialize_gradient_file()
        else:
            gradient.initialize_array_with_values(np.zeros(this_obj_size), np.zeros(this_obj_size))

        # ================================================================================
        # If a finite support mask path is specified (common for full-field imaging),
        # create an instance of monochannel mask class. While finite_support_mask_path
        # has to point to a 3D tiff file, the mask will be written as an HDF5 if
        # share_file_mode is True.
        # ================================================================================
        mask = None
        if finite_support_mask_path is not None:
            mask = Mask(this_obj_size, finite_support_mask_path, shared_file_object=shared_file_object,
                        output_folder=output_folder, ds_level=ds_level)
            if shared_file_object:
                mask.create_file_object(use_checkpoint=use_checkpoint)
                mask.initialize_file_object()
            else:
                mask_arr = dxchange.read_tiff(finite_support_mask_path)
                mask.initialize_array_with_values(mask_arr)

        # ================================================================================
        # Initialize probe functions.
        # ================================================================================
        print_flush('Initializing probe...', 0, rank, **stdout_options)
        probe_real, probe_imag = initialize_probe(probe_size, probe_type, pupil_function=pupil_function,
                                                  probe_initial=probe_initial, save_stdout=save_stdout,
                                                  output_folder=output_folder, timestr=timestr,
                                                  save_path=save_path, fname=fname, **kwargs)

        # ================================================================================
        # Generate Fresnel kernel.
        # ================================================================================
        voxel_nm = np.array([psize_cm] * 3) * 1.e7 * ds_level
        lmbda_nm = 1240. / energy_ev
        delta_nm = voxel_nm[-1]
        h = get_kernel(delta_nm * binning, lmbda_nm, voxel_nm, probe_size, fresnel_approx=fresnel_approx)

        # ================================================================================
        # Create other optimizers (probe, probe defocus, probe positions, etc.).
        # ================================================================================
        opt_arg_ls = [0, 1]
        if probe_type == 'optimizable':
            opt_probe = GDOptimizer([*probe_size, 2], output_folder=output_folder)
            optimizer_options_probe = {'step_size': probe_learning_rate,
                                       'dynamic_rate': True,
                                       'first_downrate_iteration': 4 * max([ceil(n_pos / (minibatch_size * n_ranks)), 1])}
            opt_arg_ls = opt_arg_ls + [2, 3]
            opt_probe.set_index_in_grad_return(len(opt_arg_ls))

        probe_defocus_mm = np.array(0.0)
        if optimize_probe_defocusing:
            opt_probe_defocus = GDOptimizer([1], output_folder=output_folder)
            optimizer_options_probe_defocus = {'step_size': probe_defocusing_learning_rate,
                                               'dynamic_rate': True,
                                               'first_downrate_iteration': 4 * max([ceil(n_pos / (minibatch_size * n_ranks)), 1])}
            opt_arg_ls.append(4)
            opt_probe_defocus.set_index_in_grad_return(len(opt_arg_ls))

        probe_pos_offset = np.zeros([n_theta, 2])
        if optimize_probe_pos_offset:
            opt_probe_pos_offset = GDOptimizer(probe_pos_offset.shape, output_folder=output_folder)
            optimizer_options_probe_pos_offset = {'step_size': 0.5,
                                                  'dynamic_rate': False}
            opt_arg_ls.append(5)
            opt_probe_pos_offset.set_index_in_grad_return(len(opt_arg_ls))

        # ================================================================================
        # Get gradient of loss function w.r.t. optimizable variables.
        # ================================================================================
        loss_grad = grad(calculate_loss, opt_arg_ls)

        # ================================================================================
        # Save convergence data.
        # ================================================================================
        if rank == 0:
            try:
                os.makedirs(os.path.join(output_folder, 'convergence'))
            except:
                pass
        comm.Barrier()
        f_conv = open(os.path.join(output_folder, 'convergence', 'loss_rank_{}.txt'.format(rank)), 'w')
        f_conv.write('i_epoch,i_batch,loss,time\n')

        # ================================================================================
        # Create parameter summary file.
        # ================================================================================
        print_flush('Optimizer started.', 0, rank, **stdout_options)
        if rank == 0:
            create_summary(output_folder, locals(), preset='ptycho')

        # ================================================================================
        # Start outer (epoch) loop.
        # ================================================================================
        cont = True
        i_epoch = starting_epoch
        m_p, v_p, m_pd, v_pd = (None, None, None, None)
        while cont:
            n_pos = len(probe_pos)
            n_spots = n_theta * n_pos
            n_tot_per_batch = minibatch_size * n_ranks
            n_batch = int(np.ceil(float(n_spots) / n_tot_per_batch))

            t0 = time.time()
            spots_ls = range(n_spots)
            ind_list_rand = []

            t00 = time.time()
            print_flush('Allocating jobs over threads...', 0, rank, **stdout_options)
            # Make a list of all thetas and spot positions
            np.random.seed(i_epoch)
            comm.Barrier()
            if not two_d_mode:
                theta_ls = np.arange(n_theta)
                np.random.shuffle(theta_ls)
            else:
                theta_ls = np.linspace(0, 2 * PI, prj.shape[0])
                theta_ls = abs(theta_ls - theta_st) < 1e-5
                i_theta = np.nonzero(theta_ls)[0][0]
                theta_ls = np.array([i_theta])

            # ================================================================================
            # Put diffraction spots from all angles together, and divide into minibatches.
            # ================================================================================
            for i, i_theta in enumerate(theta_ls):
                spots_ls = range(n_pos)
                # ================================================================================
                # Append randomly selected diffraction spots if necessary, so that a rank
                # won't be given spots from different angles in one batch.
                # When using shared file object, we must also ensure that all ranks deal
                # with data at the same angle at a time.
                # ================================================================================
                if not shared_file_object and n_pos % minibatch_size != 0:
                    spots_ls = np.append(spots_ls,
                                         np.random.choice(spots_ls,
                                                          minibatch_size - (n_pos % minibatch_size),
                                                          replace=False))
                elif shared_file_object and n_pos % n_tot_per_batch != 0:
                    spots_ls = np.append(spots_ls,
                                         np.random.choice(spots_ls,
                                                          n_tot_per_batch - (n_pos % n_tot_per_batch),
                                                          replace=False))
                # ================================================================================
                # Create task list for the current angle.
                # ind_list_rand is in the format of
                # [((5, 0), (5, 1), ...), ((17, 0), (17, 1), ..., (...))]
                #  |___________________|   |_____|
                #  a batch for all ranks _|      |_ (i_theta, i_spot)
                #  (minibatch_size * n_ranks)
                # ================================================================================
                if i == 0:
                    ind_list_rand = np.vstack([np.array([i_theta] * len(spots_ls)), spots_ls]).transpose()
                else:
                    ind_list_rand = np.concatenate(
                        [ind_list_rand,
                         np.vstack([np.array([i_theta] * len(spots_ls)), spots_ls]).transpose()],
                        axis=0)
            ind_list_rand = split_tasks(ind_list_rand, n_tot_per_batch)
            print_flush('Allocation done in {} s.'.format(time.time() - t00), 0, rank, **stdout_options)

            current_i_theta = 0
            for i_batch in range(starting_batch, n_batch):

                # ================================================================================
                # Initialize.
                # ================================================================================
                print_flush('Epoch {}, batch {} of {} started.'.format(i_epoch, i_batch, n_batch),
                            0, rank, **stdout_options)
                opt.i_batch = 0

                # ================================================================================
                # Save checkpoint.
                # ================================================================================
                if shared_file_object:
                    save_checkpoint(i_epoch, i_batch, output_folder, shared_file_object=True,
                                    obj_array=None, optimizer=opt)
                    obj.f.flush()
                else:
                    save_checkpoint(i_epoch, i_batch, output_folder, shared_file_object=False,
                                    obj_array=np.stack([obj.delta, obj.beta], axis=-1),
                                    optimizer=opt)

                # ================================================================================
                # Get scan position, rotation angle indices, and raw data for current batch.
                # ================================================================================
                t00 = time.time()
                if len(ind_list_rand[i_batch]) < n_tot_per_batch:
                    n_supp = n_tot_per_batch - len(ind_list_rand[i_batch])
                    ind_list_rand[i_batch] = np.concatenate([ind_list_rand[i_batch],
                                                             ind_list_rand[0][:n_supp]])

                this_ind_batch = ind_list_rand[i_batch]
                this_i_theta = this_ind_batch[rank * minibatch_size, 0]
                this_ind_rank = np.sort(this_ind_batch[rank * minibatch_size:(rank + 1) * minibatch_size, 1])
                this_pos_batch = probe_pos[this_ind_rank]
                print_flush('Current rank is processing angle ID {}.'.format(this_i_theta),
                            0, rank, **stdout_options)

                t_prj_0 = time.time()
                this_prj_batch = prj[this_i_theta, this_ind_rank]
                print_flush('  Raw data reading done in {} s.'.format(time.time() - t_prj_0),
                            0, rank, **stdout_options)

                # ================================================================================
                # In shared file mode, if moving to a new angle, rotate the HDF5 object and
                # save the rotated object into the temporary file object.
                # ================================================================================
                if shared_file_object and this_i_theta != current_i_theta:
                    current_i_theta = this_i_theta
                    print_flush('  Rotating dataset...', 0, rank, **stdout_options)
                    t_rot_0 = time.time()
                    obj.rotate_data_in_file(coord_ls[this_i_theta], interpolation=interpolation,
                                            dset_2=obj.dset_rot)
                    # opt.rotate_files(coord_ls[this_i_theta], interpolation=interpolation)
                    # if mask is not None: mask.rotate_data_in_file(coord_ls[this_i_theta], interpolation=interpolation)
                    comm.Barrier()
                    print_flush('  Dataset rotation done in {} s.'.format(time.time() - t_rot_0),
                                0, rank, **stdout_options)

                if ds_level > 1:
                    this_prj_batch = this_prj_batch[:, :, ::ds_level, ::ds_level]
                comm.Barrier()

                if shared_file_object:
                    # ================================================================================
                    # Get values for local chunks of object_delta and beta; interpolate and
                    # read directly from HDF5.
                    # ================================================================================
                    t_read_0 = time.time()
                    obj_rot = obj.read_chunks_from_file(this_pos_batch, probe_size, dset_2=obj.dset_rot)
                    print_flush('  Chunk reading done in {} s.'.format(time.time() - t_read_0),
                                0, rank, **stdout_options)
                    obj_delta = np.array(obj_rot[:, :, :, :, 0])
                    obj_beta = np.array(obj_rot[:, :, :, :, 1])
                    opt.get_params_from_file(this_pos_batch, probe_size)
                else:
                    obj_delta = obj.delta
                    obj_beta = obj.beta

                # Update weight for reweighted L1
                if shared_file_object:
                    weight_l1 = np.max(obj_delta) / (abs(obj_delta) + 1e-8)
                else:
                    if i_batch % 10 == 0:
                        weight_l1 = np.max(obj_delta) / (abs(obj_delta) + 1e-8)

                # ================================================================================
                # Calculate object gradients.
                # ================================================================================
                t_grad_0 = time.time()
                grads = loss_grad(obj_delta, obj_beta, probe_real, probe_imag, probe_defocus_mm,
                                  probe_pos_offset, this_i_theta, this_pos_batch, this_prj_batch)
                print_flush('  Gradient calculation done in {} s.'.format(time.time() - t_grad_0),
                            0, rank, **stdout_options)
                grads = list(grads)

                # ================================================================================
                # Reshape object gradient to [y, x, z, c] or [n, y, x, z, c] and average
                # over ranks.
                # ================================================================================
                if shared_file_object:
                    obj_grads = np.stack(grads[:2], axis=-1)
                else:
                    this_obj_grads = np.stack(grads[:2], axis=-1)
                    obj_grads = np.zeros_like(this_obj_grads)
                    comm.Barrier()
                    comm.Allreduce(this_obj_grads, obj_grads)
                    obj_grads = obj_grads / n_ranks

                # ================================================================================
                # Update object function with optimizer if not shared_file_object; otherwise,
                # just save the gradient chunk into the gradient file.
                # ================================================================================
                if not shared_file_object:
                    effective_iter = i_batch // max([ceil(n_pos / (minibatch_size * n_ranks)), 1])
                    obj_temp = opt.apply_gradient(np.stack([obj_delta, obj_beta], axis=-1), obj_grads,
                                                  effective_iter, **optimizer_options_obj)
                    obj_delta = np.take(obj_temp, 0, axis=-1)
                    obj_beta = np.take(obj_temp, 1, axis=-1)
                else:
                    t_grad_write_0 = time.time()
                    gradient.write_chunks_to_file(this_pos_batch, np.take(obj_grads, 0, axis=-1),
                                                  np.take(obj_grads, 1, axis=-1), probe_size,
                                                  write_difference=False)
                    print_flush('  Gradient writing done in {} s.'.format(time.time() - t_grad_write_0),
                                0, rank, **stdout_options)

                # ================================================================================
                # Nonnegativity and phase/absorption-only constraints for non-shared-file-mode,
                # and update arrays in instance.
                # ================================================================================
                if not shared_file_object:
                    obj_delta = np.clip(obj_delta, 0, None)
                    obj_beta = np.clip(obj_beta, 0, None)
                    if object_type == 'absorption_only':
                        obj_delta[...] = 0
                    if object_type == 'phase_only':
                        obj_beta[...] = 0
                    obj.delta = obj_delta
                    obj.beta = obj_beta

                # ================================================================================
                # Optimize probe and other parameters if necessary.
                # ================================================================================
                if probe_type == 'optimizable':
                    this_probe_grads = np.stack(grads[2:4], axis=-1)
                    probe_grads = np.zeros_like(this_probe_grads)
                    comm.Allreduce(this_probe_grads, probe_grads)
                    probe_grads = probe_grads / n_ranks
                    probe_temp = opt_probe.apply_gradient(np.stack([probe_real, probe_imag], axis=-1),
                                                          probe_grads, **optimizer_options_probe)
                    probe_real = np.take(probe_temp, 0, axis=-1)
                    probe_imag = np.take(probe_temp, 1, axis=-1)

                if optimize_probe_defocusing:
                    this_pd_grad = np.array(grads[opt_probe_defocus.index_in_grad_returns])
                    pd_grads = np.array(0.0)
                    comm.Allreduce(this_pd_grad, pd_grads)
                    pd_grads = pd_grads / n_ranks
                    probe_defocus_mm = opt_probe_defocus.apply_gradient(probe_defocus_mm, pd_grads,
                                                                        **optimizer_options_probe_defocus)
                    print_flush('  Probe defocus is {} mm.'.format(probe_defocus_mm),
                                0, rank, **stdout_options)

                if optimize_probe_pos_offset:
                    this_pos_offset_grad = np.array(grads[opt_probe_pos_offset.index_in_grad_returns])
                    pos_offset_grads = np.zeros_like(probe_pos_offset)
                    comm.Allreduce(this_pos_offset_grad, pos_offset_grads)
                    pos_offset_grads = pos_offset_grads / n_ranks
                    probe_pos_offset = opt_probe_pos_offset.apply_gradient(probe_pos_offset, pos_offset_grads,
                                                                           **optimizer_options_probe_pos_offset)

                # ================================================================================
                # For shared-file mode, if finishing or about to move to a different angle,
                # rotate the gradient back, and use it to update the object at 0 deg. Then
                # update the object using the gradient at 0 deg.
                # ================================================================================
                if shared_file_object and (i_batch == n_batch - 1 or
                                           ind_list_rand[i_batch + 1][0, 0] != current_i_theta):
                    coord_new = read_origin_coords('arrsize_{}_{}_{}_ntheta_{}'.format(*this_obj_size, n_theta),
                                                   this_i_theta, reverse=True)
                    print_flush('  Rotating gradient dataset back...', 0, rank, **stdout_options)
                    t_rot_0 = time.time()
                    # dxchange.write_tiff(gradient.dset[:, :, :, 0], 'adhesin/test_shared_file/grad_prerot', dtype='float32')
                    # gradient.reverse_rotate_data_in_file(coord_ls[this_i_theta], interpolation=interpolation)
                    gradient.rotate_data_in_file(coord_new, interpolation=interpolation)
                    # dxchange.write_tiff(gradient.dset[:, :, :, 0], 'adhesin/test_shared_file/grad_postrot', dtype='float32')
                    # comm.Barrier()
                    print_flush('  Gradient rotation done in {} s.'.format(time.time() - t_rot_0),
                                0, rank, **stdout_options)

                    t_apply_grad_0 = time.time()
                    opt.apply_gradient_to_file(obj, gradient, **optimizer_options_obj)
                    print_flush('  Object update done in {} s.'.format(time.time() - t_apply_grad_0),
                                0, rank, **stdout_options)
                    gradient.initialize_gradient_file()

                # ================================================================================
                # Apply finite support mask if specified.
                # ================================================================================
                if mask is not None:
                    if not shared_file_object:
                        obj.apply_finite_support_mask_to_array(mask)
                    else:
                        obj.apply_finite_support_mask_to_file(mask)
                    print_flush('  Mask applied.', 0, rank, **stdout_options)

                # ================================================================================
                # Update finite support mask if necessary.
                # ================================================================================
                if mask is not None and shrink_cycle is not None:
                    if i_batch % shrink_cycle == 0 and i_batch > 0:
                        if shared_file_object:
                            mask.update_mask_file(obj, shrink_threshold)
                        else:
                            mask.update_mask_array(obj, shrink_threshold)
                        print_flush('  Mask updated.', 0, rank, **stdout_options)

                # ================================================================================
                # Save intermediate object.
                # ================================================================================
                if rank == 0 and save_intermediate:
                    if shared_file_object:
                        dxchange.write_tiff(obj.dset[:, :, :, 0],
                                            fname=os.path.join(output_folder, 'intermediate',
                                                               'current'.format(ds_level)),
                                            dtype='float32', overwrite=True)
                    else:
                        dxchange.write_tiff(obj.delta,
                                            fname=os.path.join(output_folder, 'intermediate',
                                                               'current'.format(ds_level)),
                                            dtype='float32', overwrite=True)
                comm.Barrier()
                print_flush('Minibatch done in {} s; loss (rank 0) is {}.'.format(time.time() - t00,
                                                                                  current_loss),
                            0, rank, **stdout_options)
                f_conv.write('{}\n'.format(time.time() - t_zero))
                f_conv.flush()

            if n_epochs == 'auto':
                pass
            else:
                if i_epoch == n_epochs - 1:
                    cont = False
            i_epoch = i_epoch + 1

            average_loss = 0
            print_flush('Epoch {} (rank {}); Delta-t = {} s; current time = {} s,'.format(
                i_epoch, rank, time.time() - t0, time.time() - t_zero), **stdout_options)
            if rank == 0 and save_intermediate:
                if shared_file_object:
                    dxchange.write_tiff(obj.dset[:, :, :, 0],
                                        fname=os.path.join(output_folder, 'delta_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)
                    dxchange.write_tiff(obj.dset[:, :, :, 1],
                                        fname=os.path.join(output_folder, 'beta_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)
                    dxchange.write_tiff(np.sqrt(probe_real ** 2 + probe_imag ** 2),
                                        fname=os.path.join(output_folder, 'probe_mag_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)
                    dxchange.write_tiff(np.arctan2(probe_imag, probe_real),
                                        fname=os.path.join(output_folder, 'probe_phase_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)
                else:
                    dxchange.write_tiff(obj.delta,
                                        fname=os.path.join(output_folder, 'delta_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)
                    dxchange.write_tiff(obj.beta,
                                        fname=os.path.join(output_folder, 'beta_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)
                    dxchange.write_tiff(np.sqrt(probe_real ** 2 + probe_imag ** 2),
                                        fname=os.path.join(output_folder, 'probe_mag_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)
                    dxchange.write_tiff(np.arctan2(probe_imag, probe_real),
                                        fname=os.path.join(output_folder, 'probe_phase_ds_{}'.format(ds_level)),
                                        dtype='float32', overwrite=True)

        print_flush('Current iteration finished.', 0, rank, **stdout_options)
    comm.Barrier()
def test_atan2_f64():
    ti.set_default_fp(ti.f64)
    grad_test(lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x))
    grad_test(lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4))
from matplotlib.backends.backend_pdf import PdfPages

pp = PdfPages('mog_advi.pdf')

fracs = np.argsort(pi)[-5:]
mus = mus[fracs, :]
taus = taus[fracs]

circle = []
true_circle = []
for n, color in enumerate(colors):
    v, w = np.linalg.eigh(taus[n] * np.eye(k))
    v_true, w_true = np.linalg.eigh(ts[n] * np.eye(k))
    u = w[0] / np.linalg.norm(w[0])
    u_true = w_true[0] / np.linalg.norm(w_true[0])
    angle = np.arctan2(u[1], u[0])
    angle_true = np.arctan2(u_true[1], u_true[0])
    angle = 180 * angle / np.pi
    angle_true = 180 * angle_true / np.pi
    v = 2. * np.sqrt(2.) * np.sqrt(v)
    v_true = 2. * np.sqrt(2.) * np.sqrt(v_true)
    circle.append(plt.Circle(mus[n], v[0], color=color, fill=False, linestyle='dashed'))
    true_circle.append(plt.Circle(ms[n], v_true[0], color='black', fill=False))

fig, ax = plt.subplots(figsize=(75.0 / 25.4, 75 / 25.4))
ax = plt.gca()
ax.cla()  # clear things for fresh plot
ax.axis('equal')
ax.set_xlim((-6, 6))
    def func():
        y[0] = x[0] % 3

    @ti.kernel
    def func2():
        ti.atomic_add(y[0], x[0] % 3)

    func()
    func.grad()
    func2()
    func2.grad()


@pytest.mark.parametrize('tifunc,npfunc', [
    (lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x)),
    (lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4)),
])
@if_has_autograd
@ti.test()
def test_atan2(tifunc, npfunc):
    grad_test(tifunc, npfunc)


@pytest.mark.parametrize('tifunc,npfunc', [
    (lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x)),
    (lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4)),
])
@if_has_autograd
@ti.test(require=ti.extension.data64, default_fp=ti.f64)
def test_atan2_f64(tifunc, npfunc):
    grad_test(tifunc, npfunc)
def getRadialMonotonicWeights(shape, useNearest=True, minGradient=1, center=None):
    """Create the weights used for the Radial Monotonicity Operator

    This version of the radial monotonicity operator selects all of the
    pixels closer to the peak for each pixel and weights their flux based
    on their alignment with a vector from the pixel to the peak. In order
    to quickly create this using sparse matrices, its construction is a
    bit opaque.
    """
    if center is None:
        center = ((shape[0] - 1) // 2, (shape[1] - 1) // 2)
    name = "RadialMonotonicWeights"
    key = tuple(shape) + tuple(center) + (useNearest, minGradient)
    try:
        cosNorm = Cache.check(name, key)
    except KeyError:
        # Center on the center pixel
        py, px = int(center[0]), int(center[1])
        # Calculate the distance between each pixel and the peak
        x = np.arange(shape[1])
        y = np.arange(shape[0])
        X, Y = np.meshgrid(x, y)
        X = X - px
        Y = Y - py
        distance = np.sqrt(X**2 + Y**2)

        # Find each pixel's neighbors further from the peak and mark them
        # as invalid (to be removed later)
        distArr, mask = diagonalizeArray(distance, dtype=np.float64)
        relativeDist = (distance.flatten()[:, None] - distArr.T).T
        invalidPix = relativeDist <= 0

        # Calculate the angle between each pixel and the x axis, relative to
        # the peak position (also avoid dividing by zero and set the
        # tan(infinity) pixel values to pi/2 manually)
        inf = X == 0
        tX = X.copy()
        tX[inf] = 1
        angles = np.arctan2(-Y, -tX)
        angles[inf & (Y != 0)] = 0.5 * np.pi * np.sign(angles[inf & (Y != 0)])

        # Calculate the angle between each pixel and its neighbors
        xArr, m = diagonalizeArray(X)
        yArr, m = diagonalizeArray(Y)
        dx = (xArr.T - X.flatten()[:, None]).T
        dy = (yArr.T - Y.flatten()[:, None]).T
        # Avoid dividing by zero and set the tan(infinity) pixel values to pi/2 manually
        inf = dx == 0
        dx[inf] = 1
        relativeAngles = np.arctan2(dy, dx)
        relativeAngles[inf & (dy != 0)] = 0.5 * np.pi * np.sign(relativeAngles[inf & (dy != 0)])

        # Find the difference between each pixel's angle with the peak and
        # the relative angles to its neighbors, and take the cos to find its
        # neighbors' weights
        dAngles = (angles.flatten()[:, None] - relativeAngles.T).T
        cosWeight = np.cos(dAngles)
        # Mask edge pixels, array elements outside the operator (for
        # off-diagonal bands with < N elements), and neighbors further from
        # the peak than the reference pixel
        cosWeight[invalidPix] = 0
        cosWeight[mask] = 0

        if useNearest:
            # Only use a single pixel most in line with peak
            cosNorm = np.zeros_like(cosWeight)
            columnIndices = np.arange(cosWeight.shape[1])
            maxIndices = np.argmax(cosWeight, axis=0)
            indices = maxIndices * cosNorm.shape[1] + columnIndices
            indices = np.unravel_index(indices, cosNorm.shape)
            cosNorm[indices] = minGradient
            # Remove the reference for the peak pixel
            cosNorm[:, px + py * shape[1]] = 0
        else:
            # Normalize the cos weights for each pixel
            normalize = np.sum(cosWeight, axis=0)
            normalize[normalize == 0] = 1
            cosNorm = (cosWeight.T / normalize[:, None]).T
            cosNorm[mask] = 0
        Cache.set(name, key, cosNorm)
    return cosNorm
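# A sketch (toy numbers, not from the source) of the weighting idea above:
# a neighbor's weight is cos(angle-to-peak minus angle-to-neighbor), so a
# neighbor directly in line with the peak gets weight ~1 and a
# perpendicular one gets ~0.
import numpy as np

peak = np.array([0.0, 0.0])
pixel = np.array([4.0, 0.0])
for neighbor in [np.array([3.0, 0.0]), np.array([4.0, 1.0])]:
    to_peak = np.arctan2(*(peak - pixel)[::-1])          # arctan2(dy, dx)
    to_neighbor = np.arctan2(*(neighbor - pixel)[::-1])
    print(np.cos(to_peak - to_neighbor))  # 1.0, then ~0.0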
def decompose_thrustvector(self, acc_vec):
    a_norm = np.linalg.norm(acc_vec)
    z_body = acc_vec / a_norm
    theta = np.arctan2(z_body[1], z_body[0]) - np.pi / 2
    # theta = np.arcsin(-z_body[0])
    return a_norm, z_body, theta
def d_z_d_t_numpy(x, y, z, t, params, ode_params):
    A = ode_params.A
    f2 = ode_params.f2

    a_p, a_q, a_r, a_s, a_t = params[:, 0], params[:, 3], params[:, 6], params[:, 9], params[:, 12]
    b_p, b_q, b_r, b_s, b_t = params[:, 1], params[:, 4], params[:, 7], params[:, 10], params[:, 13]
    theta_p, theta_q, theta_r, theta_s, theta_t = \
        params[:, 2], params[:, 5], params[:, 8], params[:, 11], params[:, 14]

    a_p = a_p.reshape((-1, 1))
    a_q = a_q.reshape((-1, 1))
    a_r = a_r.reshape((-1, 1))
    a_s = a_s.reshape((-1, 1))
    a_t = a_t.reshape((-1, 1))

    b_p = b_p.reshape((-1, 1))
    b_q = b_q.reshape((-1, 1))
    b_r = b_r.reshape((-1, 1))
    b_s = b_s.reshape((-1, 1))
    b_t = b_t.reshape((-1, 1))

    theta_p = theta_p.reshape((-1, 1))
    theta_q = theta_q.reshape((-1, 1))
    theta_r = theta_r.reshape((-1, 1))
    theta_s = theta_s.reshape((-1, 1))
    theta_t = theta_t.reshape((-1, 1))

    logging.debug("theta p shape: {}".format(theta_p.shape))
    theta = np.arctan2(y, x)
    logging.debug("theta shape: {}".format(theta.shape))
    logging.debug("delta before mod: {}".format((theta - theta_p).shape))
    delta_theta_p = np.fmod(theta - theta_p, 2 * math.pi)
    logging.debug("delta theta shape: {}".format(delta_theta_p.shape))
    delta_theta_q = np.fmod(theta - theta_q, 2 * math.pi)
    delta_theta_r = np.fmod(theta - theta_r, 2 * math.pi)
    delta_theta_s = np.fmod(theta - theta_s, 2 * math.pi)
    delta_theta_t = np.fmod(theta - theta_t, 2 * math.pi)

    z_p = a_p * delta_theta_p * np.exp(-delta_theta_p * delta_theta_p / (2 * b_p * b_p))
    z_q = a_q * delta_theta_q * np.exp(-delta_theta_q * delta_theta_q / (2 * b_q * b_q))
    z_r = a_r * delta_theta_r * np.exp(-delta_theta_r * delta_theta_r / (2 * b_r * b_r))
    z_s = a_s * delta_theta_s * np.exp(-delta_theta_s * delta_theta_s / (2 * b_s * b_s))
    z_t = a_t * delta_theta_t * np.exp(-delta_theta_t * delta_theta_t / (2 * b_t * b_t))

    z_0_t = A * np.sin(2 * math.pi * f2 * t)

    f_z = -1 * (z_p + z_q + z_r + z_s + z_t) - (z - z_0_t)
    return f_z
def optimizeManeuver(self):
    '''
    Sets up the optimization problem then calculates the optimal maneuver
    poses. Publishes the resulting path if the optimization is successful.

    Args:
    -----
    None (uses the stored target pose and the current odometry)

    Returns:
    --------
    (success, message): whether the optimization succeeded and a status
    string; the optimal path is published as ROS nav_msgs/Path messages
    '''
    # Make sure a target pose exists
    if (self.target_x is not None):
        # Grab the current pose from the recent transform if there is no
        # 'odom' topic being published to
        if (self.current_pose is None):
            # DEBUG:
            print("No 'odom' message received. Waiting for transform from 'odom' to 'base_link'...")
            listener = tf.TransformListener()
            try:
                listener.waitForTransform('/odom', '/base_link', rospy.Time(0), rospy.Duration(10))
                (trans, rot) = listener.lookupTransform('/odom', '/base_link', rospy.Time(0))
                self.current_pose = Pose()
                self.current_pose.position.x = trans[0]
                self.current_pose.position.y = trans[1]
                self.current_pose.position.z = trans[2]
                self.current_pose.orientation.x = rot[0]
                self.current_pose.orientation.y = rot[1]
                self.current_pose.orientation.z = rot[2]
                self.current_pose.orientation.w = rot[3]
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                return False, "Error looking up transform from 'odom' to 'base_link'"

        # DEBUG:
        print("Running maneuver optimization...")

        # Initial value for optimization
        x0 = [self.start_x_s, self.start_y_s]
        lower_bounds = [-2, -16]
        upper_bounds = [20, 5]

        # Set params
        # TODO: add the forklift's current pose from "/odom"
        current_pose2D = Pose2D()
        current_pose2D.x = self.current_pose.position.x
        current_pose2D.y = self.current_pose.position.y
        euler_angles = euler_from_quaternion([
            self.current_pose.orientation.x, self.current_pose.orientation.y,
            self.current_pose.orientation.z, self.current_pose.orientation.w
        ])
        current_pose2D.theta = euler_angles[2]

        params = {
            "current_pose": [current_pose2D.x, current_pose2D.y, current_pose2D.theta],
            "forklift_length": (self.base_to_back + self.base_to_clamp),
            "weights": [10, 1, 0.1, 1],
            "obstacles": self.obstacles,
            "min_radius": self.min_radius
        }

        print("Using optimization method: %d" % self.optimization_method)

        #==================================================================#
        # vvv Add Autograd gradient functions here if you get to it vvv
        #==================================================================#
        # Generate gradient functions
        self.grad_maneuverObjective = grad(
            lambda x: self.maneuverObjective(x, params))
        self.hessian_maneuverObjective = hessian(
            lambda x: self.maneuverObjective(x, params))
        self.jac_maneuverIneqConstraints = jacobian(
            lambda x: self.maneuverIneqConstraints(x, params))
        self.hessian_maneuverIneqConstraints = hessian(
            lambda x: self.maneuverIneqConstraints(x, params))

        # # Test gradients against the finite difference method
        # delta = 0.0000001
        # x = np.array([self.start_x_s, self.start_y_s], dtype=np.float)
        # dx = deepcopy(x)
        # dx[0] = x[0] + delta
        # print("Objective: ")
        # print(self.maneuverObjective(x, params))
        # print(self.maneuverObjective(dx, params))
        # print("Autograd:")
        # print(self.grad_maneuverObjective(x))
        # print("Finite Difference:")
        # print((self.maneuverObjective(dx, params) - self.maneuverObjective(x, params))/delta)
        # print("Hessian:")
        # print(self.hessian_maneuverObjective(x))
        # print("Autograd con:")
        # print(self.jac_maneuverIneqConstraints(x))
        # print("Constraint Jacobian:")
        # print(self.gradManeuverIneqConstraints(x, params))
        # print("Hessian con:")
        # print(self.hessian_maneuverIneqConstraints(x))
        #==================================================================#
        # ^^^ Add Autograd gradient functions here if you get to it ^^^
        #==================================================================#

        #==================================================================#
        # scipy.optimize.minimize optimizer
        #==================================================================#
        if (self.optimization_method == 1):
            # Set up optimization problem
            obj = lambda x: self.maneuverObjective(x, params)
            obj_bfgs = lambda x: self.maneuverObjective(x, params)
            ineq_con = {
                'type': 'ineq',
                'fun': lambda x: self.maneuverIneqConstraints(x, params),
                'jac': None
            }
            bounds = [(lower_bounds[0], upper_bounds[0]),
                      (lower_bounds[1], upper_bounds[1])]

            # Optimize
            # (note: some SciPy versions ignore `bounds` for COBYLA)
            tic = time.time()
            #res = minimize(obj, x0, jac=self.grad_maneuverObjective, method='SLSQP', bounds=bounds, constraints=ineq_con)
            #res = minimize(obj, x0, method='SLSQP', bounds=bounds, constraints=ineq_con)
            res = minimize(obj_bfgs, x0, method='COBYLA', bounds=bounds, constraints=ineq_con)
            toc = time.time()

            # DEBUG:
            print("===== Optimization Results =====")
            print("time: %f(sec)" % (toc - tic))
            print("Success: %s" % res.success)
            print("Message: %s" % res.message)
            print("Results:\n x: %f, y: %f" % (res.x[0], res.x[1]))

            # Store result
            x_s = res.x[0]
            y_s = res.x[1]

            # Update starting point to be the current result
            self.start_x_s = x_s
            self.start_y_s = y_s

            self.optimization_success = res.success
            message = res.message
        #==================================================================#
        # scipy.optimize.minimize optimizer
        #==================================================================#

        #==================================================================#
        # IPOPT optimizer
        #==================================================================#
        if (self.optimization_method == 2):
            # Initial value for optimization
            x0_ip = np.array([x0[0], x0[1]])
            nvar = 2
            x_L = np.array(lower_bounds, dtype=np.float_)
            x_U = np.array(upper_bounds, dtype=np.float_)

            ncon = 1
            g_L = np.array([0], dtype=np.float_)
            g_U = np.array([0], dtype=np.float_)
            nnzj = nvar * ncon
            nnzh = nvar**2

            def eval_f(x):
                return self.maneuverObjective(x, params)

            def eval_grad_f(x):
                return self.grad_maneuverObjective(x)

            def eval_g(x):
                return self.maneuverIneqConstraints(x, params)

            def eval_jac_g(x, flag):
                if flag:
                    rows = np.concatenate((np.ones(nvar) * 0, np.ones(nvar) * 1))
                    cols = np.concatenate((np.linspace(0, nvar - 1, nvar),
                                           np.linspace(nvar, 2 * nvar - 1, nvar)))
                    return (rows, cols)
                else:
                    return self.jac_maneuverIneqConstraints(x)

            def eval_h(x, lagrange, obj_factor, flag):
                if flag:
                    rows = np.array([])
                    for i in range(nvar * ncon):
                        rows = np.concatenate((rows, np.ones(nvar) * i))
                    cols = np.array([])
                    for i in range(nvar * ncon):
                        cols = np.concatenate((cols, np.linspace(0, nvar - 1, nvar)))
                    return (rows, cols)
                else:
                    constraint_hessian = self.hessian_maneuverIneqConstraints(x)
                    constraint_sum = lagrange[0] * constraint_hessian[0, :, :]
                    constraint_sum = constraint_sum + lagrange[1] * constraint_hessian[1, :, :]
                    return obj_factor * self.hessian_maneuverObjective(x) + constraint_sum

            # Not using the Hessian; remove this line when using it
            nnzh = 0
            nlp = pyipopt.create(nvar, x_L, x_U, ncon, g_L, g_U, nnzj, nnzh,
                                 eval_f, eval_grad_f, eval_g, eval_jac_g)
            pyipopt.set_loglevel(0)

            tic = time.time()
            x, zl, zu, constraint_multipliers, obj, status = nlp.solve(x0_ip)
            nlp.close()
            toc = time.time()

            def print_variable(variable_name, value):
                for i in range(len(value)):
                    print("{} {}".format(variable_name + "[" + str(i) + "] =", value[i]))

            print("Solution of the primal variables, x")
            print_variable("x", x)
            # # print("Solution of the bound multipliers, z_L and z_U")
            # print_variable("z_L", zl)
            # print_variable("z_U", zu)
            # # print("Solution of the constraint multipliers, lambda")
            # print_variable("lambda", constraint_multipliers)
            # # print("Objective value")
            # print("f(x*) = {}".format(obj))

            # DEBUG:
            print("===== Optimization Results (IPOPT) =====")
            print("time: %f" % (toc - tic))
            print("Success: %s" % status)
            print("Message: %s" % "")
            print("Results:\n x: %f, y: %f" % (x[0], x[1]))

            # Store result
            x_s = x[0]
            y_s = x[1]

            self.optimization_success = (status > 0)
            message = "ipopt optimization finished with status: {0:d}".format(status)
        #==================================================================#
        # IPOPT optimizer
        #==================================================================#

        #=================================================================#
        # Use hardcoded value
        #=================================================================#
        if (self.optimization_method == 0):
            x_s = x0[0]
            y_s = x0[1]
            self.optimization_success = 1
            message = "used hardcoded starting value"
        #=================================================================#
        # Use hardcoded value
        #=================================================================#

        # Print optimized point
        print("Approach starting point: (%0.4f, %0.4f)" % (x_s, y_s))

        # Set the initial pose angle for the forklift to be the direction facing the roll
        theta_s = np.arctan2(self.target_y - y_s, self.target_x - x_s)

        # Initialize path messages
        current_time = rospy.Time.now()
        path1_msg = Path()
        path2_msg = Path()
        path1_gear_msg = PathWithGear()
        path2_gear_msg = PathWithGear()
        path1_msg.header.stamp = current_time
        path1_msg.header.frame_id = "odom"
        path2_msg.header.stamp = current_time
        path2_msg.header.frame_id = "odom"
        path1_gear_msg.path.header.stamp = current_time
        path1_gear_msg.path.header.frame_id = "odom"
        path2_gear_msg.path.header.stamp = current_time
        path2_gear_msg.path.header.frame_id = "odom"

        # Publish first segment of maneuver
        # NOTE: Just set the path to be a single point at the current position.
        # This will make the master controller work the same and quickly move
        # through the two maneuver paths.
        point = PoseStamped()
        point.header.frame_id = "odom"
        point.pose.position.x = self.current_pose.position.x
        point.pose.position.y = self.current_pose.position.y
        path1_msg.poses.append(point)
        path1_gear_msg.path.poses.append(point)
        # Set gear, positive alpha = forward gear
        self.path1_pub.publish(path1_msg)
        path1_gear_msg.gear = 1
        self.path1_gear_pub.publish(path1_gear_msg)

        # Publish second segment of maneuver
        point = PoseStamped()
        point.header.frame_id = "odom"
        point.pose.position.x = self.current_pose.position.x
        point.pose.position.y = self.current_pose.position.y
        path2_msg.poses.append(point)
        path2_gear_msg.path.poses.append(point)
        # Set gear, positive alpha = forward gear
        self.path2_pub.publish(path2_msg)
        path2_gear_msg.gear = 1
        self.path2_gear_pub.publish(path2_gear_msg)

        if (self.optimization_success):
            # If the optimization was successful, publish the new target
            # position for the A* algorithm (you will want to make this a
            # separate "goal" value distinct from the roll target position)
            rospy.set_param("/control_panel_node/goal_x", float(x_s))
            rospy.set_param("/control_panel_node/goal_y", float(y_s))
            self.update_obstacle_end_pose = False

            # Publish the starting pose for the approach b-spline path
            approach_start_pose = PoseStamped()
            approach_start_pose.header.frame_id = "/odom"
            approach_start_pose.pose.position.x = x_s
            approach_start_pose.pose.position.y = y_s
            quat_forklift = quaternion_from_euler(0, 0, wrapToPi(theta_s))
            approach_start_pose.pose.orientation.x = quat_forklift[0]
            approach_start_pose.pose.orientation.y = quat_forklift[1]
            approach_start_pose.pose.orientation.z = quat_forklift[2]
            approach_start_pose.pose.orientation.w = quat_forklift[3]
            self.approach_pose_pub.publish(approach_start_pose)

        return self.optimization_success, message
    else:
        return False, "No target pose exists"
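# wrapToPi is referenced above but not defined in this excerpt. A common
# arctan2-based implementation (an assumption, not necessarily the one used
# here) wraps any angle into (-pi, pi]:
import numpy as np

def wrap_to_pi(angle):
    """Wrap an angle in radians into the interval (-pi, pi]."""
    return np.arctan2(np.sin(angle), np.cos(angle))

print(wrap_to_pi(3 * np.pi / 2))  # -> -pi/2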
def __init__(self, vec, referenceCurv=None, v_des_func=None, **kwargs):
    """
    vec is the vector with half of it x and half of it y
    dt is the dt between two time points in the list
    v_lim is the desired speed of the car; the default value is
        25 miles/hour expressed in m/s
    referenceCurv is the reference curve, [n, i]: n is the nth point,
        i is either x or y
    the private helpers return functions
    v_des_func: a function that outputs the desired speed given a location
    """
    dt = DT
    v_lim = VLIM
    self.vec = vec
    # vectors[n] is the information of the n'th derivative,
    # e.g. pos, velocity, acceleration, jerk
    self.vectors = {}
    self.vec_len = int(vec.shape[0] / 2)
    self.dt = dt
    # self.inputVector = np.concatenate([self._x(2), self._y(2)])  # the action space is the acceleration of the car

    self._x = lambda vec: vec[:self.vec_len]
    self._y = lambda vec: vec[self.vec_len:]
    self._vx = self._diffdt(self._x)
    self._vy = self._diffdt(self._y)
    # note: arctan2(vx, vy) measures the heading from the +y axis
    self._theta = lambda vec: np.arctan2(self._vx(vec), self._vy(vec))
    self._v = self._distance(self._vx, self._vy)
    self._ax = self._diffdt(self._vx)
    self._ay = self._diffdt(self._vy)
    self._ds = self._distance(self._diff(self._x), self._diff(self._y))
    self._a = self._distance(self._ax, self._ay)
    self._s = self._cumsum(self._ds)
    self._alon = self._normalize(self._aPlon(self._x, self._y),
                                 self._avrun(self._v))  # (a_x*v_x + a_y*v_y) / v
    self._alat = self._normalize(self._crossMul(self._x, self._y),
                                 self._avrun(self._v))  # lateral acceleration
    self._jlon = self._normalize(self._jPlon(self._x, self._y),
                                 self._avrun(self._avrun(self._v)))  # longitudinal jerk
    # smooth J_lon
    # self._jlon = self._normalize(self._jPlon(self._avrun(self._avrun(self._x)), self._avrun(self._avrun(self._y))), self._avrun(self._avrun(self._avrun(self._avrun(self._v)))))
    self._jlat = self._normalize(self._crossMul(self._vx, self._vy),
                                 self._avrun(self._a))  # lateral jerk
    self._kappa = self._kappa_(self._x, self._y)

    self.referenceCurv = referenceCurv  # the raw points of the reference curve
    # self._ref_ds = self._ref_ds_()
    self._ref_ds = LazyFunc(self._ref_ds_)
    # self._ref_d = self._ref_d_()  # the deviation from the reference curve
    self._ref_d = LazyFunc(self._ref_d_)
    self._ref_s = self._cumsum(self._ref_ds)
    self.v_lim = v_lim
    self._final_v = lambda vec: self._v(vec)[-1]  # the final speed
    # the sine of the angle between the car trajectory and the reference trajectory
    self._ref_sinphi = self._normalize(self._ref_ds, self._ds)

    self.features = {
        "L2_a_lon": self._L2(self._alon, self._const(0)),
        "L1_a_lon": self._L1(self._alon, self._const(0)),
        "Linf_a_lon": self._Linf(self._alon, self._const(0)),
        "L2_a_lat": self._L2(self._alat, self._const(0)),
        "L1_a_lat": self._L1(self._alat, self._const(0)),
        "Linf_a_lat": self._Linf(self._alat, self._const(0)),
        "L2_j_lon": self._L2(self._jlon, self._const(0)),
        "L1_j_lon": self._L1(self._jlon, self._const(0)),
        "Linf_j_lon": self._Linf(self._jlon, self._const(0)),
        "L2_j_lat": self._L2(self._jlat, self._const(0)),
        "L1_j_lat": self._L1(self._jlat, self._const(0)),
        "Linf_j_lat": self._Linf(self._jlat, self._const(0)),
        # Note: `v_des` and `abs_v_des` are identical; they are used
        # interchangeably for historical reasons
        "L2_v_des": self._L2(self._v, self._const(self.v_lim)),
        "L1_v_des": self._L1(self._v, self._const(self.v_lim)),
        "Linf_v_des": self._Linf(self._v, self._const(self.v_lim)),
        "L2_abs_v_des": self._L2(
            self._abs(self._add(self._neg(self._v), self._const(self.v_lim))),
            self._const(0)),
        "L1_abs_v_des": self._L1(
            self._abs(self._add(self._neg(self._v), self._const(self.v_lim))),
            self._const(0)),
        "Linf_abs_v_des": self._Linf(
            self._abs(self._add(self._neg(self._v), self._const(self.v_lim))),
            self._const(0)),
        "L2_ref_d": self._L2(self._ref_d, self._const(0)),
        "L1_ref_d": self._L1(self._ref_d, self._const(0)),
        "Linf_ref_d": self._Linf(self._ref_d, self._const(0)),
        "L2_ref_a_d": self._L2(self._diffdt(self._ref_d), self._const(0)),
        "L1_ref_a_d": self._L1(self._diffdt(self._ref_d), self._const(0)),
        "Linf_ref_a_d": self._Linf(self._diffdt(self._ref_d), self._const(0)),
        "L2_ref_a_s": self._L2(self._diff(self._ref_ds), self._const(0)),
        "L1_ref_a_s": self._L1(self._diff(self._ref_ds), self._const(0)),
        "Linf_ref_a_s": self._Linf(self._diff(self._ref_ds), self._const(0)),
        "L2_ref_sinphi": self._L2(self._ref_sinphi, self._const(0)),
        "L1_ref_sinphi": self._L1(self._ref_sinphi, self._const(0)),
        "Linf_ref_sinphi": self._Linf(self._ref_sinphi, self._const(0)),
        "L2_final_v": self._L2(self._final_v, self._const(0)),
        "L1_final_v": self._L1(self._final_v, self._const(0)),
        "Linf_final_v": self._Linf(self._final_v, self._const(0))
    }

    if (v_des_func is not None):
        self.features["L2_v_des_func"] = self._v_des_delta_(v_des_func, self._L2)
        self.features["L1_v_des_func"] = self._v_des_delta_(v_des_func, self._L1)
        self.features["Linf_v_des_func"] = self._v_des_delta_(v_des_func, self._Linf)
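# Minimal, self-contained sketch (an assumption, not part of the class above)
# of the heading computation behind `_theta`: with np.arctan2(vx, vy) the
# heading is measured from the +y axis, whereas the more common
# np.arctan2(vy, vx) measures it from the +x axis.
import numpy as np

t = np.linspace(0, 1, 6)
x, y = t, np.zeros_like(t)      # trajectory moving along +x
vx, vy = np.diff(x), np.diff(y)  # finite-difference velocities
print(np.degrees(np.arctan2(vx, vy)))  # [90. ...]: heading from the +y axis
print(np.degrees(np.arctan2(vy, vx)))  # [ 0. ...]: heading from the +x axis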