def householder(x):
    assert len(x) == 3, 'x=%s' % x
    assert np.linalg.norm(x) > 1e-8
    a = (np.arange(3) == np.argmin(np.abs(x))).astype(float)
    u = utils.normalized(np.cross(x, a))
    v = utils.normalized(np.cross(x, u))
    return np.array([u, v])

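# Hedged usage sketch (not part of the original source): householder(x) returns two
# unit vectors spanning the plane orthogonal to x, assuming utils.normalized rescales
# its argument to unit length.
def _householder_example():
    x = np.array([1.0, 2.0, 3.0])
    u, v = householder(x)
    # Both rows should be orthogonal to x and to each other.
    assert abs(np.dot(u, x)) < 1e-9
    assert abs(np.dot(v, x)) < 1e-9
    assert abs(np.dot(u, v)) < 1e-9
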
def normalized_difference(lista, listb):
    lista = trim_mono_data(normalized(lista))
    listb = trim_mono_data(normalized(listb))
    compare = min(len(lista), len(listb))
    return numpy.sum(
        numpy.absolute(lista[:compare] - listb[:compare])) / compare

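# Hedged usage sketch (assumes normalized() rescales to unit peak amplitude and
# trim_mono_data() only trims leading/trailing samples, as the call sites above
# suggest): comparing a signal against itself yields a difference of exactly 0.
def _normalized_difference_example():
    tone = numpy.sin(numpy.linspace(0, 200 * numpy.pi, 44100))
    assert normalized_difference(tone, tone) == 0.0
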
def multilevel_embed(ctrl, graph, match_method, basic_embed, refine_model):
    '''This method defines the multilevel embedding method.'''
    start = time.time()

    # Step-1: Graph Coarsening.
    original_graph = graph
    coarsen_level = ctrl.coarsen_level
    if ctrl.refine_model.double_base:
        # if it is double-base, it will need to do one more layer of coarsening
        coarsen_level += 1
    for i in range(coarsen_level):
        match, coarse_graph_size = match_method(ctrl, graph)
        coarse_graph = create_coarse_graph(ctrl, graph, match, coarse_graph_size)
        graph = coarse_graph
        if graph.node_num <= ctrl.embed_dim:
            ctrl.logger.error(
                "Error: coarsened graph contains less than embed_dim nodes.")
            exit(0)
        if ctrl.debug_mode and graph.node_num < 1e3:
            assert np.allclose(
                graph_to_adj(graph).A,
                graph.A.A), "Coarser graph is not consistent with Adj matrix"
    print_coarsen_info(ctrl, original_graph)

    # Step-2: Base Embedding
    if ctrl.refine_model.double_base:
        graph = graph.finer
    embedding = basic_embed(ctrl, graph)
    embedding = normalized(embedding, per_feature=False)

    # Step-3: Embedding Refinement.
    if ctrl.refine_model.double_base:
        coarse_embed = basic_embed(ctrl, graph.coarser)
        coarse_embed = normalized(coarse_embed, per_feature=False)
    else:
        coarse_embed = None
    with tf.Session(config=tf.ConfigProto(
            intra_op_parallelism_threads=ctrl.workers)) as session:
        model = refine_model(ctrl, session)
        model.train_model(coarse_graph=graph.coarser, fine_graph=graph,
                          coarse_embed=coarse_embed,
                          fine_embed=embedding)  # refinement model training
        while graph.finer is not None:  # apply the refinement model.
            embedding = model.refine_embedding(coarse_graph=graph,
                                               fine_graph=graph.finer,
                                               coarse_embed=embedding)
            graph = graph.finer
    end = time.time()
    ctrl.embed_time += end - start
    return embedding

def forwardbackward(priors, e_seq, hmm):
    """Compute the smoothed belief states given the observation sequence

    :param priors: Counter, initial belief distribution over the states
    :param e_seq: sequence of observations
    :param hmm: HMM, contains the transition and emission models
    :return: sequence of Counters, estimates of belief states for all time slices
    """
    se = list()  # Smoothed belief distributions
    f = list()
    f.append(priors)
    f.extend(forward(priors, e_seq, hmm))
    b = Counter()
    prod = Counter()
    for state in priors:
        b[state] = 1
    e_seq.reverse()
    i = len(e_seq)
    for obs in e_seq:
        # compute f_(i-1) x b
        for x in priors:
            prod[x] = f[i][x] * b[x]
        # compute s_i
        se.append(normalized(prod))
        # compute b
        b = backward1(b, obs, hmm)
        i = i - 1
    se.reverse()
    return se

def _reset(self):
    self.current_target = 0
    self.current_step = 0
    self.target = (numpy.random.uniform(-0.25 * self.playing_area_width,
                                        0.25 * self.playing_area_width),
                   numpy.random.uniform(-0.25 * self.playing_area_height,
                                        0.25 * self.playing_area_height))
    self.car_location = (numpy.random.uniform(
        -0.25 * self.playing_area_width, 0.25 * self.playing_area_width),
                         numpy.random.uniform(
        -0.25 * self.playing_area_height, 0.25 * self.playing_area_height))
    self.car_heading = numpy.random.uniform(-math.pi, math.pi)
    self.initial_location = self.car_location

    desired_vector = (self.target[0] - self.car_location[0],
                      self.target[1] - self.car_location[1])
    desired_vector = normalized(desired_vector)
    current_vector = polar_to_cartesian((1, self.car_heading))
    angle_difference_end = angle_signed(desired_vector, current_vector)

    # create observation, return
    vel_cart = polar_to_cartesian((1, self.car_heading))
    observation = numpy.array([
        self.target[0] / (0.5 * self.playing_area_width),
        self.target[1] / (0.5 * self.playing_area_height),
        self.car_location[0] / (0.5 * self.playing_area_width),
        self.car_location[1] / (0.5 * self.playing_area_height),
        vel_cart[0], vel_cart[1],
        angle_difference_end / math.pi
    ])
    observation = observation.astype(numpy.float32)
    return time_step.restart(observation)

def normalize_file(filename):
    data = read_wave_file(filename, True)
    if len(data):
        normalized_data = normalized(data) * (2**(bit_depth - 1) - 1)
    else:
        normalized_data = data
    save_to_file(filename, 2, normalized_data)

def generate(self) -> [[float]]:
    """
    Return a matrix with the data generated by the noise
    :return: data matrix
    """
    data = [[random.random() for _ in range(self._size)]
            for _ in range(self._size)]
    if self._normalized:
        data = normalized(data)
    return data

def graph_ffts():
    files = ['A1_v111_15.00s.aif', 'A2_v31_15.00s.aif']
    for file in files:
        stereo = read_wave_file(os.path.join(root_dir, file))
        left = stereo[0]
        right = stereo[1]

        list = left[:100000]
        w = numpy.fft.rfft(list)
        freqs = numpy.fft.fftfreq(len(w))

        # Find the peak in the coefficients
        # idx = numpy.argmax(numpy.abs(w[:len(w) / 2]))
        idx = numpy.argmax(numpy.abs(w))
        freq = freqs[idx]
        plt.plot(w)
        print freq
        print \
            fundamental_frequency(normalized(list)), \
            fundamental_frequency(normalized(left + right))

def generate(self) -> [[float]]:
    """
    Return a matrix with the data generated by the noise
    :return: data matrix
    """
    data = [
        [
            pnoise2(x / self._freq, y / self._freq, self._octaves)
            for x in range(self._size)
        ]
        for y in range(self._size)]
    if self._normalized:
        data = normalized(data)
    return data

def solve(self):
    free_positions = self.maze.get_free_positions()
    cell_vals = normalized({pos: 1 for pos in free_positions})

    if self.forward:
        try:
            self.f_beliefs = self.forward(
                cell_vals, self.obs_seq[1:], self.robot)
        except Exception as e:
            mb.showerror(e.__class__.__name__,
                         'An exception occurred when executing '
                         '"forward()" function. Traceback will be written to '
                         'the standard error output.')
            print(traceback.format_exc(), file=sys.stderr)
        else:
            self.f_beliefs = [cell_vals] + self.f_beliefs
            self._update_showing_state()

    if self.forwardbackward:
        try:
            self.fb_beliefs = self.forwardbackward(
                cell_vals, self.obs_seq[1:], self.robot)
        except Exception as e:
            mb.showerror(e.__class__.__name__,
                         'An exception occurred when executing '
                         '"forwardbackward()" function. Traceback will be written to '
                         'the standard error output.')
            print(traceback.format_exc(), file=sys.stderr)
        else:
            self.fb_beliefs = [cell_vals] + self.fb_beliefs
            self._update_showing_state()

    if self.viterbi:
        try:
            path, _ = self.viterbi(
                cell_vals, self.obs_seq[1:], self.robot)
        except Exception as e:
            mb.showerror(e.__class__.__name__,
                         'An exception occurred when executing '
                         '"viterbi()" function. Traceback will be written to '
                         'the standard error output.')
            print(traceback.format_exc(), file=sys.stderr)
        else:
            # Convert Viterbi positions to beliefs
            self.v_beliefs = [cell_vals]
            for pos in path:
                self.v_beliefs.append(Counter({pos: 1}))
            self._update_showing_state()

    self._handle_showing()
    self._handle_nav_button(0)  # Repaint
    self._update_navigation_state()
    self.solved_queue.put(True)

def _get_next_state_distr(self, cur_state):
    """Return the distribution over possible next states

    Takes the walls around current state into account.
    """
    p = Counter()
    for dir in ALL_DIRS:
        next_state = add(cur_state, dir)
        if self.maze.is_free(next_state):
            p[next_state] = self.move_probs[dir]
    return normalized(p)

def forward1(prev_f, cur_e, hmm, normalize=False):
    """Perform a single update of the forward message

    :param prev_f: Counter, previous belief distribution over states
    :param cur_e: a single current observation
    :param hmm: HMM, contains the transition and emission models
    :param normalize: bool, should we normalize on the fly?
    :return: Counter, current belief distribution over states
    """
    cur_f = update_belief_by_time_step(prev_f, hmm)
    cur_f = update_belief_by_evidence(cur_f, cur_e, hmm)
    if normalize:
        cur_f = normalized(cur_f)
    return cur_f

def freq_shift():
    files = ['A1_v111_15.00s.aif', 'A1_v95_15.00s.aif']
    wavea, waveb = [
        read_wave_file(os.path.join(root_dir, file)) for file in files
    ]

    louder_a = wavea[0] if (
        numpy.amax(wavea[0]) > numpy.amax(wavea[1])) else wavea[1]
    louder_b = waveb[0] if (
        numpy.amax(waveb[0]) > numpy.amax(waveb[1])) else waveb[1]

    freqd = freq_diff(normalized(louder_a), normalized(louder_b))
    waveb_shifted = [shift_freq(channel, freqd) for channel in waveb]

    louder_shifted_b = waveb_shifted[0] if (numpy.amax(waveb_shifted[0]) >
                                            numpy.amax(waveb_shifted[1])) \
        else waveb_shifted[1]
    shifted_freqd = freq_diff(normalized(louder_a),
                              normalized(louder_shifted_b))

    # lefts_aligned = aligned_sublists(wavea[0], waveb[0])
    rights_aligned = aligned_sublists(wavea[1], waveb[1])
    # shifted_lefts_aligned = aligned_sublists(wavea[0], waveb_shifted[0])

    diffl = normalized_difference(*aligned_sublists(wavea[0], waveb[0]))
    diffr = normalized_difference(*aligned_sublists(wavea[1], waveb[1]))

    plt.plot(normalized(rights_aligned[0][:10000]))
    plt.plot(normalized(rights_aligned[1][:10000]))
    plt.plot(
        numpy.absolute(
            normalized(rights_aligned[0][:10000]) -
            normalized(rights_aligned[1][:10000])))
    plt.show()

    shifted_diffl = normalized_difference(
        *aligned_sublists(wavea[0], waveb_shifted[0]))
    shifted_diffr = normalized_difference(
        *aligned_sublists(wavea[1], waveb_shifted[1]))

    print files
    print 'diffs\t\t', diffl, diffr
    print 'shifted diffs\t', shifted_diffl, shifted_diffr
    print 'freqs', freqd
    print 'shifted freqs', shifted_freqd

def forwardbackward(priors, e_seq, hmm):
    """Compute the smoothed belief states given the observation sequence

    :param priors: Counter, initial belief distribution over the states
    :param e_seq: sequence of observations
    :param hmm: HMM, contains the transition and emission models
    :return: sequence of Counters, estimates of belief states for all time slices
    """
    se = []  # Smoothed belief distributions
    fs = forward(priors, e_seq, hmm)
    b = Counter({Xt: 1.0 for Xt in hmm.get_states()})
    for e, f in zip(reversed(e_seq), reversed(fs)):
        s = normalized(Counter({Xt: f[Xt] * b[Xt] for Xt in hmm.get_states()}))
        se.append(s)
        b = backward1(b, e, hmm)
    return list(reversed(se))

def main():
    a = SO3.exp(np.random.rand(3))
    b = np.array((2, 2, 2))
    norm_x = 1

    true_x = np.dot(a.T, normalized(b))

    u, s, vt = np.linalg.svd(a)
    v = vt.T
    btilde = np.dot(u.T, b)

    def secular(k):
        return np.sum(np.square(s*btilde / (s*s + k))) - norm_x*norm_x

    k = scipy.optimize.fsolve(secular, 1.)
    estimated_x = np.dot(v, s*btilde / (s*s + k))

    print estimated_x
    print np.dot(a, estimated_x)

def get_next_state_distr(self, cur_state):
    """Return the distribution over possible next states

    Takes the walls around current state into account.
    """
    p = Counter()
    forward_position = add((cur_state[0], cur_state[1]), ALL_DIRS[cur_state[2]])
    if self.maze.is_free(forward_position):
        p[(forward_position[0], forward_position[1],
           cur_state[2])] = self.move_probs[FWD]
    p[(cur_state[0], cur_state[1],
       (cur_state[2] + 1) % 4)] = self.move_probs[TURN_RIGHT]
    p[(cur_state[0], cur_state[1],
       (cur_state[2] - 1) % 4)] = self.move_probs[TURN_LEFT]
    return normalized(p)

def test_matrix_alg_steps():
    """Basic test to compare results of naive hmm_inference and
    hmm_inference_matrix for all algorithm updates"""

    def test_steps_normal(robot, obs, obs2, initial_belief):
        globals()["hmm_inference"] = importlib.import_module("hmm_inference")
        v1 = hmm_inference.forward1(initial_belief, obs, robot)
        v2 = hmm_inference.forward1(v1, obs2, robot)
        back_v = hmm_inference.backward1(v2, obs2, robot)
        viterbi_v_log = hmm_inference.viterbi1_log(initial_belief, obs, robot)[0]
        viterbi_v = hmm_inference.viterbi1(initial_belief, obs, robot)[0]
        return [v1, v2, back_v, viterbi_v, viterbi_v_log]

    def test_steps_matrix(robot, obs, obs2, initial_belief):
        globals()["hmm_inference"] = importlib.import_module("hmm_inference")
        initial_belief = pv.ProbabilityVector.initialize_from_dict(
            initial_belief)
        v1_m = hmm_inference.forward1(initial_belief, obs, robot)
        v2_m = hmm_inference.forward1(v1_m, obs2, robot)
        back_v_m = hmm_inference.backward1(v2_m, obs2, robot)
        viterbi_v_m_log = hmm_inference.viterbi1_log(initial_belief, obs, robot)[0]
        viterbi_v_m = hmm_inference.viterbi1(initial_belief, obs, robot)[0]
        return [v1_m, v2_m, back_v_m, viterbi_v_m, viterbi_v_m_log]

    robot = init_maze()
    robot.init_models()
    obs = ('f', 'n', 'f', 'n')
    obs2 = ('n', 'n', 'f', 'f')
    initial_belief = normalized({pos: 1 for pos in robot.get_states()})

    for v_normal, v_matrix in zip(
            test_steps_normal(robot, obs, obs2, initial_belief),
            test_steps_matrix(robot, obs, obs2, initial_belief)):
        print(v_normal == v_matrix)
        # for state in v_matrix.states:
        #     print(state, ": ", " ", v_matrix[state], "/", v_normal[state])
        print()

def _step(self, action):
    # calculate difference between current heading and desired heading
    desired_vector = (self.target[0] - self.car_location[0],
                      self.target[1] - self.car_location[1])
    desired_vector = normalized(desired_vector)
    current_vector = polar_to_cartesian((1, self.car_heading))
    angle_difference_start = angle_signed(desired_vector, current_vector)

    # calculate angle change
    angle = 0
    # if action == 0 continue straight...
    if action == 1:  # turn left
        angle = -self.car_steering_angle
    if action == 2:  # turn right
        angle = self.car_steering_angle

    # calculate location of front and rear axles.
    front_axle = (self.car_location[0] +
                  (self.car_wheel_base / 2) * math.cos(self.car_heading),
                  self.car_location[1] +
                  (self.car_wheel_base / 2) * math.sin(self.car_heading))
    rear_axle = (self.car_location[0] -
                 (self.car_wheel_base / 2) * math.cos(self.car_heading),
                 self.car_location[1] -
                 (self.car_wheel_base / 2) * math.sin(self.car_heading))

    # calculate new location of front and rear axles
    loc_rear = (
        rear_axle[0] +
        self.car_speed * self.time_step * math.cos(self.car_heading),
        rear_axle[1] +
        self.car_speed * self.time_step * math.sin(self.car_heading))
    loc_front = (front_axle[0] + self.car_speed * self.time_step *
                 math.cos(self.car_heading + angle),
                 front_axle[1] + self.car_speed * self.time_step *
                 math.sin(self.car_heading + angle))

    # calculate new location and heading, update environment state values
    self.car_location = ((loc_front[0] + loc_rear[0]) / 2,
                         (loc_front[1] + loc_rear[1]) / 2)
    self.car_heading = math.atan2(loc_front[1] - loc_rear[1],
                                  loc_front[0] - loc_rear[0])

    # calculate difference between current heading and desired heading
    desired_vector = (self.target[0] - self.car_location[0],
                      self.target[1] - self.car_location[1])
    desired_vector = normalized(desired_vector)
    current_vector = polar_to_cartesian((1, self.car_heading))
    angle_difference_end = angle_signed(desired_vector, current_vector)

    done = False
    angle_change = abs(angle_difference_start) - abs(angle_difference_end)
    reward = angle_change / math.radians(self.car_steering_angle)

    # check if car is within bounds
    if abs(self.car_location[0]) > 0.5 * self.playing_area_width or abs(
            self.car_location[1]) > 0.5 * self.playing_area_height:
        done = True
        reward = -100

    # check if car has reached the current target
    distance = euclid_dist_2D(self.target, self.car_location)
    if distance < self.visited_distance:
        reward = 25 + 75 * (self.current_step / self.max_steps)
        # add new target and reset step...
        self.current_target += 1
        if self.current_target < self.max_target_count:
            self.target = (numpy.random.uniform(
                -0.25 * self.playing_area_width,
                0.25 * self.playing_area_width),
                           numpy.random.uniform(
                -0.25 * self.playing_area_height,
                0.25 * self.playing_area_height))
            self.current_step = 0
        else:
            done = True

    # check if max steps were reached
    if self.current_step >= self.max_steps:
        done = True
        distance_initial = euclid_dist_2D(self.target, self.initial_location)
        if distance_initial:
            reward = 25 * (1 - distance / distance_initial)
        else:
            reward = 2.5

    # create observation, return...
    vel_cart = polar_to_cartesian((1, self.car_heading))
    observation = numpy.array([
        self.target[0] / (0.5 * self.playing_area_width),
        self.target[1] / (0.5 * self.playing_area_height),
        self.car_location[0] / (0.5 * self.playing_area_width),
        self.car_location[1] / (0.5 * self.playing_area_height),
        vel_cart[0], vel_cart[1],
        angle_difference_end / math.radians(180)
    ])
    observation = observation.astype(numpy.float32)

    # increment step, return
    self.current_step += 1
    # print(reward)
    if done:
        return time_step.termination(observation, reward)
    else:
        return time_step.transition(observation, reward, discount=0.98)

def gravity_direction_error(true_trajectory, estimated_trajectory):
    actual = utils.normalized(true_trajectory.gravity)
    estimated = utils.normalized(estimated_trajectory.gravity)
    return np.arccos(np.dot(actual, estimated))

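# Hedged usage sketch (the trajectory type is an assumption; only a .gravity
# attribute is needed): the error is the angle in radians between the true and
# estimated gravity directions, independent of their magnitudes.
def _gravity_direction_error_example():
    from types import SimpleNamespace
    true_traj = SimpleNamespace(gravity=np.array([0., 0., -9.8]))
    est_traj = SimpleNamespace(gravity=np.array([0., 0.1, -9.8]))
    angle = gravity_direction_error(true_traj, est_traj)
    assert 0. < angle < 0.02  # roughly 0.01 rad for this small perturbation
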
def main():
    np.random.seed(1)

    #
    # Construct ground truth
    #
    num_frames = 5
    num_landmarks = 10
    num_imu_readings = 8
    bezier_degree = 3
    out = 'out/full_initialization'

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Bezier curve degree:', bezier_degree

    if not os.path.isdir(out):
        os.mkdir(out)

    # Both splines should start at 0,0,0
    frame_times = np.linspace(0, .9, num_frames)
    accel_times = np.linspace(0, 1, num_imu_readings)

    true_pos_controls = np.random.randn(bezier_degree-1, 3)
    true_orient_controls = np.random.randn(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3)

    true_frame_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in frame_times])
    true_frame_cayleys = np.array([zero_offset_bezier(true_orient_controls, t) for t in frame_times])
    true_frame_orientations = np.array(map(cayley, true_frame_cayleys))

    true_imu_cayleys = np.array([zero_offset_bezier(true_orient_controls, t) for t in accel_times])
    true_imu_orientations = np.array(map(cayley, true_imu_cayleys))

    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3)
    true_global_accels = np.array([zero_offset_bezier_second_deriv(true_pos_controls, t) for t in accel_times])
    true_accels = np.array([np.dot(R, a + true_gravity) + true_accel_bias
                            for R, a in zip(true_imu_orientations, true_global_accels)])

    true_features = np.array([[normalized(np.dot(R, x-p)) for x in true_landmarks]
                              for R, p in zip(true_frame_orientations, true_frame_positions)])

    true_vars = np.hstack((true_pos_controls.flatten(),
                           true_orient_controls.flatten(),
                           true_accel_bias,
                           true_gravity))

    print np.min(true_features.reshape((-1, 3)), axis=0)
    print np.max(true_features.reshape((-1, 3)), axis=0)

    #
    # Add sensor noise
    #
    accel_noise = 0
    feature_noise = 0

    observed_features = true_features.copy()
    observed_accels = true_accels.copy()

    if accel_noise > 0:
        observed_accels += np.random.randn(*observed_accels.shape) * accel_noise

    if feature_noise > 0:
        observed_features += np.random.rand(*observed_features.shape) * feature_noise

    #
    # Construct symbolic versions of the above
    #
    num_position_vars = (bezier_degree-1)*3
    num_orientation_vars = (bezier_degree-1)*3
    num_accel_bias_vars = 3
    num_gravity_vars = 3

    block_sizes = [num_position_vars, num_orientation_vars, num_accel_bias_vars, num_gravity_vars]
    num_vars = sum(block_sizes)

    sym_vars = [Polynomial.coordinate(i, num_vars, Fraction) for i in range(num_vars)]
    sym_pos_controls, sym_orient_controls, sym_accel_bias, sym_gravity = map(np.array, chop(sym_vars, block_sizes))

    sym_pos_controls = sym_pos_controls.reshape((-1, 3))
    sym_orient_controls = sym_orient_controls.reshape((-1, 3))

    assert len(true_vars) == len(sym_vars)

    #
    # Accel residuals
    #
    residuals = []

    print 'Accel residuals:'
    for i, t in enumerate(accel_times):
        sym_cayley = zero_offset_bezier(sym_orient_controls, t)
        sym_orient = cayley_mat(sym_cayley)
        sym_denom = cayley_denom(sym_cayley)
        sym_global_accel = zero_offset_bezier_second_deriv(sym_pos_controls, t)
        sym_accel = np.dot(sym_orient, sym_global_accel + sym_gravity) + sym_denom * sym_accel_bias
        residual = sym_accel - sym_denom * observed_accels[i]
        residuals.extend(residual)
        for r in residual:
            print '  %f  (degree=%d, length=%d)' % (r(*true_vars), r.total_degree, len(r))

    #
    # Epipolar residuals
    #
    print 'Epipolar residuals:'
    for i, ti in enumerate(frame_times):
        if i == 0:
            continue
        sym_Ri = cayley_mat(zero_offset_bezier(sym_orient_controls, ti))
        sym_pi = zero_offset_bezier(sym_pos_controls, ti)
        sym_E = essential_matrix_from_relative_pose(sym_Ri, sym_pi)
        for k in range(num_landmarks):
            z1 = observed_features[0][k]
            zi = observed_features[i][k]
            residual = np.dot(zi, np.dot(sym_E, z1))
            residuals.append(residual)
            r = residual
            print '  %f  (degree=%d, length=%d)' % (r(*true_vars), r.total_degree, len(r))

    #
    # Construct cost
    #
    cost = Polynomial(num_vars)
    for r in residuals:
        cost += r*r
    gradients = cost.partial_derivatives()

    print '\nNum vars:', num_vars
    print 'Num residuals:', len(residuals)

    print '\nCost:'
    print '  Num terms: %d' % len(cost)
    print '  Degree: %d' % cost.total_degree

    #
    # Output to file
    #
    write_polynomials(cost, out+'/cost.txt')
    write_polynomials(residuals, out+'/residuals.txt')
    write_polynomials(gradients, out+'/gradients.txt')
    write_solution(true_vars, out+'/solution.txt')

    np.savetxt(out+'/feature_measurements.txt', observed_features.reshape((-1, 3)))
    np.savetxt(out+'/accel_measurements.txt', observed_accels)
    np.savetxt(out+'/problem_size.txt', [num_frames, num_landmarks, num_imu_readings])
    np.savetxt(out+'/frame_times.txt', frame_times)
    np.savetxt(out+'/accel_times.txt', accel_times)

    np.savetxt(out+'/true_pos_controls.txt', true_pos_controls)
    np.savetxt(out+'/true_orient_controls.txt', true_orient_controls)
    np.savetxt(out+'/true_accel_bias.txt', true_accel_bias)
    np.savetxt(out+'/true_gravity.txt', true_gravity)

    return

    #
    # Plot
    #
    fig = plt.figure(figsize=(14, 6))
    ax = fig.add_subplot(1, 2, 1, projection='3d')

    ts = np.linspace(0, 1, 100)
    true_ps = np.array([zero_offset_bezier(true_pos_controls, t) for t in ts])

    ax.plot(true_ps[:, 0], true_ps[:, 1], true_ps[:, 2], '-b')

    plt.show()

from noises.white_noise_generator import WhiteNoiseGenerator
from output_generators.image_generator import ImageGenerator
from noises.perlin_noise_generator import PerlinNoiseGenerator
from noises.simplex_noise_generator import SimplexNoiseGenerator
from output_generators.json_generator import JsonGenerator
from post_processing.cellular import CellularAutomata
from post_processing.polarize import Polarize
from post_processing.soften import Soften
from utils import normalized
import numpy as np

print('Welcome to the procedural algorithm testing application')

# Step 1: Data generation
print('Step 1: generating the base noise')
generator = PerlinNoiseGenerator(7, 1024, True, 1)
elevation = generator.generate()
data = normalized(elevation)

# Step 2: Additional processing
print('Step 2: additional processing')
post = Polarize(elevation)
elevation = post.do_work()

# Step 3: Creating the image from the final data
print('Step 3: exporting data')
out_gen = ImageGenerator(elevation, 'output.png')
out_gen.generate_output()

def update(self, commands):
    #self.logfile.write("frame\n")
    impact = [False, False]
    direction = [None, None]
    stick_0 = self.evaluate_joystick(0)
    stick_1 = self.evaluate_joystick(1)
    if self.old_sticks != []:
        change1 = length(sub(self.old_sticks[0]["stick"], stick_0["stick"]))
        if change1 > resources.get("impactThreshold"):
            impact[0] = True
        #self.logfile.write("p1 {}\n".format(stick_0["stick"]))
        position = length(stick_0["stick"])
        if position < 0.2:
            direction[0] = "neutral"
        else:
            stickdir = utils.normalized(stick_0["stick"])
            if stickdir[0] > 0.7:
                direction[0] = "right"
            elif stickdir[0] < -0.7:
                direction[0] = "left"
            elif stickdir[1] > 0.7:
                direction[0] = "down"
            elif stickdir[1] < -0.7:
                direction[0] = "up"
        #self.logfile.write("p1 dir {}\n".format(direction[0]))
        change2 = length(sub(self.old_sticks[1]["stick"], stick_1["stick"]))
        if change2 > resources.get("impactThreshold"):
            impact[1] = True
        position = length(stick_1["stick"])
        if position < 0.2:
            direction[1] = "neutral"
        else:
            stickdir = utils.normalized(stick_1["stick"])
            if stickdir[0] > 0.7:
                direction[1] = "right"
            elif stickdir[0] < -0.7:
                direction[1] = "left"
            elif stickdir[1] > 0.7:
                direction[1] = "down"
            elif stickdir[1] < -0.7:
                direction[1] = "up"
        #self.logfile.write("p2 dir {}\n".format(direction[1]))
        actor = self.entities.component_for_entity(self.players[0], Actor)
        actor.direction = direction[0]
        actor.impact = impact[0]
        actor2 = self.entities.component_for_entity(self.players[1], Actor)
        actor2.direction = direction[1]
        actor2.impact = impact[1]
    else:
        self.old_sticks = [None, None]
    self.old_sticks[0] = stick_0
    self.old_sticks[1] = stick_1
    for command in commands:
        if command == "exit":
            return "finish"
        if len(command) == 2:
            if command[1] == "A":
                self.add_command(command[0], "attack")
            if command[1] == "L":
                self.add_command(command[0], "shield")
    self.systems.update(1.0/resources.get("fps"))
    position = self.entities.component_for_entity(self.players[0], Position)
    if position.x < -512 or position.x > 512 or position.y < -500 or position.y > 384:
        self.lives[0] -= 1
        position.x = 0
        position.y = 0
        velocity = self.entities.component_for_entity(self.players[0], Velocity)
        velocity.dx = 0
        velocity.dy = 0
        if self.lives[0] == 0:
            self.winner = 1
    position2 = self.entities.component_for_entity(self.players[1], Position)
    if position2.x < -512 or position2.x > 512 or position2.y < -500 or position2.y > 384:
        self.lives[1] -= 1
        position2.x = 0
        position2.y = 0
        velocity2 = self.entities.component_for_entity(self.players[1], Velocity)
        velocity2.dx = 0
        velocity2.dy = 0
        if self.lives[1] == 0:
            self.winner = 0
    return None

def run_bezier_position_estimation():
    np.random.seed(0)

    #
    # Construct ground truth
    #
    num_frames = 6
    num_landmarks = 10
    num_imu_readings = 100
    bezier_degree = 4

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Bezier curve degree:', bezier_degree

    # Both splines should start at 0,0,0
    true_frame_timestamps = np.linspace(0, .9, num_frames)
    true_accel_timestamps = np.linspace(0, 1, num_imu_readings)

    true_rot_controls = np.random.randn(bezier_degree-1, 3) * .1
    true_pos_controls = np.random.randn(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3)*5 + [0., 0., 20.]

    true_frame_orientations = np.array([cayley.cayley(bezier.zero_offset_bezier(true_rot_controls, t))
                                        for t in true_frame_timestamps])
    true_frame_positions = np.array([bezier.zero_offset_bezier(true_pos_controls, t)
                                     for t in true_frame_timestamps])

    true_gravity_magnitude = 9.8
    true_gravity = utils.normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3) * .01

    print 'True gravity:', true_gravity

    true_imu_orientations = np.array([cayley.cayley(bezier.zero_offset_bezier(true_rot_controls, t))
                                      for t in true_accel_timestamps])
    true_accel_readings = np.array([predict_accel(true_pos_controls, true_rot_controls,
                                                  true_accel_bias, true_gravity, t)
                                    for t in true_accel_timestamps])

    true_features = np.array([[predict_feature(true_pos_controls, true_rot_controls, x, t)
                               for x in true_landmarks]
                              for t in true_frame_timestamps])

    true_vars = np.hstack((true_pos_controls.flatten(),
                           true_accel_bias,
                           true_gravity,
                           true_landmarks.flatten()))

    #
    # Add sensor noise
    #
    accel_timestamp_noise = 1e-5
    accel_reading_noise = 1e-5
    accel_orientation_noise = 1e-5

    frame_timestamp_noise = 1e-5
    frame_orientation_noise = 1e-5
    feature_noise = 1e-5

    observed_accel_timestamps = utils.add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = utils.add_white_noise(true_accel_readings, accel_reading_noise)
    observed_accel_orientations = utils.add_orientation_noise(true_imu_orientations, accel_orientation_noise)

    observed_frame_timestamps = utils.add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = utils.add_orientation_noise(true_frame_orientations, frame_orientation_noise)

    observed_features = utils.add_white_noise(true_features, feature_noise)

    #
    # Solve
    #
    problem = construct_problem(
        bezier_degree,
        observed_accel_timestamps,
        observed_accel_orientations,
        observed_accel_readings,
        observed_frame_timestamps,
        observed_frame_orientations,
        observed_features,
        gravity_magnitude=true_gravity_magnitude+.1,
        accel_tolerance=1e-3,
        feature_tolerance=1e-3)

    #problem.evaluate(true_vars)

    result = socp.solve(problem, sparse=True)

    if result['x'] is None:
        print 'Did not find a feasible solution'
        return

    estimated_vars = np.squeeze(result['x'])
    estimated_pos_controls = estimated_vars[:true_pos_controls.size].reshape((-1, 3))
    estimated_accel_bias = estimated_vars[true_pos_controls.size:true_pos_controls.size+3]
    estimated_gravity = estimated_vars[true_pos_controls.size+3:true_pos_controls.size+6]
    estimated_landmarks = estimated_vars[true_pos_controls.size+6:].reshape((-1, 3))

    estimated_frame_positions = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t)
                                          for t in true_frame_timestamps])

    print 'Position norms:', np.linalg.norm(true_frame_positions, axis=1)
    print 'Position errors:', np.linalg.norm(estimated_frame_positions - true_frame_positions, axis=1)

    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print 'Max error:', np.max(estimated_vars - true_vars)

    plt.clf()
    plt.barh(np.arange(len(true_vars)), true_vars, height=.3, alpha=.3, color='g')
    plt.barh(np.arange(len(true_vars))+.4, estimated_vars, height=.3, alpha=.3, color='r')
    plt.savefig('out/vars.pdf')

def run_spline_epipolar():
    # Construct symbolic problem
    num_landmarks = 10
    num_frames = 3
    num_imu_readings = 8
    bezier_degree = 4
    out = 'out/epipolar_accel_bezier3'

    if not os.path.isdir(out):
        os.mkdir(out)

    # Both splines should start at 0,0,0
    frame_times = np.linspace(0, .9, num_frames)
    imu_times = np.linspace(0, 1, num_imu_readings)

    true_rot_controls = np.random.rand(bezier_degree-1, 3)
    true_pos_controls = np.random.rand(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3)

    true_cayleys = np.array([zero_offset_bezier(true_rot_controls, t) for t in frame_times])
    true_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in frame_times])
    true_accels = np.array([zero_offset_bezier_second_deriv(true_pos_controls, t) for t in imu_times])

    true_qs = map(cayley_mat, true_cayleys)
    true_rotations = map(cayley, true_cayleys)

    true_uprojections = [[np.dot(R, x-p) for x in true_landmarks]
                         for R, p in zip(true_rotations, true_positions)]
    true_projections = [[normalized(zu) for zu in row] for row in true_uprojections]

    p0 = true_positions[0]
    q0 = true_qs[0]
    for i in range(1, num_frames):
        p = true_positions[i]
        q = true_qs[i]
        E = essential_matrix(q0, p0, q, p)
        for j in range(num_landmarks):
            z = true_projections[i][j]
            z0 = true_projections[0][j]
            #print np.dot(z, np.dot(E, z0))

    # construct symbolic versions of the above
    s_offs = 0
    p_offs = s_offs + (bezier_degree-1)*3
    num_vars = p_offs + (bezier_degree-1)*3

    sym_vars = [Polynomial.coordinate(i, num_vars, Fraction) for i in range(num_vars)]
    sym_rot_controls = np.reshape(sym_vars[s_offs:s_offs+(bezier_degree-1)*3], (bezier_degree-1, 3))
    sym_pos_controls = np.reshape(sym_vars[p_offs:p_offs+(bezier_degree-1)*3], (bezier_degree-1, 3))

    true_vars = np.hstack((true_rot_controls.flatten(), true_pos_controls.flatten()))

    residuals = []

    # Accel residuals
    for i in range(num_imu_readings):
        sym_a = zero_offset_bezier_second_deriv(sym_pos_controls, imu_times[i])
        residual = sym_a - true_accels[i]
        residuals.extend(residual)

    # Epipolar residuals
    p0 = np.zeros(3)
    R0 = np.eye(3)
    for i in range(1, num_frames):
        sym_s = zero_offset_bezier(sym_rot_controls, frame_times[i])
        sym_p = zero_offset_bezier(sym_pos_controls, frame_times[i])
        sym_q = cayley_mat(sym_s)
        #sym_q = np.eye(3) * (1. - np.dot(sym_s, sym_s)) + 2.*skew(sym_s) + 2.*np.outer(sym_s, sym_s)
        sym_E = essential_matrix(R0, p0, sym_q, sym_p)
        for j in range(num_landmarks):
            z = true_projections[i][j]
            z0 = true_projections[0][j]
            residual = np.dot(z, np.dot(sym_E, z0))
            residuals.append(residual)

    print 'Num vars:', num_vars
    print 'Num residuals:', len(residuals)

    print 'Residuals:', len(residuals)
    cost = Polynomial(num_vars)
    for r in residuals:
        cost += r*r
        print '  %f  (degree=%d, length=%d)' % (r(*true_vars), r.total_degree, len(r))

    print '\nCost:'
    print '  Num terms: %d' % len(cost)
    print '  Degree: %d' % cost.total_degree

    print '\nGradients:'
    gradients = cost.partial_derivatives()
    for gradient in gradients:
        print '  %d  (degree=%d, length=%d)' % (gradient(*true_vars), gradient.total_degree, len(gradient))

    jacobians = [r.partial_derivatives() for r in residuals]
    J = np.array([[J_ij(*true_vars) for J_ij in row] for row in jacobians])

    U, S, V = np.linalg.svd(J)

    print '\nJacobian singular values:'
    print J.shape
    print S

    null_space_dims = sum(np.abs(S) < 1e-5)
    if null_space_dims > 0:
        print '\nNull space:'
        for i in range(null_space_dims):
            print V[-i - 1]
        print V[-2]

    print '\nHessian eigenvalues:'
    H = np.dot(J.T, J)
    print H.shape
    print np.linalg.eigvals(H)

    # Output to file
    write_polynomials(cost, out+'/cost.txt')
    write_polynomials(residuals, out+'/residuals.txt')
    write_polynomials(gradients, out+'/gradients.txt')
    write_polynomials(jacobians, out+'/jacobians.txt')
    write_solution(true_vars, out+'/solution.txt')

def run_epipolar():
    # Construct symbolic problem
    num_landmarks = 10
    num_frames = 3

    true_landmarks = np.random.randn(num_landmarks, 3)
    true_positions = np.vstack((np.zeros(3),
                                np.random.rand(num_frames-1, 3)))
    true_cayleys = np.vstack((np.zeros(3),
                              np.random.rand(num_frames-1, 3)))

    true_qs = map(cayley_mat, true_cayleys)
    true_rotations = map(cayley, true_cayleys)

    true_uprojections = [[np.dot(R, x-p) for x in true_landmarks]
                         for R, p in zip(true_rotations, true_positions)]
    true_projections = [[normalized(zu) for zu in row] for row in true_uprojections]

    p0 = true_positions[0]
    q0 = true_qs[0]
    for i in range(1, num_frames):
        p = true_positions[i]
        q = true_qs[i]
        E = essential_matrix(q0, p0, q, p)
        for j in range(num_landmarks):
            z = true_projections[i][j]
            z0 = true_projections[0][j]
            print np.dot(z, np.dot(E, z0))

    # construct symbolic versions of the above
    s_offs = 0
    p_offs = s_offs + (num_frames-1)*3
    num_vars = p_offs + (num_frames-1)*3

    sym_vars = [Polynomial.coordinate(i, num_vars, Fraction) for i in range(num_vars)]
    sym_cayleys = np.reshape(sym_vars[s_offs:s_offs+(num_frames-1)*3], (num_frames-1, 3))
    sym_positions = np.reshape(sym_vars[p_offs:p_offs+(num_frames-1)*3], (num_frames-1, 3))

    true_vars = np.hstack((true_cayleys[1:].flatten(),
                           true_positions[1:].flatten()))

    residuals = []
    p0 = np.zeros(3)
    R0 = np.eye(3)
    for i in range(1, num_frames):
        sym_p = sym_positions[i-1]
        sym_s = sym_cayleys[i-1]
        sym_q = cayley_mat(sym_s)
        sym_E = essential_matrix(R0, p0, sym_q, sym_p)
        for j in range(num_landmarks):
            z = true_projections[i][j]
            z0 = true_projections[0][j]
            residual = np.dot(z, np.dot(sym_E, z0))
            print 'Residual poly: ', len(residual), residual.total_degree
            residuals.append(residual)

    print 'Num sym_vars:', num_vars
    print 'Num residuals:', len(residuals)

    print 'Residuals:', len(residuals)
    cost = Polynomial(num_vars)
    for residual in residuals:
        #cost += np.dot(residual, residual)
        print '  ', residual(*true_vars)  #ri.num_vars, len(true_vars)

    print '\nGradients:'
    gradient = [cost.partial_derivative(i) for i in range(num_vars)]
    for gi in gradient:
        print '  ', gi(*true_vars)

    J = np.array([[r.partial_derivative(i)(*true_vars) for i in range(num_vars)]
                  for r in residuals])

    print '\nJacobian singular values:'
    print J.shape
    U, S, V = np.linalg.svd(J)
    print S
    print V[-1]
    print V[-2]

    print '\nHessian eigenvalues:'
    H = np.dot(J.T, J)
    print H.shape
    print np.linalg.eigvals(H)

def run_position_estimation():
    #
    # Parameters
    #
    bezier_degree = 4
    num_frames = 8
    num_landmarks = 120
    num_accel_readings = 50
    num_gyro_readings = 60

    gyro_timestamp_noise = 0
    gyro_reading_noise = 1e-3

    accel_timestamp_noise = 0
    accel_reading_noise = 1e-3

    frame_timestamp_noise = 0
    frame_orientation_noise = 1e-3
    feature_noise = 1e-4

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num accel readings:', num_accel_readings
    print 'Num gyro readings:', num_gyro_readings
    print 'Bezier curve degree:', bezier_degree

    #
    # Construct ground truth
    #
    true_frame_timestamps = np.linspace(0, 1, num_frames)
    true_accel_timestamps = np.linspace(0, 1, num_accel_readings)

    true_gyro_bias = np.random.rand(3)
    true_accel_bias = np.random.randn(3)
    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude

    true_rot_controls = np.random.randn(bezier_degree-1, 3)
    true_pos_controls = np.random.randn(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3) * 5
    true_landmarks[:, 2] += 20

    true_frame_orientations = np.array([cayley(zero_offset_bezier(true_rot_controls, t))
                                        for t in true_frame_timestamps])
    true_frame_positions = np.array([zero_offset_bezier(true_pos_controls, t)
                                     for t in true_frame_timestamps])

    true_accel_readings = np.array([predict_accel(true_pos_controls, true_rot_controls,
                                                  true_accel_bias, true_gravity, t)
                                    for t in true_accel_timestamps])

    true_features = np.array([[predict_feature(true_pos_controls, true_rot_controls, x, t)
                               for x in true_landmarks]
                              for t in true_frame_timestamps])

    true_gyro_timestamps = np.linspace(0, 1, num_gyro_readings)
    true_gyro_readings = np.array([predict_gyro(true_rot_controls, true_gyro_bias, t)
                                   for t in true_gyro_timestamps])

    #
    # Add sensor noise
    #
    observed_gyro_timestamps = add_white_noise(true_gyro_timestamps, gyro_timestamp_noise)
    observed_gyro_readings = add_white_noise(true_gyro_readings, gyro_reading_noise)

    observed_accel_timestamps = add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = add_white_noise(true_accel_readings, accel_reading_noise)

    observed_frame_timestamps = add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = add_orientation_noise(true_frame_orientations, frame_orientation_noise)
    observed_frame_orientations[0] = true_frame_orientations[0]  # do not add noise to first frame

    observed_features = add_white_noise(true_features, feature_noise)

    #
    # Plot features
    #
    plt.clf()
    plot_tracks(true_features, 'x-g', limit=10, alpha=.4)
    plot_tracks(observed_features, 'o-r', limit=10, alpha=.4)
    plt.show()
    return

    #
    # Solve for orientation and gyro bias
    #
    print 'Estimating orientation...'
    estimated_gyro_bias, estimated_rot_controls = estimate_orientation(
        bezier_degree,
        observed_gyro_timestamps,
        observed_gyro_readings,
        observed_frame_timestamps,
        observed_frame_orientations)

    estimated_accel_orientations = np.array([predict_orientation(estimated_rot_controls, t)
                                             for t in observed_accel_timestamps])

    #
    # Solve for position, accel bias, and gravity
    #
    print 'Estimating position...'
    estimated_pos_controls, estimated_accel_bias, estimated_gravity = estimate_position(
        bezier_degree,
        observed_accel_timestamps,
        estimated_accel_orientations,
        observed_accel_readings,
        observed_frame_timestamps,
        observed_frame_orientations,
        observed_features)

    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t)
                                    for t in true_frame_timestamps])

    estimated_pfeatures = np.array([[pr(predict_feature(estimated_pos_controls, true_rot_controls, x, t))
                                     for x in true_landmarks]
                                    for t in true_frame_timestamps])
    true_pfeatures = pr(true_features)
    observed_pfeatures = pr(observed_features)

    #
    # Report
    #
    print 'Gyro bias error:', np.linalg.norm(estimated_gyro_bias - true_gyro_bias)
    print '  True gyro bias:', true_gyro_bias
    print '  Estimated gyro bias:', estimated_gyro_bias

    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print '  True accel bias:', true_accel_bias
    print '  Estimated accel bias:', estimated_accel_bias

    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print '  True gravity:', true_gravity
    print '  Estimated gravity:', estimated_gravity
    print '  Estimated gravity magnitude:', np.linalg.norm(estimated_gravity)

    for i in range(num_frames):
        print 'Frame %d position error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i]))

    #
    # Plot orientation results
    #
    plot_timestamps = np.linspace(0, 1, 50)
    estimated_gyro_readings = np.array([predict_gyro(estimated_rot_controls, true_gyro_bias, t)
                                        for t in plot_timestamps])

    true_orientations = np.array([SO3.log(predict_orientation(true_rot_controls, t))
                                  for t in plot_timestamps])
    estimated_orientations = np.array([SO3.log(predict_orientation(estimated_rot_controls, t))
                                       for t in plot_timestamps])
    observed_orientations = np.array(map(SO3.log, observed_frame_orientations))

    plt.figure(figsize=(14, 6))
    plt.subplot(1, 2, 1)
    plt.title('Gyro readings')
    plt.plot(plot_timestamps, estimated_gyro_readings, ':', label='estimated', alpha=1)
    plt.plot(true_gyro_timestamps, true_gyro_readings, '-', label='true', alpha=.3)
    plt.plot(true_gyro_timestamps, observed_gyro_readings, 'x', label='observed')
    plt.xlim(-.1, 1.5)
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.title('Orientation')
    plt.plot(plot_timestamps, estimated_orientations, ':', label='estimated', alpha=1)
    plt.plot(plot_timestamps, true_orientations, '-', label='true', alpha=.3)
    plt.plot(true_frame_timestamps, observed_orientations, 'x', label='observed')
    plt.xlim(-.1, 1.5)
    plt.legend()

    #
    # Plot position results
    #
    plot_timestamps = np.linspace(0, 1, 100)

    true_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in plot_timestamps])
    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t) for t in plot_timestamps])

    true_accels = np.array([predict_accel(true_pos_controls, true_rot_controls,
                                          true_accel_bias, true_gravity, t)
                            for t in plot_timestamps])
    estimated_accels = np.array([predict_accel(estimated_pos_controls, true_rot_controls,
                                               estimated_accel_bias, estimated_gravity, t)
                                 for t in plot_timestamps])

    fig = plt.figure(figsize=(14, 10))
    ax = fig.add_subplot(2, 2, 1, projection='3d')
    ax.plot(true_positions[:, 0], true_positions[:, 1], true_positions[:, 2], '-b')
    ax.plot(estimated_positions[:, 0], estimated_positions[:, 1], estimated_positions[:, 2], '-r')
    #ax.plot(true_landmarks[:,0], true_landmarks[:,1], true_landmarks[:,2], '.k', alpha=.2)

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(plot_timestamps, estimated_accels, ':', label='estimated', alpha=1)
    ax.plot(plot_timestamps, true_accels, '-', label='true', alpha=.3)
    ax.plot(observed_accel_timestamps, observed_accel_readings, 'x', label='observed')
    ax.legend()
    ax.set_xlim(-.1, 1.5)

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(true_pfeatures[1, :, 0], true_pfeatures[1, :, 1], 'x', label='true', alpha=.8)
    ax.plot(estimated_pfeatures[1, :, 0], estimated_pfeatures[1, :, 1], 'o', label='estimated', alpha=.4)

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(true_pfeatures[-1, :, 0], true_pfeatures[-1, :, 1], '.', label='true', alpha=.8)
    ax.plot(observed_pfeatures[-1, :, 0], observed_pfeatures[-1, :, 1], 'x', label='observed', alpha=.8)
    ax.plot(estimated_pfeatures[-1, :, 0], estimated_pfeatures[-1, :, 1], 'o', label='estimated', alpha=.4)

    plt.show()

def calibrated(z, k):
    return utils.normalized(np.linalg.solve(k, utils.unpr(z)))

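# Hedged usage sketch (assumes utils.unpr appends a homogeneous 1 and
# utils.normalized rescales to unit length): with an identity intrinsic matrix,
# the pixel at the principal point maps to the unit ray along the optical axis.
def _calibrated_example():
    k = np.eye(3)
    ray = calibrated(np.array([0.0, 0.0]), k)
    assert np.allclose(ray, [0.0, 0.0, 1.0])
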
def run_position_estimation():
    #
    # Construct ground truth
    #
    num_frames = 5
    num_landmarks = 150
    num_imu_readings = 80
    bezier_degree = 4
    use_epipolar_constraints = False

    print 'Num landmarks:', num_landmarks
    print 'Num frames:', num_frames
    print 'Num IMU readings:', num_imu_readings
    print 'Bezier curve degree:', bezier_degree

    # Both splines should start at 0,0,0
    true_frame_timestamps = np.linspace(0, .9, num_frames)
    true_accel_timestamps = np.linspace(0, 1, num_imu_readings)

    true_rot_controls = np.random.randn(bezier_degree-1, 3)
    true_pos_controls = np.random.randn(bezier_degree-1, 3)

    true_landmarks = np.random.randn(num_landmarks, 3) * 5
    true_landmarks[:, 2] += 20

    true_frame_orientations = np.array([cayley(zero_offset_bezier(true_rot_controls, t))
                                        for t in true_frame_timestamps])
    true_frame_positions = np.array([zero_offset_bezier(true_pos_controls, t)
                                     for t in true_frame_timestamps])

    true_gravity_magnitude = 9.8
    true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude
    true_accel_bias = np.random.randn(3)

    print 'True gravity:', true_gravity

    true_imu_orientations = np.array([cayley(zero_offset_bezier(true_rot_controls, t))
                                      for t in true_accel_timestamps])
    true_accel_readings = np.array([predict_accel(true_pos_controls, true_rot_controls,
                                                  true_accel_bias, true_gravity, t)
                                    for t in true_accel_timestamps])

    true_features = np.array([[predict_feature(true_pos_controls, true_rot_controls, x, t)
                               for x in true_landmarks]
                              for t in true_frame_timestamps])
    true_depths = np.array([[predict_depth(true_pos_controls, true_rot_controls, x, t)
                             for x in true_landmarks]
                            for t in true_frame_timestamps])

    #
    # Add sensor noise
    #
    accel_timestamp_noise = 0
    accel_reading_noise = 1e-3
    accel_orientation_noise = 1e-3

    frame_timestamp_noise = 0
    frame_orientation_noise = 1e-3
    feature_noise = 5e-3

    observed_accel_timestamps = add_white_noise(true_accel_timestamps, accel_timestamp_noise)
    observed_accel_readings = add_white_noise(true_accel_readings, accel_reading_noise)
    observed_accel_orientations = add_orientation_noise(true_imu_orientations, accel_orientation_noise)

    observed_frame_timestamps = add_white_noise(true_frame_timestamps, frame_timestamp_noise)
    observed_frame_orientations = add_orientation_noise(true_frame_orientations, frame_orientation_noise)

    observed_features = add_white_noise(true_features, feature_noise)

    #
    # Plot
    #
    #plt.clf()
    #plot_tracks(true_features, 'g-', alpha=.2, limit=1)
    #plot_tracks(true_features, 'go', alpha=.6, limit=1)
    #plot_tracks(observed_features, 'r-', alpha=.2, limit=1)
    #plot_tracks(observed_features, 'rx', alpha=.6, limit=1)
    #plt.show()

    #
    # Solve
    #
    if use_epipolar_constraints:
        estimated_pos_controls, estimated_accel_bias, estimated_gravity = estimate_position(
            bezier_degree,
            observed_accel_timestamps,
            observed_accel_orientations,
            observed_accel_readings,
            observed_frame_timestamps,
            observed_frame_orientations,
            observed_features,
            vision_weight=1.)
    else:
        estimated_pos_controls, estimated_accel_bias, estimated_gravity, estimated_landmarks, estimated_depths = \
            estimate_structure_and_motion(
                bezier_degree,
                observed_accel_timestamps,
                observed_accel_orientations,
                observed_accel_readings,
                observed_frame_timestamps,
                observed_frame_orientations,
                observed_features,
                vision_weight=1.)

        r, j = structure_and_motion_system(
            bezier_degree,
            observed_accel_timestamps,
            observed_accel_orientations,
            observed_accel_readings,
            observed_frame_timestamps,
            observed_frame_orientations,
            observed_features,
            vision_weight=1.)

    t0 = observed_frame_timestamps[0]
    r0 = observed_frame_orientations[0]
    z0 = observed_features[0, 0]
    p0 = zero_offset_bezier(estimated_pos_controls, t0)
    pp0 = zero_offset_bezier(true_pos_controls, t0)
    x0 = estimated_landmarks[0]
    xx0 = true_landmarks[0]
    k0 = estimated_depths[0, 0]
    kk0 = np.linalg.norm(np.dot(r0, xx0 - pp0))

    print 'residual:'
    print reprojection_residual(estimated_pos_controls, x0, k0, t0, z0, r0)
    print reprojection_residual(true_pos_controls, xx0, kk0, t0, z0, r0)
    print np.dot(r0, x0 - p0) - k0 * z0
    print np.dot(r0, xx0 - pp0) - kk0 * z0

    #true_structure = np.hstack((true_landmarks, true_depths[:, None]))
    #true_params = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity, true_structure.flatten()))

    #jtj = np.dot(j.T, j)
    #jtr = np.dot(j.T, r)

    #print jtj.shape, true_params.shape, jtr.shape
    #print np.dot(jtj, true_params) - jtr

    #return

    estimated_positions = np.array([zero_offset_bezier(estimated_pos_controls, t)
                                    for t in true_frame_timestamps])
    estimated_accel_readings = np.array([predict_accel(estimated_pos_controls, true_rot_controls,
                                                       estimated_accel_bias, estimated_gravity, t)
                                         for t in true_accel_timestamps])

    estimated_pfeatures = np.array([[pr(predict_feature(estimated_pos_controls, true_rot_controls, x, t))
                                     for x in true_landmarks]
                                    for t in true_frame_timestamps])
    true_pfeatures = pr(true_features)
    observed_pfeatures = pr(observed_features)

    #
    # Report
    #
    print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias)
    print '  True accel bias:', true_accel_bias
    print '  Estimated accel bias:', estimated_accel_bias

    print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity)
    print '  True gravity:', true_gravity
    print '  Estimated gravity:', estimated_gravity
    print '  Estimated gravity magnitude:', np.linalg.norm(estimated_gravity)

    for i in range(num_frames):
        print 'Frame %d position error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i]))

    fig = plt.figure(1, figsize=(14, 10))
    ax = fig.add_subplot(2, 2, 1, projection='3d')

    ts = np.linspace(0, 1, 100)
    true_ps = np.array([zero_offset_bezier(true_pos_controls, t) for t in ts])
    estimated_ps = np.array([zero_offset_bezier(estimated_pos_controls, t) for t in ts])

    ax.plot(true_ps[:, 0], true_ps[:, 1], true_ps[:, 2], '-b')
    ax.plot(estimated_ps[:, 0], estimated_ps[:, 1], estimated_ps[:, 2], '-r')
    #ax.plot(true_landmarks[:,0], true_landmarks[:,1], true_landmarks[:,2], '.k')

    ax = fig.add_subplot(2, 2, 2)
    ax.plot(true_accel_timestamps, true_accel_readings, '-', label='true')
    ax.plot(observed_accel_timestamps, observed_accel_readings, 'x', label='observed')
    ax.plot(true_accel_timestamps, estimated_accel_readings, ':', label='estimated')
    ax.legend()
    ax.set_xlim(-.1, 1.5)

    ax = fig.add_subplot(2, 2, 3)
    ax.plot(true_pfeatures[1, :, 0], true_pfeatures[1, :, 1], 'x', label='true', alpha=.8)
    ax.plot(estimated_pfeatures[1, :, 0], estimated_pfeatures[1, :, 1], 'o', label='estimated', alpha=.4)

    ax = fig.add_subplot(2, 2, 4)
    ax.plot(true_pfeatures[-1, :, 0], true_pfeatures[-1, :, 1], '.', label='true', alpha=.8)
    ax.plot(observed_pfeatures[-1, :, 0], observed_pfeatures[-1, :, 1], 'x', label='observed', alpha=.8)
    ax.plot(estimated_pfeatures[-1, :, 0], estimated_pfeatures[-1, :, 1], 'o', label='estimated', alpha=.4)

    plt.show()

def predict_feature(pos_controls, orient_controls, landmark, t):
    r = cayley(zero_offset_bezier(orient_controls, t))
    p = zero_offset_bezier(pos_controls, t)
    return normalized(np.dot(r, landmark - p))

def evaluate1(steps=10, maze_name='mazes/rect_6x10_obstacles.map', file_obj=None, VERBOSE=2):
    """Test to evaluate the localization performance in one maze"""
    hit_rate_filter, manhattan_dist_filter, euclidean_dist_filter = [], [], []
    hit_rate_smooth, manhattan_dist_smooth, euclidean_dist_smooth = [], [], []
    hit_rate_viterbi, manhattan_dist_viterbi, euclidean_dist_viterbi = [], [], []

    robot = init_maze(maze_name)
    fp = robot.maze.get_free_positions()
    for ipos in fp:
        if VERBOSE >= 1: print('------------------------------------')
        if VERBOSE >= 1: print(np.round(100 * (fp.index(ipos) + 1) / len(fp), 0),
                               '%\tof initial positions')
        robot.position = ipos if ROBOT_MODEL == 'NESW' else (ipos[0], ipos[1], rnd.randint(0, 3))
        states, obs = robot.simulate(n_steps=steps)
        initial_belief = normalized({pos: 1 for pos in robot.get_states()})

        # FILTERING
        if VERBOSE >= 2: print('Running filtering...')
        beliefs = forward(initial_belief, obs, copy.deepcopy(robot))
        most_beliefs = []
        for state, belief in zip(states, beliefs):
            most_belief_state = sorted(belief.items(), key=lambda x: x[1], reverse=True)[0][0]
            most_beliefs.append(most_belief_state)
            if VERBOSE >= 4: print('Real state:', state, '| Best belief:', most_belief_state)
        hit_rate_filter.append(get_hit_rate(states, most_beliefs))
        manhattan_dist_filter.append(get_manhattan_dist(states, most_beliefs))
        euclidean_dist_filter.append(get_euclidean_dist(states, most_beliefs))
        if VERBOSE >= 3: print('hit rate =', hit_rate_filter,
                               '\nmanhattan dist =', manhattan_dist_filter,
                               '\neuclidean dist =', euclidean_dist_filter)

        # SMOOTHING
        if VERBOSE >= 2: print('Running smoothing...')
        beliefs = forwardbackward(initial_belief, obs, copy.deepcopy(robot))
        most_beliefs = []
        for state, belief in zip(states, beliefs):
            most_belief_state = sorted(belief.items(), key=lambda x: x[1], reverse=True)[0][0]
            most_beliefs.append(most_belief_state)
            if VERBOSE >= 4: print('Real state:', state, '| Best belief:', most_belief_state)
        hit_rate_smooth.append(get_hit_rate(states, most_beliefs))
        manhattan_dist_smooth.append(get_manhattan_dist(states, most_beliefs))
        euclidean_dist_smooth.append(get_euclidean_dist(states, most_beliefs))
        if VERBOSE >= 3: print('hit rate =', hit_rate_smooth,
                               '\nmanhattan dist =', manhattan_dist_smooth,
                               '\neuclidean dist =', euclidean_dist_smooth)

        # VITERBI
        if VERBOSE >= 2: print('Running Viterbi...')
        ml_states, max_msgs = viterbi(initial_belief, obs, copy.deepcopy(robot))
        if VERBOSE >= 4:
            for real, est in zip(states, ml_states):
                print('Real pos:', real, '| ML Estimate:', est)
        hit_rate_viterbi.append(get_hit_rate(states, ml_states))
        manhattan_dist_viterbi.append(get_manhattan_dist(states, ml_states))
        euclidean_dist_viterbi.append(get_euclidean_dist(states, ml_states))
        if VERBOSE >= 3: print('hit rate =', hit_rate_viterbi,
                               '\nmanhattan dist =', manhattan_dist_viterbi,
                               '\neuclidean dist =', euclidean_dist_viterbi)
        if VERBOSE >= 2: print('------------------------------------')

    if VERBOSE >= 2: print('------------------------------------')

    hit_rate_filter = np.median(sorted(hit_rate_filter))
    manhattan_dist_filter = np.median(sorted(manhattan_dist_filter))
    euclidean_dist_filter = np.median(sorted(euclidean_dist_filter))

    hit_rate_smooth = np.median(sorted(hit_rate_smooth))
    manhattan_dist_smooth = np.median(sorted(manhattan_dist_smooth))
    euclidean_dist_smooth = np.median(sorted(euclidean_dist_smooth))

    hit_rate_viterbi = np.median(sorted(hit_rate_viterbi))
    manhattan_dist_viterbi = np.median(sorted(manhattan_dist_viterbi))
    euclidean_dist_viterbi = np.median(sorted(euclidean_dist_viterbi))

    if VERBOSE >= 2:
        print('hit rate =', hit_rate_filter,
              '\nmanhattan dist =', manhattan_dist_filter,
              '\neuclidean dist =', euclidean_dist_filter)
        print('hit rate =', hit_rate_smooth,
              '\nmanhattan dist =', manhattan_dist_smooth,
              '\neuclidean dist =', euclidean_dist_smooth)
        print('hit rate =', hit_rate_viterbi,
              '\nmanhattan dist =', manhattan_dist_viterbi,
              '\neuclidean dist =', euclidean_dist_viterbi)

    if file_obj:
        file_obj.write(
            str(hit_rate_filter) + '\t' + str(manhattan_dist_filter) + '\t' +
            str(euclidean_dist_filter) + '\n')
        file_obj.write(
            str(hit_rate_smooth) + '\t' + str(manhattan_dist_smooth) + '\t' +
            str(euclidean_dist_smooth) + '\n')
        file_obj.write(
            str(hit_rate_viterbi) + '\t' + str(manhattan_dist_viterbi) + '\t' +
            str(euclidean_dist_viterbi) + '\n')

def run_simulation(): np.random.seed(1) # # Construct ground truth # num_frames = 5 num_landmarks = 50 num_imu_readings = 80 bezier_degree = 4 out = 'out/position_only_bezier3' print 'Num landmarks:', num_landmarks print 'Num frames:', num_frames print 'Num IMU readings:', num_imu_readings print 'Bezier curve degree:', bezier_degree if not os.path.isdir(out): os.mkdir(out) # Both splines should start at 0,0,0 frame_times = np.linspace(0, .9, num_frames) imu_times = np.linspace(0, 1, num_imu_readings) true_rot_controls = np.random.randn(bezier_degree-1, 3) true_pos_controls = np.random.randn(bezier_degree-1, 3) true_landmarks = np.random.randn(num_landmarks, 3) true_frame_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in frame_times]) true_frame_orientations = np.array(map(cayley, true_frame_cayleys)) true_frame_positions = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in frame_times]) true_imu_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in imu_times]) true_imu_orientations = np.array(map(cayley, true_imu_cayleys)) true_gravity_magnitude = 9.8 true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude true_accel_bias = np.random.randn(3) true_global_accels = np.array([bezier.zero_offset_bezier_second_deriv(true_pos_controls, t) for t in imu_times]) true_accels = np.array([np.dot(R, a + true_gravity) + true_accel_bias for R, a in zip(true_imu_orientations, true_global_accels)]) true_features = np.array([[normalized(np.dot(R, x-p)) for x in true_landmarks] for R, p in zip(true_frame_orientations, true_frame_positions)]) print np.min(true_features.reshape((-1, 3)), axis=0) print np.max(true_features.reshape((-1, 3)), axis=0) # # Add sensor noise # accel_noise = 0#0.001 feature_noise = 0#0.01 orientation_noise = 0.01 observed_frame_orientations = true_frame_orientations.copy() observed_imu_orientations = true_imu_orientations.copy() observed_features = true_features.copy() observed_accels = true_accels.copy() if orientation_noise > 0: for i, R in enumerate(observed_frame_orientations): R_noise = SO3.exp(np.random.randn(3)*orientation_noise) observed_frame_orientations[i] = np.dot(R_noise, R) for i, R in enumerate(observed_imu_orientations): R_noise = SO3.exp(np.random.randn(3)*orientation_noise) observed_imu_orientations[i] = np.dot(R_noise, R) if accel_noise > 0: observed_accels += np.random.randn(*observed_accels.shape) * accel_noise if feature_noise > 0: observed_features += np.random.rand(*observed_features.shape) * feature_noise # # Construct symbolic versions of the above # position_offs = 0 accel_bias_offset = position_offs + (bezier_degree-1)*3 gravity_offset = accel_bias_offset + 3 num_vars = gravity_offset + 3 sym_vars = [Polynomial.coordinate(i, num_vars, Fraction) for i in range(num_vars)] sym_pos_controls = np.reshape(sym_vars[position_offs:position_offs+(bezier_degree-1)*3], (bezier_degree-1, 3)) sym_accel_bias = np.asarray(sym_vars[accel_bias_offset:accel_bias_offset+3]) sym_gravity = np.asarray(sym_vars[gravity_offset:gravity_offset+3]) true_vars = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity)) assert len(true_vars) == len(sym_vars) # # Compute residuals # epipolar_residuals = evaluate_epipolar_residuals(sym_pos_controls, frame_times, observed_frame_orientations, observed_features) accel_residuals = evaluate_accel_residuals(sym_pos_controls, sym_accel_bias, sym_gravity, imu_times, observed_accels, observed_imu_orientations) residuals = np.hstack((accel_residuals, 
epipolar_residuals)) print '\nNum vars:', num_vars print 'Num residuals:', len(residuals) print '\nResiduals:', len(residuals) cost = Polynomial(num_vars) for r in residuals: cost += r*r print ' %f (degree=%d, length=%d)' % (r(*true_vars), r.total_degree, len(r)) print '\nCost:' print ' Num terms: %d' % len(cost) print ' Degree: %d' % cost.total_degree # Solve A, b, k = quadratic_form(cost) estimated_vars = np.squeeze(np.linalg.solve(A*2, -b)) estimated_pos_controls = np.reshape(estimated_vars[position_offs:position_offs+(bezier_degree-1)*3], (bezier_degree-1, 3)) estimated_positions = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in frame_times]) estimated_accel_bias = np.asarray(estimated_vars[accel_bias_offset:accel_bias_offset+3]) estimated_gravity = np.asarray(estimated_vars[gravity_offset:gravity_offset+3]) re_estimated_gravity = normalized(estimated_gravity) * true_gravity_magnitude print '\nEstimated:' print estimated_vars print '\nGround truth:' print true_vars print '\nTotal Error:', np.linalg.norm(estimated_vars - true_vars) print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias) print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity) print ' True gravity:', true_gravity print ' Estimated gravity:', estimated_gravity print ' Estimated gravity magnitude:', np.linalg.norm(estimated_gravity) print ' Re-normalized gravity error: ', np.linalg.norm(re_estimated_gravity - true_gravity) for i in range(num_frames): print 'Frame %d error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i])) fig = plt.figure(figsize=(14,6)) ax = fig.add_subplot(1, 2, 1, projection='3d') ts = np.linspace(0, 1, 100) true_ps = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in ts]) estimated_ps = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in ts]) ax.plot(true_ps[:, 0], true_ps[:, 1], true_ps[:, 2], '-b') ax.plot(estimated_ps[:, 0], estimated_ps[:, 1], estimated_ps[:, 2], '-r') plt.show()
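# Why solving A*2 x = -b recovers the minimizer above: quadratic_form() appears to
# write the cost as c(x) = x'Ax + b'x + k, whose gradient 2Ax + b vanishes at the
# optimum. A tiny self-contained numpy check with toy values (not the A, b built above):
import numpy as np

A_toy = np.array([[4., 1.], [1., 3.]])    # symmetric positive definite
b_toy = np.array([1., 2.])
x_star = np.linalg.solve(A_toy * 2, -b_toy)
assert np.allclose(2 * A_toy.dot(x_star) + b_toy, 0.)   # gradient is zero at x_star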
def run_sfm(): # Construct symbolic problem num_landmarks = 4 num_frames = 2 print 'Num observations: ', num_landmarks * num_frames * 2 print 'Num vars: ', num_frames*6 + num_landmarks*3 + num_frames*num_landmarks true_landmarks = np.random.randn(num_landmarks, 3) true_positions = np.random.rand(num_frames, 3) true_cayleys = np.random.rand(num_frames, 3) true_qs = map(cayley_mat, true_cayleys) true_betas = map(cayley_denom, true_cayleys) true_rotations = [(q/b) for (q,b) in zip(true_qs, true_betas)] true_uprojections = [[np.dot(R, x-p) for x in true_landmarks] for R,p in zip(true_rotations, true_positions)] true_projections = [[normalized(zu) for zu in row] for row in true_uprojections] true_alphas = [[np.linalg.norm(zu) for zu in row] for row in true_uprojections] true_vars = np.hstack((true_cayleys.flatten(), true_positions.flatten(), true_landmarks.flatten(), np.asarray(true_alphas).flatten())) #true_projection_mat = np.reshape(true_projections, (num_frames, num_landmarks, 2)) for i in range(num_frames): p = true_positions[i] q = true_qs[i] beta = true_betas[i] for j in range(num_landmarks): x = true_landmarks[j] z = true_projections[i][j] alpha = true_alphas[i][j] print alpha * beta * z - np.dot(q, x-p) # construct symbolic versions of the above s_offs = 0 p_offs = s_offs + num_frames*3 x_offs = p_offs + num_frames*3 a_offs = x_offs + num_landmarks*3 num_vars = a_offs + num_landmarks*num_frames sym_vars = [Polynomial.coordinate(i, num_vars, Fraction) for i in range(num_vars)] sym_cayleys = np.reshape(sym_vars[s_offs:s_offs+num_frames*3], (num_frames, 3)) sym_positions = np.reshape(sym_vars[p_offs:p_offs+num_frames*3], (num_frames, 3)) sym_landmarks = np.reshape(sym_vars[x_offs:x_offs+num_landmarks*3], (num_landmarks, 3)) sym_alphas = np.reshape(sym_vars[a_offs:], (num_frames, num_landmarks)) residuals = [] for i in range(num_frames): sym_p = sym_positions[i] sym_s = sym_cayleys[i] for j in range(num_landmarks): sym_x = sym_landmarks[j] sym_a = sym_alphas[i,j] true_z = true_projections[i][j] residual = np.dot(cayley_mat(sym_s), sym_x-sym_p) - sym_a * cayley_denom(sym_s) * true_z residuals.extend(residual) print 'Residuals:' cost = Polynomial(num_vars) for residual in residuals: cost += np.dot(residual, residual) print ' ',residual(*true_vars) #ri.num_vars, len(true_vars) print '\nGradients:' gradient = [cost.partial_derivative(i) for i in range(num_vars)] for gi in gradient: print gi(*true_vars) j = np.array([[r.partial_derivative(i)(*true_vars) for i in range(num_vars)] for r in residuals]) print '\nJacobian singular values:' print j.shape u, s, v = np.linalg.svd(j) print s print '\nHessian eigenvalues:' h = np.dot(j.T, j) print h.shape print np.linalg.eigvals(h)
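# A hedged sketch of the Cayley parameterization that cayley_mat / cayley_denom
# appear to implement: R(s) = M(s) / beta(s) with M(s) polynomial in s and
# beta(s) = 1 + s.s, which is why the sanity check above can print
# alpha*beta*z - M(s)(x - p) ~= 0 without any division. The real helpers are not
# shown here, so treat these definitions as assumptions.
import numpy as np

def skew(s):
    return np.array([[0., -s[2], s[1]],
                     [s[2], 0., -s[0]],
                     [-s[1], s[0], 0.]])

def cayley_mat_sketch(s):
    s = np.asarray(s, dtype=float)
    return (1. - s.dot(s)) * np.eye(3) + 2. * np.outer(s, s) + 2. * skew(s)

def cayley_denom_sketch(s):
    return 1. + np.dot(s, s)

s = np.random.randn(3)
R = cayley_mat_sketch(s) / cayley_denom_sketch(s)
assert np.allclose(R.dot(R.T), np.eye(3))      # R is orthogonal
assert np.isclose(np.linalg.det(R), 1.)        # and a proper rotation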
def run_simulation_nonsymbolic(): np.random.seed(1) # # Construct ground truth # num_frames = 5 num_landmarks = 50 num_imu_readings = 80 bezier_degree = 4 out = 'out/position_only_bezier3' print 'Num landmarks:', num_landmarks print 'Num frames:', num_frames print 'Num IMU readings:', num_imu_readings print 'Bezier curve degree:', bezier_degree if not os.path.isdir(out): os.mkdir(out) # Both splines should start at 0,0,0 frame_times = np.linspace(0, .9, num_frames) accel_timestamps = np.linspace(0, 1, num_imu_readings) true_rot_controls = np.random.randn(bezier_degree-1, 3) true_pos_controls = np.random.randn(bezier_degree-1, 3) true_landmarks = np.random.randn(num_landmarks, 3) true_frame_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in frame_times]) true_frame_orientations = np.array(map(cayley, true_frame_cayleys)) true_frame_positions = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in frame_times]) true_imu_cayleys = np.array([bezier.zero_offset_bezier(true_rot_controls, t) for t in accel_timestamps]) true_imu_orientations = np.array(map(cayley, true_imu_cayleys)) true_gravity_magnitude = 9.8 true_gravity = normalized(np.random.rand(3)) * true_gravity_magnitude true_accel_bias = np.random.randn(3) true_global_accels = np.array([bezier.zero_offset_bezier_second_deriv(true_pos_controls, t) for t in accel_timestamps]) true_accels = np.array([np.dot(R, a + true_gravity) + true_accel_bias for R, a in zip(true_imu_orientations, true_global_accels)]) true_features = np.array([[normalized(np.dot(R, x-p)) for x in true_landmarks] for R, p in zip(true_frame_orientations, true_frame_positions)]) print np.min(true_features.reshape((-1, 3)), axis=0) print np.max(true_features.reshape((-1, 3)), axis=0) # # Add sensor noise # accel_noise = 0#0.001 feature_noise = 0#0.01 orientation_noise = 0.01 observed_frame_orientations = true_frame_orientations.copy() observed_imu_orientations = true_imu_orientations.copy() observed_features = true_features.copy() observed_accels = true_accels.copy() if orientation_noise > 0: for i, R in enumerate(observed_frame_orientations): R_noise = SO3.exp(np.random.randn(3)*orientation_noise) observed_frame_orientations[i] = np.dot(R_noise, R) for i, R in enumerate(observed_imu_orientations): R_noise = SO3.exp(np.random.randn(3)*orientation_noise) observed_imu_orientations[i] = np.dot(R_noise, R) if accel_noise > 0: observed_accels += np.random.randn(*observed_accels.shape) * accel_noise if feature_noise > 0: observed_features += np.random.rand(*observed_features.shape) * feature_noise position_offs = 0 accel_bias_offset = position_offs + (bezier_degree-1)*3 gravity_offset = accel_bias_offset + 3 true_vars = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity)) # # Compute system non-symbolically # accel_r = evaluate_accel_residuals(np.zeros((bezier_degree-1, 3)), np.zeros(3), np.zeros(3), accel_timestamps, observed_accels, observed_imu_orientations) accel_j = evaluate_accel_jacobians(bezier_degree-1, accel_timestamps, observed_imu_orientations) epipolar_r = evaluate_epipolar_residuals(np.zeros((bezier_degree-1, 3)), frame_times, observed_frame_orientations, observed_features) epipolar_j = evaluate_epipolar_jacobians(bezier_degree-1, frame_times, observed_frame_orientations, observed_features) epipolar_j = np.hstack((epipolar_j, np.zeros((epipolar_j.shape[0], 6)))) residual = np.hstack((accel_r, epipolar_r)) jacobian = np.vstack((accel_j, epipolar_j)) # # Solve # JtJ = np.dot(jacobian.T, jacobian) Jtr = 
np.dot(jacobian.T, residual) estimated_vars = np.squeeze(np.linalg.solve(JtJ, -Jtr)) # # Unpack result and compute error # estimated_pos_controls = np.reshape(estimated_vars[position_offs:position_offs+(bezier_degree-1)*3], (bezier_degree-1, 3)) estimated_positions = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in frame_times]) estimated_accel_bias = np.asarray(estimated_vars[accel_bias_offset:accel_bias_offset+3]) estimated_gravity = np.asarray(estimated_vars[gravity_offset:gravity_offset+3]) re_estimated_gravity = normalized(estimated_gravity) * true_gravity_magnitude print '\nEstimated:' print estimated_vars print '\nGround truth:' print true_vars print '\nTotal Error:', np.linalg.norm(estimated_vars - true_vars) print 'Accel bias error:', np.linalg.norm(estimated_accel_bias - true_accel_bias) print 'Gravity error:', np.linalg.norm(estimated_gravity - true_gravity) print ' True gravity:', true_gravity print ' Estimated gravity:', estimated_gravity print ' Estimated gravity magnitude:', np.linalg.norm(estimated_gravity) print ' Re-normalized gravity error: ', np.linalg.norm(re_estimated_gravity - true_gravity) for i in range(num_frames): print 'Frame %d error: %f' % (i, np.linalg.norm(estimated_positions[i] - true_frame_positions[i])) fig = plt.figure(figsize=(14,6)) ax = fig.add_subplot(1, 2, 1, projection='3d') ts = np.linspace(0, 1, 100) true_ps = np.array([bezier.zero_offset_bezier(true_pos_controls, t) for t in ts]) estimated_ps = np.array([bezier.zero_offset_bezier(estimated_pos_controls, t) for t in ts]) ax.plot(true_ps[:, 0], true_ps[:, 1], true_ps[:, 2], '-b') ax.plot(estimated_ps[:, 0], estimated_ps[:, 1], estimated_ps[:, 2], '-r') plt.show()
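# The solve above is a single Gauss-Newton step from x = 0: because the stacked
# residual is linear in the unknowns here, r(x) = r0 + J x, minimizing ||r(x)||^2
# reduces to the normal equations (J'J) x = -J' r0, i.e. JtJ x = -Jtr. A toy check
# with made-up r0 and J (not the arrays built above), compared against the
# numerically safer pseudo-inverse route:
import numpy as np

J_toy = np.random.randn(20, 4)
r0_toy = np.random.randn(20)
x_normal = np.linalg.solve(J_toy.T.dot(J_toy), -J_toy.T.dot(r0_toy))
x_pinv = np.linalg.pinv(J_toy).dot(-r0_toy)
assert np.allclose(x_normal, x_pinv)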
def run_position_only_spline_epipolar(): # # Construct ground truth # num_landmarks = 50 num_frames = 4 num_imu_readings = 80 bezier_degree = 4 out = 'out/position_only_bezier3' print 'Num landmarks:', num_landmarks print 'Num frames:', num_frames print 'Num IMU readings:', num_imu_readings print 'Bezier curve degree:', bezier_degree if not os.path.isdir(out): os.mkdir(out) # Both splines should start at 0,0,0 frame_times = np.linspace(0, .9, num_frames) imu_times = np.linspace(0, 1, num_imu_readings) true_rot_controls = np.random.rand(bezier_degree-1, 3) true_pos_controls = np.random.rand(bezier_degree-1, 3) true_landmarks = np.random.randn(num_landmarks, 3) true_positions = np.array([zero_offset_bezier(true_pos_controls, t) for t in frame_times]) true_cayleys = np.array([zero_offset_bezier(true_rot_controls, t) for t in frame_times]) true_rotations = map(cayley, true_cayleys) true_imu_cayleys = np.array([zero_offset_bezier(true_rot_controls, t) for t in imu_times]) true_imu_rotations = map(cayley, true_imu_cayleys) true_gravity = normalized(np.random.rand(3)) * 9.8 true_accel_bias = np.random.rand(3) true_global_accels = np.array([zero_offset_bezier_second_deriv(true_pos_controls, t) for t in imu_times]) true_accels = [np.dot(R, a + true_gravity) + true_accel_bias for R, a in zip(true_imu_rotations, true_global_accels)] true_uprojections = [[np.dot(R, x-p) for x in true_landmarks] for R, p in zip(true_rotations, true_positions)] true_projections = [[normalized(zu) for zu in row] for row in true_uprojections] # # Construct symbolic versions of the above # position_offs = 0 accel_bias_offset = position_offs + (bezier_degree-1)*3 gravity_offset = accel_bias_offset + 3 num_vars = gravity_offset + 3 sym_vars = [Polynomial.coordinate(i, num_vars, Fraction) for i in range(num_vars)] sym_pos_controls = np.reshape(sym_vars[position_offs:position_offs+(bezier_degree-1)*3], (bezier_degree-1, 3)) sym_accel_bias = np.asarray(sym_vars[accel_bias_offset:accel_bias_offset+3]) sym_gravity = np.asarray(sym_vars[gravity_offset:gravity_offset+3]) true_vars = np.hstack((true_pos_controls.flatten(), true_accel_bias, true_gravity)) assert len(true_vars) == len(sym_vars) residuals = [] # # Accel residuals # print '\nAccel residuals:' for i in range(num_imu_readings): true_R = true_imu_rotations[i] sym_global_accel = zero_offset_bezier_second_deriv(sym_pos_controls, imu_times[i]) sym_accel = np.dot(true_R, sym_global_accel + sym_gravity) + sym_accel_bias residual = sym_accel - true_accels[i] for i in range(3): print ' Degree of global accel = %d, local accel = %d, residual = %d' % \ (sym_global_accel[i].total_degree, sym_accel[i].total_degree, residual[i].total_degree) residuals.extend(residual) # # Epipolar residuals # p0 = np.zeros(3) R0 = np.eye(3) for i in range(1, num_frames): true_s = true_cayleys[i] true_R = cayley_mat(true_s) sym_p = zero_offset_bezier(sym_pos_controls, frame_times[i]) sym_E = essential_matrix(R0, p0, true_R, sym_p) for j in range(num_landmarks): z = true_projections[i][j] z0 = true_projections[0][j] residual = np.dot(z, np.dot(sym_E, z0)) residuals.append(residual) print '\nNum vars:', num_vars print 'Num residuals:', len(residuals) print '\nResiduals:', len(residuals) cost = Polynomial(num_vars) for r in residuals: cost += r*r print ' %f (degree=%d, length=%d)' % (r(*true_vars), r.total_degree, len(r)) print '\nCost:' print ' Num terms: %d' % len(cost) print ' Degree: %d' % cost.total_degree for term in cost: print ' ',term print '\nGradients:' gradients = cost.partial_derivatives() 
for gradient in gradients: print ' %f (degree=%d, length=%d)' % (gradient(*true_vars), gradient.total_degree, len(gradient)) jacobians = np.array([r.partial_derivatives() for r in residuals]) J = evaluate_array(jacobians, *true_vars) U, S, V = np.linalg.svd(J) print '\nJacobian singular values:' print J.shape print S print '\nHessian eigenvalues:' H = np.dot(J.T, J) print H.shape print np.linalg.eigvals(H) null_space_dims = sum(np.abs(S) < 1e-5) print '\nNull space dimensions:', null_space_dims if null_space_dims > 0: for i in range(null_space_dims): print ' ', V[-(i+1)] null_monomial = (0,) * num_vars coordinate_monomials = [list(var.monomials)[0] for var in sym_vars] A, _ = matrix_form(gradients, coordinate_monomials) b, _ = matrix_form(gradients, [null_monomial]) b = np.squeeze(b) AA, bb, kk = quadratic_form(cost) estimated_vars = np.squeeze(np.linalg.solve(AA*2, -b)) print '\nEstimated:' print estimated_vars print '\nGround truth:' print true_vars print '\nError:' print np.linalg.norm(estimated_vars - true_vars) # Output to file write_polynomials(cost, out+'/cost.txt') write_polynomials(residuals, out+'/residuals.txt') write_polynomials(gradients, out+'/gradients.txt') write_polynomials(jacobians.flat, out+'/jacobians.txt') write_solution(true_vars, out+'/solution.txt')
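# Sketch of the epipolar constraint the residuals above encode. With bearing
# vectors z0 = R0 (x - p0) and z1 = R1 (x - p1) of the same landmark x, one has
# z1 = R_rel z0 + t with R_rel = R1 R0' and t = R1 (p0 - p1), so
# z1' [t]x R_rel z0 = 0. This is one standard convention; the exact form used by
# essential_matrix() above is an assumption.
import numpy as np

def skew(v):
    return np.array([[0., -v[2], v[1]],
                     [v[2], 0., -v[0]],
                     [-v[1], v[0], 0.]])

def essential_matrix_sketch(R0, p0, R1, p1):
    R_rel = R1.dot(R0.T)
    t = R1.dot(p0 - p1)
    return skew(t).dot(R_rel)

# quick numerical check on a hypothetical two-view configuration
theta = 0.3
R1 = np.array([[np.cos(theta), -np.sin(theta), 0.],
               [np.sin(theta),  np.cos(theta), 0.],
               [0., 0., 1.]])
R0, p0, p1 = np.eye(3), np.zeros(3), np.array([1., 0.5, -0.2])
x = np.random.randn(3)
z0, z1 = R0.dot(x - p0), R1.dot(x - p1)
assert abs(z1.dot(essential_matrix_sketch(R0, p0, R1, p1).dot(z0))) < 1e-9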
gap_data[0, idx, jdx] = eigvalues[1] - eigvalues[0] gap_data[1, idx, jdx] = eigvalues[2] - eigvalues[0] gap_data[2, idx, jdx] = eigvalues[3] - eigvalues[0] _gen_eigenvalue_gap_plot(sigma_vec, gap_data, '../plots/eigenvalue_gap_vs_noise.pdf', 'std. deviation $\sigma$ (m)', 'eigenvalue gap') outlier_rate_vec = np.linspace(0.0, 0.7, 8) gap_data_outlier = np.zeros((3, len(outlier_rate_vec), N_runs)) for idx in range(len(outlier_rate_vec)): sigma = 0.01*np.ones(N) n_outliers = int(N*outlier_rate_vec[idx]) for jdx in range(N_runs): _, x_1, x_2 = gen_sim_data(N, sigma) perm = np.random.permutation(N) outlier_inds = perm[0:n_outliers] x_2[outlier_inds, :] = np.random.rand(n_outliers, 3) x_2[outlier_inds, :] = normalized(x_2[outlier_inds, :], axis=1) A = build_A(x_1, x_2, sigma**2) # A = A/np.linalg.norm(A, ord='fro') eigvalues = np.linalg.eigvalsh(A) gap_data_outlier[0, idx, jdx] = eigvalues[1] - eigvalues[0] gap_data_outlier[1, idx, jdx] = eigvalues[2] - eigvalues[0] gap_data_outlier[2, idx, jdx] = eigvalues[3] - eigvalues[0] _gen_eigenvalue_gap_plot(outlier_rate_vec*100, gap_data_outlier, '../plots/eigenvalue_gap_vs_outlier_rate.pdf', 'outlier rate (\%)', 'eigenvalue gap')
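# np.linalg.eigvalsh returns eigenvalues in ascending order, so the gaps recorded
# above measure how well separated the smallest eigenvalue (whose eigenvector
# presumably carries the estimate) is from the next three. A minimal sketch on a
# random symmetric matrix (placeholder data, not the output of build_A):
import numpy as np

M = np.random.randn(4, 4)
A_sym = M + M.T
eigvalues = np.linalg.eigvalsh(A_sym)     # ascending order
gaps = eigvalues[1:4] - eigvalues[0]      # the same three quantities stored per run above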
def load_data(dataset, data_dir, batch_size, nc, target_hw=(256, 256), data_type='train2014', shuffle=False, sample_size=None): img_txt = os.path.join(data_dir, data_type, 'images.txt') lbl_txt = os.path.join(data_dir, data_type, 'labels.txt') with open(img_txt) as imgdata, open(lbl_txt) as lbldata: imgfiles = [line.rstrip('\n') for line in imgdata] lblfiles = [line.rstrip('\n') for line in lbldata] assert len(imgfiles) == len(lblfiles) files = zip(imgfiles, lblfiles) if sample_size: assert isinstance(sample_size, int) np.random.shuffle(files) files = files[:sample_size] remainder = len(files)%batch_size assert remainder >= 0 if remainder > 0: yield len(files) - remainder + batch_size else: yield len(files) rgbs = [] lbls = [] # batch_weights = [] cid_to_id_mapper = dataset.cids_to_ids_map() mp = np.arange(0, max(dataset.ids()) + 1) mp[cid_to_id_mapper.keys()] = cid_to_id_mapper.values() while True: remainder = len(files)%batch_size assert remainder >= 0 if shuffle: np.random.shuffle(files) if remainder > 0: # Procrustes files = files + files[:batch_size-remainder] assert len(files) % batch_size == 0 for imgfile, lblfile in files: img = cv2.imread(os.path.join(data_dir, data_type, imgfile)) img = cv2.resize(img, target_hw[:2], interpolation=cv2.INTER_NEAREST) img = normalized(img) rgbs.append(img) # load label image, keep a single channel (all three should be the same) # TODO: make this more robust/fast/efficient lbl = cv2.imread(os.path.join(data_dir, data_type, lblfile))[:, :, 0] lbl = cv2.resize(lbl, target_hw[:2], interpolation=cv2.INTER_NEAREST) lbl = mp[lbl] assert np.max(lbl) < len(dataset.ids()) # print('lbl shape', lbl.shape) # sample_weights = np.full(lbl.shape, 0.5) # print('sample weights shape', sample_weights.shape) # sample_weights[lbl > 0] = 1.5 # hack just for mscoco # batch_weights.append(sample_weights.ravel()) lbl = to_categorical(lbl, nb_classes=nc) lbls.append(lbl) if len(rgbs) == batch_size: yield np.array(rgbs), np.array(lbls)#, np.array(batch_weights) rgbs = [] lbls = []
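# A hedged usage sketch for the generator above: the first value it yields is the
# padding-adjusted number of samples per epoch, and every later yield is one
# (images, one-hot labels) batch. The dataset object, data directory, batch size
# and class count below are placeholders, not values from the original code.
batch_size = 8
gen = load_data(dataset, '/path/to/data', batch_size, nc=21)
samples_per_epoch = next(gen)                 # first yield: epoch size after padding
for _ in range(samples_per_epoch // batch_size):
    batch_imgs, batch_lbls = next(gen)        # subsequent yields: one training batch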
def simulate_trajectory(calibration, duration=5., num_frames=12, num_landmarks=50, num_imu_readings=100, degree=3, num_controls=8, accel_timestamp_noise=0., accel_reading_noise=0., accel_orientation_noise=0., frame_timestamp_noise=0., frame_orientation_noise=0., feature_noise=0.): num_knots = num_controls - degree + 1 spline_template = spline.SplineTemplate(np.linspace(0, duration, num_knots), degree, 3) print 'Num landmarks:', num_landmarks print 'Num frames:', num_frames print 'Num IMU readings:', num_imu_readings print 'Spline curve degree:', degree # Both splines should start at 0,0,0 true_frame_timestamps = np.linspace(0, duration, num_frames) true_accel_timestamps = np.linspace(0, duration, num_imu_readings) true_rot_curve = spline_template.build_random(.1) true_pos_curve = spline_template.build_random(first_control=np.zeros(3)) landmark_generator = 'normal' if landmark_generator == 'normal': true_landmarks = np.random.randn(num_landmarks, 3)*10 elif landmark_generator == 'near': true_landmarks = [] for i in range(num_landmarks): p = true_pos_curve.evaluate(true_frame_timestamps[i % len(true_frame_timestamps)]) true_landmarks.append(p + np.random.randn()*.1) elif landmark_generator == 'far': true_landmarks = [] for _ in range(num_landmarks): true_landmarks.append(utils.normalized(np.random.randn(3)) * 100000.) true_landmarks = np.asarray(true_landmarks) true_frame_orientations = np.array(map(cayley.cayley, true_rot_curve.evaluate(true_frame_timestamps))) true_gravity_magnitude = 9.8 true_gravity = utils.normalized(np.random.rand(3)) * true_gravity_magnitude true_accel_bias = np.random.randn(3) * .01 # Sample IMU readings true_imu_orientations = np.array(map(cayley.cayley, true_rot_curve.evaluate(true_accel_timestamps))) true_accel_readings = np.array([ sensor_models.predict_accel(true_pos_curve, true_rot_curve, true_accel_bias, true_gravity, t) for t in true_accel_timestamps]) # Sample features num_behind = 0 true_features = [] for frame_id, t in enumerate(true_frame_timestamps): r = cayley.cayley(true_rot_curve.evaluate(t)) p = true_pos_curve.evaluate(t) a = np.dot(calibration.camera_matrix, np.dot(calibration.imu_to_camera, r)) ys = np.dot(true_landmarks - p, a.T) for track_id, y in enumerate(ys): if y[2] > 0: true_features.append(structures.FeatureObservation(frame_id, track_id, geometry.pr(y))) else: num_behind += 1 if num_behind > 0: print '%d landmarks were behind the camera (and %d were in front)' % (num_behind, len(true_features)) true_features, true_landmarks = utils.renumber_tracks(true_features, true_landmarks, min_track_length=2) true_trajectory = structures.PositionEstimate(true_pos_curve, true_gravity, true_accel_bias, true_landmarks) # # Add sensor noise # observed_accel_timestamps = utils.add_white_noise(true_accel_timestamps, accel_timestamp_noise) observed_accel_readings = utils.add_white_noise(true_accel_readings, accel_reading_noise) observed_accel_orientations = utils.add_orientation_noise(true_imu_orientations, accel_orientation_noise) observed_frame_timestamps = utils.add_white_noise(true_frame_timestamps, frame_timestamp_noise) observed_frame_orientations = utils.add_orientation_noise(true_frame_orientations, frame_orientation_noise) observed_features = [] for f in true_features: observed_features.append(structures.FeatureObservation(f.frame_id, f.track_id, utils.add_white_noise(f.position, feature_noise))) measurements = structures.Measurements(observed_accel_timestamps, observed_accel_orientations, observed_accel_readings, observed_frame_timestamps, 
observed_frame_orientations, observed_features) return true_trajectory, measurements, spline_template
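# A hedged usage sketch: generate one synthetic trajectory plus noisy measurements
# and unpack the pieces downstream code consumes. `calibration` is a placeholder
# object; the only attributes the simulation uses are camera_matrix and
# imu_to_camera, and the noise levels below are illustrative assumptions.
true_trajectory, measurements, spline_template = simulate_trajectory(
    calibration,
    duration=5.,
    num_frames=12,
    num_landmarks=50,
    accel_reading_noise=1e-3,
    feature_noise=1.)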