def testGetCamDir2Plane2(self):
    planes = [self.plane1, self.plane2]
    mic_loc = np.array([0, 0, 0])
    cam_loc = np.array([1, 1, 1])
    space = SearchSpace(mic_loc, cam_loc, planes)
    direction = np.array([-1, 1, 1])
    camdir = space.get_camera_dir(direction)
    self.assertListEqual(list(camdir), list(np.array([-6, 4, 4]) / (68 ** .5)))
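# Sketch of where the expected value above comes from (the planes themselves
# are configured in setUp, which is not part of this excerpt): the microphone
# ray t * (-1, 1, 1) from mic_loc = (0, 0, 0) meets the plane at t = 5, giving
# the source point s = (-5, 5, 5). The camera direction is the unit vector
# from cam_loc = (1, 1, 1) to s:
#     s - cam_loc = (-6, 4, 4),   ||(-6, 4, 4)|| = sqrt(36 + 16 + 16) = sqrt(68)
# which matches np.array([-6, 4, 4]) / (68 ** .5).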
def testGetCameraDir3Plane(self):
    planes = [self.plane1, self.plane2, self.plane3]
    mic_loc = np.array([0, 0, 0])
    cam_loc = np.array([1, 1, 1])
    space = SearchSpace(mic_loc, cam_loc, planes)
    direction = np.array([-1, 1, 1])
    camdir = space.get_camera_dir(direction)
    self.assertListEqual(list(camdir), [-1, 0, 0])
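# Same reasoning for the three-plane case: the ray t * (-1, 1, 1) meets the
# nearest configured plane at t = 1, giving s = (-1, 1, 1); the camera vector
# is then s - cam_loc = (-2, 0, 0), which normalizes to [-1, 0, 0].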
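# A minimal sketch of the geometry these tests exercise, assuming (as the
# expected values suggest) that SearchSpace.get_camera_dir intersects the ray
# from the microphone along the estimated direction with a source plane and
# returns the unit vector from the camera to that intersection. Everything
# below (_camera_dir_sketch and its parameters) is illustrative, not the
# repo's actual API:
def _camera_dir_sketch(mic_loc, cam_loc, plane_normal, plane_offset, direction):
    # Ray: p(t) = mic_loc + t * direction.  Plane: n . (p - m) = 0.
    denom = plane_normal.dot(direction)
    if abs(denom) < 1e-12:
        return None  # Ray runs parallel to the plane
    t = plane_normal.dot(plane_offset - mic_loc) / denom
    if t < 0:
        return None  # Plane lies behind the microphone
    source = mic_loc + t * direction
    cam_vec = source - cam_loc
    return cam_vec / np.linalg.norm(cam_vec)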
def localize():
    global done
    # Setup search space
    # x vector points to front of class, -z vector points to floor
    teacher_plane = SourcePlane(TEACHER_NORMAL, TEACHER_OFFSET)
    student_plane = SourcePlane(STUDENT_NORMAL, STUDENT_OFFSET)
    space = SearchSpace(MIC_LOC, CAMERA_LOC, [teacher_plane, student_plane])

    # Setup camera
    forward = np.array([1, 0, 0])
    above = np.array([0, 0, 1])
    camera = SonyCamera(URL, forward, above)

    # Setup pyaudio instances
    pa = pyaudio.PyAudio()
    helper = AudioHelper(pa)
    localizer = DistributionLocalizer(mic_positions=mic_layout,
                                      dft_len=FFT_LENGTH,
                                      sample_rate=SAMPLE_RATE,
                                      n_theta=N_THETA,
                                      n_phi=N_PHI)

    # Setup STFT object
    stft = StftManager(dft_length=FFT_LENGTH,
                       window_length=WINDOW_LENGTH,
                       hop_length=HOP_LENGTH,
                       use_window_fcn=True,
                       n_channels=NUM_CHANNELS_IN,
                       dtype=DATA_TYPE)

    # Setup devices
    in_device = helper.get_input_device_from_user()
    if PLAY_AUDIO:
        out_device = helper.get_output_device_from_user()
    else:
        out_device = helper.get_default_output_device_info()

    # Setup streams
    in_stream = pa.open(rate=SAMPLE_RATE,
                        channels=NUM_CHANNELS_IN,
                        format=SAMPLE_TYPE,
                        frames_per_buffer=FRAMES_PER_BUF,
                        input=True,
                        input_device_index=int(in_device['index']),
                        stream_callback=read_in_data)
    out_stream = pa.open(rate=SAMPLE_RATE,
                         channels=NUM_CHANNELS_OUT,
                         format=SAMPLE_TYPE,
                         output=True,
                         frames_per_buffer=FRAMES_PER_BUF,
                         output_device_index=int(out_device['index']),
                         stream_callback=write_out_data)

    # Start recording/playing back
    in_stream.start_stream()
    out_stream.start_stream()

    # Start thread to check for user quit
    quit_thread = threading.Thread(target=check_for_quit)
    quit_thread.start()

    # Plotting setup
    if PLOT_CARTES:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        plt.show(block=False)
        scat = []
    if PLOT_SPACE:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Setup bounds
        xlo, xhi = (-5, DISTANCE_TO_TEACHER + 5)
        ylo, yhi = (-15, 15)
        zlo, zhi = (-15, 5)
        # Setup grid
        nx, ny = (200, 100)
        x = np.linspace(xlo, xhi, nx)
        y = np.linspace(ylo, yhi, ny)
        X, Y = np.meshgrid(x, y)
        # Height of the student plane: z = (n . m - n_x * X - n_y * Y) / n_z,
        # shifted down by 2 for display
        n, m = (STUDENT_NORMAL, STUDENT_OFFSET)
        TP = (n.dot(m) - n[0] * X - n[1] * Y) / n[2] - 2
        # Plot marker for mic
        m = MIC_LOC
        ax.plot([MIC_LOC[0]], [MIC_LOC[1]], [MIC_LOC[2]], 'r.', markersize=10.)
        # Plot marker for camera
        c = CAMERA_LOC
        ax.plot([CAMERA_LOC[0]], [CAMERA_LOC[1]], [CAMERA_LOC[2]], 'b.', markersize=10.)
        # Draw lines from camera and mic to source
        source_loc = np.array([10, 0, 0])
        source_point, = ax.plot([source_loc[0]], [source_loc[1]], [source_loc[2]],
                                'black', marker='.', markersize=10.)
        s = source_loc
        camera_dir, = ax.plot([c[0], m[0]], [c[1], m[1]], [c[2], m[2]], 'blue')
        mic_dir, = ax.plot([m[0], m[0]], [m[1], m[1]], [m[2], m[2]], 'red')
        #ax.plot_surface(X, Y, TP)
        ax.set_xlim(xlo, xhi)
        ax.set_ylim(ylo, yhi)
        ax.set_zlim(zlo, zhi)
        ax.view_init(elev=25, azim=-120)
        plt.show(block=False)
    if EXTERNAL_PLOT:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plt.show(block=False)

    count = 0
    prev_direc = np.array([0, 0, 0])
    direcs = localizer.get_directions()
    try:
        while in_stream.is_active() or out_stream.is_active():
            data_available = in_buf.wait_for_read(WINDOW_LENGTH, TIMEOUT)
            if data_available:
                # Get data from the circular buffer
                data = in_buf.read_samples(WINDOW_LENGTH)
                # Perform an stft
                stft.performStft(data)
                # Process dfts from windowed segments of input
                dfts = stft.getDFTs()
                d = localizer.get_3d_real_distribution(dfts)
                ind = np.argmax(d)
                u = 1.5 * direcs[:, ind]  # Direction of arrival
                if DO_TRACK and count % TRACKING_FREQ == 0:
                    #v = np.array([1, 0, 1])
                    v = u
                    direc = space.get_camera_dir(v)
                    if not direc.any():
                        direc = prev_direc
                    else:
                        prev_direc = direc
                    # Send camera new direction
                    camera.face_direction(direc)
                    if PLOT_SPACE:
                        if direc.any():
                            src = space.get_source_loc(u)
                            source_point.set_xdata([src[0]])
                            source_point.set_ydata([src[1]])
                            source_point.set_3d_properties(zs=[src[2]])
                            cam_src = CAMERA_LOC + 30 * direc
                            mic_src = MIC_LOC + 30 * u
                            # Update camera line
                            camera_dir.set_xdata([CAMERA_LOC[0], cam_src[0]])
                            camera_dir.set_ydata([CAMERA_LOC[1], cam_src[1]])
                            camera_dir.set_3d_properties(zs=[CAMERA_LOC[2], cam_src[2]])
                            # Update mic line
                            mic_dir.set_xdata([MIC_LOC[0], mic_src[0]])
                            mic_dir.set_ydata([MIC_LOC[1], mic_src[1]])
                            mic_dir.set_3d_properties(zs=[MIC_LOC[2], mic_src[2]])
                            plt.draw()
                # Take care of plotting
                if count % 1 == 0:
                    if PLOT_CARTES:
                        plt.cla()
                        ax.scatter(direcs[0, :], direcs[1, :], direcs[2, :], s=30, c=d[:])
                        ax.plot([0, u[0]], [0, u[1]], [0, u[2]], c='blue')
                        ax.set_xlim(-1, 1)
                        ax.set_ylim(-1, 1)
                        ax.set_zlim(0, 1)
                        plt.draw()
                count += 1
                # Get the istft of the processed data
                if PLAY_AUDIO:
                    new_data = stft.performIStft()
                    new_data = out_buf.reduce_channels(new_data,
                                                       NUM_CHANNELS_IN,
                                                       NUM_CHANNELS_OUT)
                    # Write out the new, altered data
                    if out_buf.get_available_write() >= WINDOW_LENGTH:
                        out_buf.write_samples(new_data)
                #time.sleep(.05)
    except KeyboardInterrupt:
        print "Program interrupted"
        done = True

    print "Cleaning up"
    in_stream.stop_stream()
    in_stream.close()
    out_stream.stop_stream()
    out_stream.close()
    pa.terminate()
    print "Done"
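# The stream callbacks passed to pa.open above (read_in_data, write_out_data)
# are defined elsewhere in the repo. A minimal sketch of the shape PyAudio
# expects them to have -- the (data, flag) return contract is PyAudio's
# documented callback API, while the buffer method names below are
# hypothetical stand-ins for whatever in_buf and out_buf actually expose:
def _read_in_data_sketch(in_data, frame_count, time_info, status):
    # Push the captured block into the circular buffer; the main loop blocks
    # on in_buf.wait_for_read until a full window has accumulated.
    in_buf.write_samples(in_data)  # hypothetical method name
    return (None, pyaudio.paContinue)

def _write_out_data_sketch(in_data, frame_count, time_info, status):
    # Pull processed samples back out of the output buffer for playback.
    data = out_buf.read_samples(frame_count)  # hypothetical method name
    return (data, pyaudio.paContinue)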
def localize():
    global switch_beamforming
    global DO_BEAMFORM
    global done
    # Setup search space
    source_plane = OrientedSourcePlane(SOURCE_PLANE_NORMAL,
                                       SOURCE_PLANE_UP,
                                       SOURCE_PLANE_OFFSET)
    space = SearchSpace(MIC_LOC, CAMERA_LOC, [source_plane],
                        MIC_FORWARD, MIC_ABOVE)

    # Setup camera
    camera = SonyCamera(URL, CAM_FORWARD, CAM_ABOVE)
    prev_direc = np.array([1., 0., 0.])
    if DO_TRACK:
        camera.face_direction(prev_direc)  # Will force login

    # Setup pyaudio instances
    pa = pyaudio.PyAudio()
    helper = AudioHelper(pa)
    listener = CommandListener()
    plot_manager = PlotManager('3d_vm_srp_')
    localizer = VonMisesTrackingLocalizer(mic_positions=mic_layout,
                                          search_space=space,
                                          n_particles=N_PARTICLES,
                                          state_kappa=STATE_KAPPA,
                                          #observation_kappa=OBS_KAPPA,
                                          observation_kappa=5,
                                          outlier_prob=.5,
                                          dft_len=FFT_LENGTH,
                                          sample_rate=SAMPLE_RATE,
                                          n_theta=N_THETA,
                                          n_phi=N_PHI)
    localizer2 = VonMisesTrackingLocalizer(mic_positions=mic_layout,
                                           search_space=space,
                                           n_particles=N_PARTICLES,
                                           state_kappa=STATE_KAPPA,
                                           #observation_kappa=OBS_KAPPA,
                                           observation_kappa=25,
                                           outlier_prob=0,
                                           dft_len=FFT_LENGTH,
                                           sample_rate=SAMPLE_RATE,
                                           n_theta=N_THETA,
                                           n_phi=N_PHI)
    localizer3 = VonMisesTrackingLocalizer(mic_positions=mic_layout,
                                           search_space=space,
                                           n_particles=N_PARTICLES,
                                           state_kappa=STATE_KAPPA,
                                           observation_kappa=OBS_KAPPA,
                                           outlier_prob=.6,
                                           dft_len=FFT_LENGTH,
                                           sample_rate=SAMPLE_RATE,
                                           n_theta=N_THETA,
                                           n_phi=N_PHI)
    beamformer = BeamFormer(mic_layout, SAMPLE_RATE)

    # Setup STFT object
    stft = StftManager(dft_length=FFT_LENGTH,
                       window_length=WINDOW_LENGTH,
                       hop_length=HOP_LENGTH,
                       use_window_fcn=True,
                       n_channels=NUM_CHANNELS_IN,
                       dtype=DATA_TYPE)

    # Setup devices
    in_device = helper.get_input_device_from_user()
    if PLAY_AUDIO:
        out_device = helper.get_output_device_from_user()
    else:
        out_device = helper.get_default_output_device_info()

    # Setup streams
    in_stream = pa.open(rate=SAMPLE_RATE,
                        channels=NUM_CHANNELS_IN,
                        format=SAMPLE_TYPE,
                        frames_per_buffer=FRAMES_PER_BUF,
                        input=True,
                        input_device_index=int(in_device['index']),
                        stream_callback=read_in_data)
    out_stream = pa.open(rate=SAMPLE_RATE,
                         channels=NUM_CHANNELS_OUT,
                         format=SAMPLE_TYPE,
                         output=True,
                         frames_per_buffer=FRAMES_PER_BUF,
                         output_device_index=int(out_device['index']),
                         stream_callback=write_out_data)

    # Start recording/playing back
    in_stream.start_stream()
    out_stream.start_stream()

    # Start thread to check for user quit
    listener.start_polling()

    # Setup directions and alignment matrices
    direcs = localizer.get_directions()
    align_mats = localizer.get_pos_align_mat()

    # Plotting setup
    if PLOT_PARTICLES:
        ml_color = 'r'
        color = 'b'
        particle_plot = ParticleHemispherePlot(
            N_PARTICLES, color, n_estimates=2, n_past_estimates=100,
            plot_lines=[False, True], elev=60, azim=45,
            estim_colors=[ml_color, color])
        #color = 'b'
        #particle_plot2 = ParticleHemispherePlot(
        #    N_PARTICLES, color, n_estimates=2, n_past_estimates=100,
        #    plot_lines=[False, True], elev=60, azim=45,
        #    estim_colors=[ml_color, color])
        #color = 'r'
        #particle_plot3 = ParticleHemispherePlot(
        #    N_PARTICLES, color, n_estimates=2, n_past_estimates=100,
        #    plot_lines=[False, True], elev=60, azim=45,
        #    estim_colors=[ml_color, color])
    if PLOT_POLAR:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='polar')
        ax.set_rlim(0, 1)
        plt.show(block=False)
        # Setup space for plotting in new coordinates
        spher_coords = localizer.get_spher_directions()
        theta = spher_coords[1, :]
        pol_plot, = plt.plot(theta, np.ones(theta.shape))
        post_plot, = plt.plot(theta, np.ones(theta.shape), 'green')
        ax.set_ylim(0, 1)
        if DO_BEAMFORM:
            pol_beam_plot, = plt.plot(theta, np.ones(theta.shape), 'red')
    if PLOT_CARTES:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        plt.show(block=False)
        x = localizer.to_spher_grid(direcs[0, :])
        y = localizer.to_spher_grid(direcs[1, :])
        z = localizer.to_spher_grid(direcs[2, :])
        #scat = ax.scatter(x, y, z, s=100)
    if EXTERNAL_PLOT:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plt.show(block=False)

    count = 0
    estimate = np.array([1., 0., 0.])
    estimate2 = np.array([1., 0., 0.])
    try:
        while in_stream.is_active() or out_stream.is_active():
            done = listener.quit()
            data_available = in_buf.wait_for_read(WINDOW_LENGTH, TIMEOUT)
            if data_available:
                if switch_beamforming:
                    DO_BEAMFORM = not DO_BEAMFORM
                    switch_beamforming = False
                # Get data from the circular buffer
                data = in_buf.read_samples(WINDOW_LENGTH)
                # Perform an stft
                stft.performStft(data)
                # Process dfts from windowed segments of input
                dfts = stft.getDFTs()
                rffts = mat.to_all_real_matlab_format(dfts)
                d, energy = localizer.get_distribution_real(rffts[:, :, 0], 'gcc')  # Use first hop
                # Find ml_est; ind is reused below to pick the alignment matrix
                ind = np.argmax(d)
                ml_est = direcs[:, ind]
                #print energy
                #if energy < 1500:
                #    continue
                post = localizer.get_distribution(rffts[:, :, 0])  # PyBayes EmpPdf
                post2 = localizer2.get_distribution(rffts[:, :, 0])
                post3 = localizer3.get_distribution(rffts[:, :, 0])
                # Get estimate from particles
                w = np.asarray(post.weights)
                ps = np.asarray(post.particles)
                w2 = np.asarray(post2.weights)
                ps2 = np.asarray(post2.particles)
                w3 = np.asarray(post3.weights)
                ps3 = np.asarray(post3.particles)
                #estimate2 = w2.dot(ps2)
                if DO_TRACK and count % TRACKING_FREQ == 0:
                    #v = np.array([1, 0, 1])
                    v = estimate
                    direc = space.get_camera_dir(v)
                    if direc is None or not direc.any():
                        direc = prev_direc
                    else:
                        direc[2] = -.5  # Override z to keep the camera pitched down
                        prev_direc = direc
                    # Send camera new direction
                    camera.face_direction(direc)
                # Do beam forming
                if DO_BEAMFORM:
                    align_mat = align_mats[:, :, ind]
                    filtered = beamformer.filter_real(rffts, align_mat)
                    mat.set_dfts_real(dfts, filtered, n_channels=2)
                # Take care of plotting
                if count % 1 == 0:
                    if PLOT_PARTICLES:
                        estimate = w.dot(ps)
                        estimate /= (mat.norm2(estimate) + consts.EPS)
                        particle_plot.update(ps, w, [ml_est, estimate])
                        #estimate2 = w2.dot(ps2)
                        #estimate2 /= (mat.norm2(estimate2) + consts.EPS)
                        #particle_plot2.update(ps2, w2, [ml_est, estimate2])
                        #estimate3 = w3.dot(ps3)
                        #estimate3 /= (mat.norm2(estimate3) + consts.EPS)
                        #particle_plot3.update(ps3, w3, [ml_est, estimate3])
                        if listener.savefig():
                            plot_manager.savefig(particle_plot.get_figure())
                            #plot_manager.savefig(particle_plot2.get_figure())
                            #plot_manager.savefig(particle_plot3.get_figure())
                    if PLOT_CARTES:
                        ax.cla()
                        ax.grid(False)
                        #d = localizer.to_spher_grid(post / (np.max(post) + consts.EPS))
                        #d = localizer.to_spher_grid(d / (np.max(d) + consts.EPS))
                        ax.scatter(x, y, z, c=d, s=40)
                        #ax.plot_surface(x, y, z, rstride=1, cstride=1,
                        #                facecolor=plt.cm.gist_heat(d))
                        u = estimate
                        ax.plot([0, u[0]], [0, u[1]], [0, u[2]], c='black', linewidth=3)
                        if DO_BEAMFORM:
                            # NOTE: 'response' (the beamformer's spatial response
                            # over direcs) is computed in code not shown in this
                            # excerpt.
                            if np.max(np.abs(response)) > 1:
                                response /= np.max(np.abs(response))
                            X = response * x
                            Y = response * y
                            Z = response * z
                            ax.plot_surface(X, Y, Z, rstride=1, cstride=1, color='white')
                        ax.set_xlim(-1, 1)
                        ax.set_ylim(-1, 1)
                        ax.set_zlim(0, 1)
                        #ax.view_init(90, -90)
                        fig.canvas.draw()
                    if PLOT_2D:
                        # NOTE: sample_mat, estimate_mat, plot_2d, and
                        # state_est_plot come from the PLOT_2D setup, which is
                        # not shown in this excerpt.
                        # Get unconditional distribution
                        dist = localizer.to_spher_grid(d)
                        dist -= np.min(dist)
                        dist /= (np.sum(dist) + consts.EPS)
                        sample_mat[:, :-1] = sample_mat[:, 1:]
                        sample_mat[:, -1] = dist
                        # Get kalman estimate
                        maxind = np.argmax(post)
                        estimate_mat[:-1] = estimate_mat[1:]
                        estimate_mat[-1] = maxind
                        plot_2d.set_array(sample_mat)
                        state_est_plot.set_ydata(estimate_mat)
                        plt.draw()
                count += 1
                # Get the istft of the processed data
                if PLAY_AUDIO or RECORD_AUDIO:
                    new_data = stft.performIStft()
                    new_data = out_buf.reduce_channels(new_data,
                                                       NUM_CHANNELS_IN,
                                                       NUM_CHANNELS_OUT)
                    # Write out the new, altered data
                    if PLAY_AUDIO:
                        if out_buf.get_available_write() >= WINDOW_LENGTH:
                            out_buf.write_samples(new_data)
                    if RECORD_AUDIO:
                        if record_buf.get_available_write() >= WINDOW_LENGTH:
                            record_buf.write_samples(new_data)
    except KeyboardInterrupt:
        print "Program interrupted"
        listener.set_quit(True)

    print "Cleaning up"
    in_stream.stop_stream()
    in_stream.close()
    out_stream.stop_stream()
    out_stream.close()
    pa.terminate()

    # Take care of output file
    if RECORD_AUDIO:
        print "Writing output file"
        make_wav()
    print "Done"
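# make_wav() is defined elsewhere in the repo. A minimal sketch of writing
# record_buf out with the stdlib wave module; the record_buf method names are
# hypothetical stand-ins modeled on the buffer calls above, and the samples
# are assumed to already be raw bytes in the on-disk format:
import wave

def _make_wav_sketch(filename='localize_out.wav'):
    frames = record_buf.read_samples(record_buf.get_available_read())  # hypothetical
    wf = wave.open(filename, 'wb')
    wf.setnchannels(NUM_CHANNELS_OUT)
    wf.setsampwidth(pyaudio.get_sample_size(SAMPLE_TYPE))
    wf.setframerate(SAMPLE_RATE)
    wf.writeframes(frames)
    wf.close()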