Example #1
def filter_dart(line):

    # Append the newest sample from each channel to its input buffer.
    for i, sample in enumerate(line):
        input_buffers[i].append(sample)

    if len(input_buffers[0]) == BUFFER_SIZE:
        # Once the buffers fill, band-pass filter every channel, then refresh
        # the input buffers once for all channels (not once per channel).
        for i in range(N_MIC):
            output_buffers[i] = filtering_utils.process(
                    input_buffers[i], LOW_FREQ, HIGH_FREQ)
        fresh_buffers(input_buffers)

    if output_buffers[0]:
        # Emit one (delayed) filtered sample per channel from the latest block.
        output = [buf[len(input_buffers[0]) - BUFFER_SIZE // 4]
                for buf in output_buffers]
        pipe_util.join_output("dd", output)
    #filtered_graph = fig.add_subplot(grid + 5)
    #filteredfft_graph = fig.add_subplot(grid + 6)

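    # Demo: build a harmonic test tone, corrupt it with noise, then compare a
    # fixed 1500-2500 band-pass against a mask derived from the noisy signal
    # itself.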
    harmonic_pattern = [(10 - i) / 10. for i in range(10)]
    signal = make_signal(1900, harmonic_pattern)
    noisy = add_noise(signal, 0.4)
    filtered = violent_multi_band_pass(noisy, prepare_multi_band_filter([(1500, 2500)]))
    mask = make_mask_for_signal(noisy, 1000, 2500, 0.05)
    multifiltered = violent_multi_band_pass(noisy, mask)

    plot(signal, sig_graph, fft_graph)
    plot(noisy, noisy_graph, noisyfft_graph)
    #plot(filtered, filtered_graph, filteredfft_graph)
    plot(multifiltered, multifiltered_graph, multifilteredfft_graph)
    plt.show()


def draw_signal(signal):
    fig = plt.figure()
    grid = 210
    sig_graph = fig.add_subplot(grid + 1)
    fft_graph = fig.add_subplot(grid + 2)
    plot(signal, sig_graph, fft_graph)
    plt.show()


if __name__ == '__main__':
    for value in sine(DART_FREQ_HERTZ, 10000):
        values = [value] * CHANNEL_COUNT
        join_output(ZERO_DETECTION_INPUT_FORMAT, values)
Example #3
	mic_positions = mic_positions - origin

	for time__mic_dart_distances in time__mic_dart_distances_stream:
		time_seconds = time__mic_dart_distances[0]
		mic_dart_distances = array(time__mic_dart_distances[1:])

		# The algorithm divides by the distance differences, so it fails if any
		# mic shares mic 0's distance. Add a tiny wiggle to break such
		# degeneracies.
		degeneracies = (mic_dart_distances[1:] == mic_dart_distances[0])
		mic_dart_distances[1:] += 1.0e-12 * degeneracies

		vt = mic_dart_distances - mic_dart_distances[0]
		free_vt = vt[2:]

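		# Linearized TDOA system: with mic 0 shifted to the origin,
		# |x - m_i|^2 = r_i^2 and |x|^2 = r_0^2 for the dart position x.
		# Subtracting, then dividing by vt_i = r_i - r_0, gives
		#     -2 m_i . x / vt_i + |m_i|^2 / vt_i - vt_i = 2 r_0,
		# and equating each row i >= 2 with the mic-1 row eliminates r_0,
		# leaving the linear system M . x = -D built below.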
		A = 2.0 * mic_positions[2:, 0] / free_vt - 2.0 * mic_positions[1, 0] / vt[1]
		B = 2.0 * mic_positions[2:, 1] / free_vt - 2.0 * mic_positions[1, 1] / vt[1]
		C = 2.0 * mic_positions[2:, 2] / free_vt - 2.0 * mic_positions[1, 2] / vt[1]
		D = free_vt - vt[1] - sum(mic_positions[2:, :] ** 2, axis=1) / free_vt + sum(mic_positions[1] ** 2) / vt[1]

		M = concatenate([transpose_1D(A), transpose_1D(B), transpose_1D(C)], axis=1)

		try:
			yield time_seconds, pinv(M).dot(-transpose_1D(D)).reshape(3) + origin
		except LinAlgError:
			sys.stderr.write('Could not multilaterate at t = %f\n' % time_seconds)


if __name__ == '__main__':
	for time_seconds, coordinates in multilaterate(MIC_COORDS_METERS, split_fileinput(MULTILATERATE_INPUT_FORMAT)):
		join_output(DISPLAY_INPUT_FORMAT, [time_seconds] + list(coordinates))
Example #4
    multifiltered_graph = fig.add_subplot(grid + 7)
    multifilteredfft_graph = fig.add_subplot(grid + 8)
    noisy = add_noise(signal, 0.5)
    filtered = violent_multi_band_pass(noisy, prepare_multi_band_filter([(1500, 2500)]))
    mask = make_mask_for_signal(noisy, 1500, 3000, 0.05)
    multifiltered = violent_multi_band_pass(noisy, mask)
    plot(signal, sig_graph, fft_graph)
    plot(noisy, noisy_graph, noisyfft_graph)
    plot(filtered, filtered_graph, filteredfft_graph)
    plot(multifiltered, multifiltered_graph, multifilteredfft_graph)
    plt.show()


# f = scipy.fft(noisy)[:BUFFER_SIZE/2]

# plt.plot(noisy)
# plt.show()
# plt.plot(f)
# plt.show()

# max_index, max_value = max(enumerate(f), key=lambda x:x[1]
#         if freq_from_index(x[0])>200 else 0)


# print "Freq:", freq_from_index(max_index)


if __name__ == '__main__':
    for value in sine(DART_FREQ_HERTZ, 10000):
        join_output([value] * CHANNELS)
Example #5
WAVELENGTH_SAMPLES = SAMPLE_RATE_HERTZ / DART_FREQ_HERTZ


def zero_detection(sample_stream):
	"""Make events for 0-crossings.

	sample_stream must be a generator of CHANNEL_COUNT-tuples of values that
		represent the current microphone level.
	Yields channel ID, time tuples.
	"""
	last_samples_sign = None
	samples_since_zero = array([WAVELENGTH_SAMPLES] * CHANNEL_COUNT)

	for timestep, samples in enumerate(sample_stream):
		samples_sign = signbit(samples)
		if last_samples_sign is not None:
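			# A crossing counts only when the sign flips from non-negative to
			# negative (signbit goes False -> True) and at least two thirds of
			# a wavelength has passed on that channel, which debounces noisy
			# re-crossings within the same cycle.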
			sign_changes = logical_and(samples_sign, ~last_samples_sign)
			sign_changes = logical_and(sign_changes, samples_since_zero > 2 * WAVELENGTH_SAMPLES / 3)
			for channel in nonzero(sign_changes)[0]:
				yield channel, timestep / float(SAMPLE_RATE_HERTZ)

			samples_since_zero[sign_changes] = 0
			samples_since_zero[~sign_changes] += 1

		last_samples_sign = samples_sign
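

# Hypothetical smoke test (added for illustration; not part of the original
# pipeline): on a clean sine at DART_FREQ_HERTZ, zero_detection() should fire
# roughly once per channel per wavelength.
def _zero_detection_smoke_test(seconds=1.0):
	from math import pi, sin

	n_samples = int(SAMPLE_RATE_HERTZ * seconds)
	stream = (
		[sin(2.0 * pi * DART_FREQ_HERTZ * t / SAMPLE_RATE_HERTZ)] * CHANNEL_COUNT
		for t in range(n_samples))
	events = list(zero_detection(stream))
	assert events, 'expected at least one zero-crossing event'
	return events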


if __name__ == '__main__':
	for channel, event_time_seconds in zero_detection(split_fileinput(ZERO_DETECTION_INPUT_FORMAT)):
		join_output(ALIGN_INPUT_FORMAT, (channel, event_time_seconds))
Example #6
	Yields time, 3D coords of dart.
	"""
	mic_positions = array(mic_positions)
	origin = mic_positions[0]
	mic_positions = mic_positions - origin

	for time__mic_dart_distances in time__mic_dart_distances_stream:
		time = time__mic_dart_distances[0]
		mic_dart_distances = array(time__mic_dart_distances[1:])

		# The algorithm divides by the distance differences, so it fails if any
		# mic shares mic 0's distance. Add a tiny wiggle to break such degeneracies.
		while any(mic_dart_distances[1:] == mic_dart_distances[0]):
			mic_dart_distances[1:] += 1.0e-12

		vt = mic_dart_distances - mic_dart_distances[0]
		free_vt = vt[2:]

		A = 2.0 * mic_positions[2:, 0] / free_vt - 2.0 * mic_positions[1, 0] / vt[1]
		B = 2.0 * mic_positions[2:, 1] / free_vt - 2.0 * mic_positions[1, 1] / vt[1]
		C = 2.0 * mic_positions[2:, 2] / free_vt - 2.0 * mic_positions[1, 2] / vt[1]
		D = free_vt - vt[1] - sum(mic_positions[2:, :] ** 2, axis=1) / free_vt + sum(mic_positions[1] ** 2) / vt[1]

		M = concatenate([transpose_1D(A), transpose_1D(B), transpose_1D(C)], axis=1)

		yield time, pinv(M).dot(-transpose_1D(D)).reshape(3) + origin
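

# Hypothetical self-contained check (added for illustration; not part of the
# original pipeline; the mic layout and dart position below are made up):
# compute exact mic-to-dart distances for a known dart position and confirm
# multilaterate() recovers it from a one-element stream.
def _multilaterate_smoke_test():
	from numpy.linalg import norm

	mics = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0),
		(0.0, 0.0, 1.0), (1.0, 1.0, 1.0)]
	dart = array([0.3, 0.4, 0.2])
	distances = [norm(dart - array(mic)) for mic in mics]
	for _, coords in multilaterate(mics, [[0.0] + distances]):
		assert norm(coords - dart) < 1.0e-6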


if __name__ == '__main__':
	for time, coordinates in multilaterate(MIC_COORDS, split_fileinput([float] + [float] * CHANNELS)):
		join_output([time] + list(coordinates))
Example #7
from numpy import signbit
from numpy import logical_xor

from pipe_util import split_fileinput
from pipe_util import join_output
from world_params import CHANNELS
from world_params import SAMPLE_RATE_HERTZ


def generate_events(sample_stream):
	"""Make events for 0-crossings.

	sample_stream must be a generator of CHANNELS-tuples of values that
		represent the current microphone level.
	Yields channel ID, time tuples.
	"""
	last_samples_sign = None
	for timestep, samples in enumerate(sample_stream):
		samples_sign = signbit(samples)
		if last_samples_sign is not None:
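			# logical_xor flags every sign change, in either direction, with
			# no debounce window.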
			sign_changes = logical_xor(last_samples_sign, samples_sign)
			for channel, sign_change in enumerate(sign_changes):
				if sign_change:
					yield channel, float(timestep) / float(SAMPLE_RATE_HERTZ)
		last_samples_sign = samples_sign


if __name__ == '__main__':
	for channel_event_t in generate_events(split_fileinput([float] * CHANNELS)):
		join_output(channel_event_t)
Example #8
        vt = mic_dart_distances - mic_dart_distances[0]
        free_vt = vt[2:]

        A = (2.0 * mic_positions[2:, 0] / free_vt
             - 2.0 * mic_positions[1, 0] / vt[1])
        B = (2.0 * mic_positions[2:, 1] / free_vt
             - 2.0 * mic_positions[1, 1] / vt[1])
        C = (2.0 * mic_positions[2:, 2] / free_vt
             - 2.0 * mic_positions[1, 2] / vt[1])
        D = (free_vt - vt[1]
             - sum(mic_positions[2:, :]**2, axis=1) / free_vt
             + sum(mic_positions[1]**2) / vt[1])

        M = concatenate([transpose_1D(A),
                         transpose_1D(B),
                         transpose_1D(C)],
                        axis=1)

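        # pinv() solves the (possibly over-determined) system M.x = -D in the
        # least-squares sense; if its SVD fails to converge, numpy raises
        # LinAlgError, which is reported and skipped below.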
        try:
            yield (time_seconds,
                   pinv(M).dot(-transpose_1D(D)).reshape(3) + origin)
        except LinAlgError:
            sys.stderr.write('Could not multilaterate at t = %f\n' %
                             time_seconds)


if __name__ == '__main__':
    for time_seconds, coordinates in multilaterate(
            MIC_COORDS_METERS, split_fileinput(MULTILATERATE_INPUT_FORMAT)):
        join_output(DISPLAY_INPUT_FORMAT, [time_seconds] + list(coordinates))
Example #9
		# Check to see if any peaks could be missing at this point. This
		# should be detectable as an empty channel queue.
		if (allow_dropped_events and
			detect_missed_event(
				event_time_seconds,
				time_seconds_queues,
				MAX_MIC_DELAY_SECONDS
			)):
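			# Park a None placeholder on each empty queue so the all-channels
			# check below still sees an entry for the missed peak.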
			for time_seconds_queue in time_seconds_queues:
				if len(time_seconds_queue) < 1:
					time_seconds_queue.append(None)

		# If we've seen a peak on every channel,
		if all(
			len(time_seconds_queue) > 0
			for time_seconds_queue
			in time_seconds_queues
		):
			# yield all those relative times.
			yield time_seconds_to_time_seconds_distances(tuple(
				# Out on the left.
				time_seconds_queue.popleft()
				for time_seconds_queue
				in time_seconds_queues
			))


if __name__ == '__main__':
	for time_seconds, aligned_distances in align(split_fileinput(ALIGN_INPUT_FORMAT)):
		join_output(MULTILATERATE_INPUT_FORMAT, [time_seconds] + aligned_distances)
Example #10
def zero_detection(sample_stream):
    """Make events for 0-crossings.

	sample_stream must be a generator of CHANNEL_COUNT-tuples of values that
		represent the current microphone level.
	Yields channel ID, time tuples.
	"""
    last_samples_sign = None
    samples_since_zero = array([WAVELENGTH_SAMPLES] * CHANNEL_COUNT)

    for timestep, samples in enumerate(sample_stream):
        samples_sign = signbit(samples)
        if last_samples_sign is not None:
            sign_changes = logical_and(samples_sign, ~last_samples_sign)
            sign_changes = logical_and(
                sign_changes, samples_since_zero > 2 * WAVELENGTH_SAMPLES / 3)
            for channel in nonzero(sign_changes)[0]:
                yield channel, timestep / float(SAMPLE_RATE_HERTZ)

            samples_since_zero[sign_changes] = 0
            samples_since_zero[~sign_changes] += 1

        last_samples_sign = samples_sign


if __name__ == '__main__':
    for channel, event_time_seconds in zero_detection(
            split_fileinput(ZERO_DETECTION_INPUT_FORMAT)):
        join_output(ALIGN_INPUT_FORMAT, (channel, event_time_seconds))