Code example #1
0
def main():
    """Plots right-minus-left frequency response differences of headphone recordings per group."""
    test_signal = os.path.join(DIR_PATH,
                               'sweep-6.15s-48000Hz-32bit-2.93Hz-24000Hz.pkl')
    estimator = ImpulseResponseEstimator.from_pickle(test_signal)

    for group in ['volume2', 'volume2-48-52', 'objective2', 'None']:
        fig, ax = plt.subplots()
        fig.set_size_inches(12, 9)
        config_fr_axis(ax)
        ax.set_title(group)

        # Sort recordings numerically by the first number in the file name
        def numeric_key(path):
            return float(re.search(r'\d+', os.path.split(path)[1])[0])

        recordings = sorted(
            glob(os.path.join(DIR_PATH, group, 'headphones*.wav')),
            key=numeric_key)
        for recording in recordings:
            hp = HRIR(estimator)
            hp.open_recording(recording, ['FL', 'FR'])
            fr_left = hp.irs['FL']['left'].frequency_response()
            fr_right = hp.irs['FR']['right'].frequency_response()
            # Plot inter-channel difference for this recording
            label = os.path.split(recording)[1].replace('.wav', '')
            ax.plot(fr_left.frequency,
                    fr_right.raw - fr_left.raw,
                    label=label,
                    linewidth=0.5)
        ax.legend()
        ax.set_ylim([-5, 5])
        plt.show()
        save_fig_as_png(os.path.join(DIR_PATH, f'{group}.png'), fig)
Code example #2
0
def open_room_measurements(estimator, dir_path):
    """Opens speaker-ear specific room measurements.

    File names must follow the pattern ``room-<speakers>[-left|-right].wav``
    where ``<speakers>`` is a comma separated list of speaker names,
    e.g. ``room-BL,SL.wav`` or ``room-FL,FR-left.wav``.

    Args:
        estimator: ImpulseResponseEstimator instance
        dir_path: Path to directory

    Returns:
        HRIR instance with the room measurements
    """
    # Read room measurement files
    rir = HRIR(estimator)
    # room-BL,SL.wav, room-left-FL,FR.wav, room-right-FC.wav, etc...
    pattern = rf'^room-{SPEAKER_LIST_PATTERN}(-(left|right))?\.wav$'
    # The original looped with enumerate() but never used the index
    for file_name in [f for f in os.listdir(dir_path) if re.match(pattern, f)]:
        # Read the speaker names from the file name into a list
        speakers = re.search(SPEAKER_LIST_PATTERN, file_name)
        if speakers is not None:
            speakers = speakers[0].split(',')
        # Form absolute path
        file_path = os.path.join(dir_path, file_name)
        # Read mic side (left or right) if present in the file name
        side = re.search(r'(left|right)', file_name)
        if side is not None:
            side = side[0]
        # Read file
        rir.open_recording(file_path, speakers, side=side)
    return rir
Code example #3
0
def open_binaural_measurements(estimator, dir_path):
    """Opens binaural measurement WAV files.

    Args:
        estimator: ImpulseResponseEstimator
        dir_path: Path to directory

    Returns:
        HRIR instance
    """
    hrir = HRIR(estimator)
    # Match file names such as "FL,FR.wav"
    pattern = r'^{pattern}\.wav$'.format(pattern=SPEAKER_LIST_PATTERN)
    matching_files = [f for f in os.listdir(dir_path) if re.match(pattern, f)]
    for file_name in matching_files:
        # Speaker names are encoded as a comma separated list in the file name
        speaker_names = re.search(SPEAKER_LIST_PATTERN, file_name)[0].split(',')
        # Open the file and add tracks to HRIR
        hrir.open_recording(os.path.join(dir_path, file_name), speakers=speaker_names)
    if not hrir.irs:
        raise ValueError('No HRIR recordings found in the directory.')
    return hrir
Code example #4
0
def main():
    """Virtualizes a stereo sweep sequence with a measured HRIR and writes it to disk."""
    # Open HRIR
    estimator = ImpulseResponseEstimator.from_pickle(TEST_SIGNAL)
    hrir = HRIR(estimator)
    hrir.open_recording(os.path.join(DIR_PATH, 'FL,FR.wav'), speakers=['FL', 'FR'])
    hrir.crop_heads()
    hrir.crop_tails()

    # Create test signal sequence
    speakers = ['FL', 'FR']
    seq_data = estimator.sweep_sequence(speakers, 'stereo')

    fig, ax = plot_stereo_track(seq_data, estimator.fs)
    fig.suptitle('Sweep sequence')

    # Convolve each speaker's sweep with both ear impulse responses,
    # then sum speaker contributions per ear
    left = np.vstack([
        hrir.irs['FL']['left'].convolve(seq_data[0]),
        hrir.irs['FL']['right'].convolve(seq_data[0])
    ])
    right = np.vstack([
        hrir.irs['FR']['left'].convolve(seq_data[1]),
        hrir.irs['FR']['right'].convolve(seq_data[1])
    ])
    virtualized = left + right

    fig, ax = plot_stereo_track(virtualized, estimator.fs)
    fig.suptitle('Sweep sequence convolved with HRIR')
    plt.show()

    # NOTE: the original re-stacked `virtualized` with np.vstack here, a no-op
    # on an already 2-D array, and carried dead commented-out code; both removed.

    # Normalize to 0 dB peak
    virtualized /= np.max(np.abs(virtualized))

    # Write virtualized sequence to disk
    file_path = os.path.join(
        DIR_PATH,
        f'headphones-sweep-seq-{",".join(speakers)}-stereo-{estimator.file_name(32)}.wav')
    write_wav(file_path, estimator.fs, virtualized, bit_depth=32)
Code example #5
0
def main(test_signal):
    """Estimates binaural microphone calibration against a room measurement mic.

    Averages frequency responses of room and binaural recordings found in
    DIR_PATH, smoothens and centers them, writes left/right mic calibration
    CSV files and saves a comparison figure as Results.png.

    Args:
        test_signal: Path to test signal pickle for ImpulseResponseEstimator

    Returns:
        None
    """
    estimator = ImpulseResponseEstimator.from_pickle(test_signal)

    # Room mic calibration file: CSV preferred, TXT as fallback
    room_mic_calibration = os.path.join(DIR_PATH, 'room-mic-calibration.csv')
    if not os.path.isfile(room_mic_calibration):
        room_mic_calibration = os.path.join(DIR_PATH,
                                            'room-mic-calibration.txt')
    if os.path.isfile(room_mic_calibration):
        # File found, create frequency response
        room_mic_calibration = FrequencyResponse.read_from_csv(
            room_mic_calibration)
        room_mic_calibration.interpolate(f_step=1.01, f_min=10, f_max=20e3)
        room_mic_calibration.center()
    else:
        room_mic_calibration = None

    # Room measurement mic: FL-left responses from every room*.wav recording
    rooms = []
    for file_path in glob(os.path.join(DIR_PATH, 'room*.wav')):
        room = HRIR(estimator)
        room.open_recording(file_path, speakers=['FL'], side='left')
        fr = room.irs['FL']['left'].frequency_response()
        fr.interpolate(f_step=1.01, f_min=10, f_max=20e3)
        rooms.append(fr)
        if room_mic_calibration is not None:
            # Adjust by calibration data
            rooms[-1].raw -= room_mic_calibration.raw

    # Binaural mics: left and right ear responses from binaural*.wav recordings
    lefts = []
    rights = []
    for file_path in glob(os.path.join(DIR_PATH, 'binaural*.wav')):
        binaural = HRIR(estimator)
        binaural.open_recording(file_path, speakers=['FL'])
        lefts.append(binaural.irs['FL']['left'].frequency_response())
        rights.append(binaural.irs['FL']['right'].frequency_response())

    # Setup plot
    fig, ax = plt.subplots()
    fig.set_size_inches(18, 9)
    ax.set_title('Microphone calibration')
    ax.set_xlabel('Frequency (Hz)')
    ax.semilogx()
    ax.set_xlim([20, 20e3])
    ax.set_ylabel('Amplitude (dB)')
    ax.grid(True, which='major')
    ax.grid(True, which='minor')
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))

    # Room measurement mic: average all room responses, smoothen and center
    room = FrequencyResponse(name='Room measurement mic',
                             frequency=rooms[0].frequency,
                             raw=np.mean(np.vstack([x.raw for x in rooms]),
                                         axis=0))
    room.interpolate(f_step=1.01, f_min=10, f_max=20e3)
    room.smoothen_fractional_octave(window_size=1 / 6,
                                    treble_window_size=1 / 6)
    # Replace raw with the smoothed curve and drop the smoothed copy
    room.raw = room.smoothed.copy()
    room.smoothed = []
    room.center([60, 10000])
    ax.plot(room.frequency, room.raw, color='#680fb9', linewidth=0.5)

    # Left binaural mic: average, smoothen, center
    left = FrequencyResponse(name='Left binaural mic',
                             frequency=lefts[0].frequency,
                             raw=np.mean(np.vstack([x.raw for x in lefts]),
                                         axis=0))
    left.interpolate(f_step=1.01, f_min=10, f_max=20e3)
    left.smoothen_fractional_octave(window_size=1 / 6,
                                    treble_window_size=1 / 6)
    left.raw = left.smoothed.copy()
    left.smoothed = []
    # Keep the centering gain so the right channel can be shifted by the same
    # amount, preserving the inter-channel balance
    gain = left.center([60, 10000])
    ax.plot(left.frequency, left.raw, color='#7db4db', linewidth=0.5)
    ax.plot(left.frequency, left.raw - room.raw, color='#1f77b4')
    left.write_to_csv(os.path.join(DIR_PATH, 'left-mic-calibration.csv'))

    # Right binaural mic: average, smoothen, apply left channel's gain
    right = FrequencyResponse(name='Right binaural mic',
                              frequency=rights[0].frequency,
                              raw=np.mean(np.vstack([x.raw for x in rights]),
                                          axis=0))
    right.interpolate(f_step=1.01, f_min=10, f_max=20e3)
    right.smoothen_fractional_octave(window_size=1 / 6,
                                     treble_window_size=1 / 6)
    right.raw = right.smoothed.copy()
    right.smoothed = []
    right.raw += gain
    ax.plot(right.frequency, right.raw, color='#dd8081', linewidth=0.5)
    ax.plot(right.frequency, right.raw - room.raw, color='#d62728')
    right.write_to_csv(os.path.join(DIR_PATH, 'right-mic-calibration.csv'))

    # Legend order matches the plotting order above
    ax.legend(
        ['Room', 'Left', 'Left calibration', 'Right', 'Right calibration'])

    # Save figure
    file_path = os.path.join(DIR_PATH, 'Results.png')
    fig.savefig(file_path, bbox_inches='tight')
    optimize_png_size(file_path)

    plt.show()
Code example #6
0
def headphone_compensation(estimator, dir_path):
    """Equalizes HRIR tracks with headphone compensation measurement.

    Reads the headphone measurement recording, computes left and right ear
    frequency responses, compensates them against a flat (zero) target and
    saves diagnostic plots under ``dir_path/plots``.

    Args:
        estimator: ImpulseResponseEstimator instance
        dir_path: Path to output directory

    Returns:
        Tuple of (left, right) compensated FrequencyResponse instances
    """
    # Read WAV file
    hp_irs = HRIR(estimator)
    hp_irs.open_recording(os.path.join(dir_path, 'headphones.wav'), speakers=['FL', 'FR'])
    hp_irs.write_wav(os.path.join(dir_path, 'headphone-responses.wav'))

    # Frequency responses
    left = hp_irs.irs['FL']['left'].frequency_response()
    right = hp_irs.irs['FR']['right'].frequency_response()

    # Center by left channel; shift right by the same gain so the
    # inter-channel balance is preserved
    gain = left.center([100, 10000])
    right.raw += gain

    # Compensate against a flat zero target
    zero = FrequencyResponse(name='zero', frequency=left.frequency, raw=np.zeros(len(left.frequency)))
    left.compensate(zero, min_mean_error=False)
    right.compensate(zero, min_mean_error=False)

    # Headphone plots
    fig = plt.figure()
    gs = fig.add_gridspec(2, 3)
    fig.set_size_inches(22, 10)
    fig.suptitle('Headphones')

    # Left
    axl = fig.add_subplot(gs[0, 0])
    left.plot_graph(fig=fig, ax=axl, show=False)
    axl.set_title('Left')
    # Right
    axr = fig.add_subplot(gs[1, 0])
    right.plot_graph(fig=fig, ax=axr, show=False)
    axr.set_title('Right')
    # Sync axes
    sync_axes([axl, axr])

    # Combined comparison plot; work on copies so the returned responses
    # keep their compensation state
    _left = left.copy()
    _right = right.copy()
    gain_l = _left.center([100, 10000])
    gain_r = _right.center([100, 10000])
    ax = fig.add_subplot(gs[:, 1:])
    ax.plot(_left.frequency, _left.raw, linewidth=1, color='#1f77b4')
    ax.plot(_right.frequency, _right.raw, linewidth=1, color='#d62728')
    ax.plot(_left.frequency, _left.raw - _right.raw, linewidth=1, color='#680fb9')
    # Scale y-limits from the audible band only
    sl = np.logical_and(_left.frequency > 20, _left.frequency < 20000)
    stack = np.vstack([_left.raw[sl], _right.raw[sl], _left.raw[sl] - _right.raw[sl]])
    ax.set_ylim([np.min(stack) * 1.1, np.max(stack) * 1.1])
    axl.set_ylim([np.min(stack) * 1.1, np.max(stack) * 1.1])
    axr.set_ylim([np.min(stack) * 1.1, np.max(stack) * 1.1])
    ax.set_title('Comparison')
    ax.legend([f'Left raw {gain_l:+.1f} dB', f'Right raw {gain_r:+.1f} dB', 'Difference'], fontsize=8)
    ax.set_xlabel('Frequency (Hz)')
    ax.semilogx()
    ax.set_xlim([20, 20000])
    ax.set_ylabel('Amplitude (dBr)')
    ax.grid(True, which='major')
    ax.grid(True, which='minor')
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))

    # Save headphone plots
    file_path = os.path.join(dir_path, 'plots', 'headphones.png')
    os.makedirs(os.path.split(file_path)[0], exist_ok=True)
    save_fig_as_png(file_path, fig)
    plt.close(fig)

    return left, right
def main():
    """Compares feedback and feedforward headphone compensation strategies.

    Opens headphone feedback and feedforward measurements along with the HRIR
    measurement, computes compensation errors for both strategies and saves
    feedback.png, feedforward.png and comparison.png to DIR_PATH.
    """
    estimator = ImpulseResponseEstimator.from_pickle(TEST_SIGNAL)

    # Open feedback measurement
    feedback = HRIR(estimator)
    feedback.open_recording(os.path.join(DIR_PATH, 'headphones-FL,FR.wav'), speakers=['FL', 'FR'])
    feedback.crop_heads()
    feedback.crop_tails()

    # Open feedforward measurement
    # Only FL-left and FR-right are needed here
    feedforward = HRIR(estimator)
    feedforward.open_recording(os.path.join(DIR_PATH, 'headphones.wav'), speakers=['FL', 'FR'])
    ffl = feedforward.irs['FL']['left'].frequency_response()
    ff_gain = ffl.center([100, 10000])
    zero = FrequencyResponse(name='zero', frequency=ffl.frequency, raw=np.zeros(ffl.frequency.shape))
    ffl.compensate(zero)
    ffl.smoothen_heavy_light()
    ffr = feedforward.irs['FR']['right'].frequency_response()
    # Shift right by the left channel's centering gain to preserve balance
    ffr.raw += ff_gain
    ffr.compensate(zero)
    ffr.smoothen_heavy_light()
    feedforward_errors = {'left': ffl, 'right': ffr}

    # Open HRIR measurement
    hrir = HRIR(estimator)
    hrir.open_recording(os.path.join(DIR_PATH, 'FL,FR.wav'), speakers=['FL', 'FR'])
    hrir.crop_heads()
    hrir.crop_tails()
    fllfr = hrir.irs['FL']['left'].frequency_response()
    gain = fllfr.center([100, 10000])

    # Feedback vs HRIR
    fig, ax = plt.subplots(3, 2)
    fig.set_size_inches(18, 12)
    fig.suptitle('Feedback Compensation')
    feedback_errors = {'left': None, 'right': None}
    # enumerate() replaces the original manual i/j counter bookkeeping
    for i, (speaker, pair) in enumerate(feedback.irs.items()):
        for j, (side, ir) in enumerate(pair.items()):
            # HRIR is the target
            target = hrir.irs[speaker][side].frequency_response()
            target.raw += gain
            target.smoothen_fractional_octave(window_size=1/3, treble_window_size=1/3)

            # Frequency response of the headphone feedback measurement
            fr = ir.frequency_response()
            fr.raw += gain
            fr.error = fr.raw - target.raw
            fr.smoothen_heavy_light()
            # Accumulate per-side error so it can be averaged below
            if feedback_errors[side] is None:
                feedback_errors[side] = fr.error_smoothed
            else:
                feedback_errors[side] += fr.error_smoothed

            # Plot
            ir.plot_fr(fr=fr, fig=fig, ax=ax[i, j])
            ax[i, j].set_title(f'{speaker}-{side}')
            ax[i, j].set_ylim([np.min(fr.error_smoothed), np.max(fr.error_smoothed)])

    for i, side in enumerate(['left', 'right']):
        # Average over the two speakers (FL and FR)
        feedback_errors[side] = FrequencyResponse(
            name=side,
            frequency=fllfr.frequency.copy(),
            error=feedback_errors[side] / 2
        )
        feedback_errors[side].plot_graph(fig=fig, ax=ax[2, i], show=False)

    # flatten() yields the same row-major order as the original comprehension
    sync_axes(list(ax.flatten()))
    save_fig_as_png(os.path.join(DIR_PATH, 'feedback.png'), fig)

    # Feedforward
    fig, ax = plt.subplots(1, 2)
    fig.set_size_inches(18, 9)
    fig.suptitle('Feedforward Compensation')
    ffl.plot_graph(fig=fig, ax=ax[0], show=False)
    ffr.plot_graph(fig=fig, ax=ax[1], show=False)
    save_fig_as_png(os.path.join(DIR_PATH, 'feedforward.png'), fig)

    # Feedback compensation vs Feedforward compensation
    # center() operates on raw, so temporarily move error into raw to center it
    feedback_errors['left'].raw = feedback_errors['left'].error
    fbg = feedback_errors['left'].center([200, 2000])
    feedback_errors['left'].error = feedback_errors['left'].raw
    feedback_errors['left'].raw = []
    feedback_errors['right'].error += fbg

    feedforward_errors['left'].raw = feedforward_errors['left'].error_smoothed
    ffg = feedforward_errors['left'].center([200, 2000])
    feedforward_errors['left'].error_smoothed = feedforward_errors['left'].raw
    feedforward_errors['left'].raw = []
    feedforward_errors['right'].error_smoothed += ffg

    fig, ax = plt.subplots(1, 2)
    fig.set_size_inches(18, 9)
    fig.suptitle('Feedback vs Feedforward')
    # Audible-band selector for y-limit scaling
    sl = np.logical_and(feedback_errors['left'].frequency > 20, feedback_errors['left'].frequency < 20000)
    stack = [
        feedback_errors['left'].error[sl],
        feedback_errors['right'].error[sl],
        feedforward_errors['left'].error_smoothed[sl],
        feedforward_errors['right'].error_smoothed[sl],
    ]
    for i, side in enumerate(['left', 'right']):
        config_fr_axis(ax[i])
        ax[i].plot(feedback_errors[side].frequency, feedback_errors[side].error)
        ax[i].plot(feedforward_errors[side].frequency, feedforward_errors[side].error_smoothed)
        difference = feedback_errors[side].error - feedforward_errors[side].error_smoothed
        stack.append(difference[sl])
        ax[i].plot(feedback_errors[side].frequency, difference, color=COLORS['red'])
        ax[i].set_title(side)
        ax[i].legend(['Feedback', 'Feedforward', 'Difference'])

    # Shared y-limits across both subplots from all plotted curves
    stack = np.concatenate(stack)
    ax[0].set_ylim([np.min(stack), np.max(stack)])
    ax[1].set_ylim([np.min(stack), np.max(stack)])

    save_fig_as_png(os.path.join(DIR_PATH, 'comparison.png'), fig)
    plt.show()