Example #1
def equalization(estimator, dir_path):
    """Reads equalization FIR filter or CSV settings

    Args:
        estimator: ImpulseResponseEstimator
        dir_path: Path to directory

    Returns:
        - Left side equalization FrequencyResponse or None
        - Right side equalization FrequencyResponse or None
    """
    if os.path.isfile(os.path.join(dir_path, 'eq.wav')):
        print('eq.wav is no longer supported, use eq.csv!')
    # Default for both sides
    eq_path = os.path.join(dir_path, 'eq.csv')
    eq_fr = None
    if os.path.isfile(eq_path):
        eq_fr = FrequencyResponse.read_from_csv(eq_path)

    # Left
    left_path = os.path.join(dir_path, 'eq-left.csv')
    left_fr = None
    if os.path.isfile(left_path):
        left_fr = FrequencyResponse.read_from_csv(left_path)
    elif eq_fr is not None:
        left_fr = eq_fr
    if left_fr is not None:
        left_fr.interpolate(f_step=1.01, f_min=10, f_max=estimator.fs / 2)

    # Right
    right_path = os.path.join(dir_path, 'eq-right.csv')
    right_fr = None
    if os.path.isfile(right_path):
        right_fr = FrequencyResponse.read_from_csv(right_path)
    elif eq_fr is not None:
        right_fr = eq_fr
    if right_fr is not None and right_fr != left_fr:
        right_fr.interpolate(f_step=1.01, f_min=10, f_max=estimator.fs / 2)

    # Plot
    if left_fr is not None or right_fr is not None:
        if left_fr == right_fr:
            # Both are the same, plot only one graph
            fig, ax = plt.subplots()
            fig.set_size_inches(12, 9)
            left_fr.plot_graph(fig=fig, ax=ax, show=False)
        else:
            # Left and right are different, plot two graphs in the same figure
            fig, ax = plt.subplots(1, 2)
            fig.set_size_inches(22, 9)
            if left_fr is not None:
                left_fr.plot_graph(fig=fig, ax=ax[0], show=False)
            if right_fr is not None:
                right_fr.plot_graph(fig=fig, ax=ax[1], show=False)
        save_fig_as_png(os.path.join(dir_path, 'plots', 'eq.png'), fig)

    return left_fr, right_fr
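A minimal usage sketch for the function above; the directory name and pickle path are hypothetical, and the estimator is loaded the same way as in the later examples:

# Hypothetical measurement directory containing eq.csv (and optionally eq-left.csv / eq-right.csv)
estimator = ImpulseResponseEstimator.from_pickle('my-measurement/test.pkl')
left_fr, right_fr = equalization(estimator, 'my-measurement')
# Each return value is a FrequencyResponse interpolated up to estimator.fs / 2, or None if no eq file was found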
Example #2
    def frequency_response(self):
        """Creates FrequencyResponse instance."""
        f, m = self.magnitude_response()
        n = self.fs / 2 / 4  # 4 Hz resolution
        step = int(len(f) / n)
        fr = FrequencyResponse(name='Frequency response',
                               frequency=f[1::step],
                               raw=m[1::step])
        fr.interpolate(f_step=1.01, f_min=10, f_max=self.fs / 2)
        return fr
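To make the decimation above concrete, a small arithmetic sketch; the sampling rate and bin count below are assumptions:

fs = 48000              # assumed sampling rate
n = fs / 2 / 4          # 6000.0 target points for ~4 Hz resolution
step = int(240001 / n)  # with an assumed 240001-bin magnitude response, every 40th bin is kept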
Example #3
def open_mic_calibration(estimator, dir_path, mic_calibration=None):
    """Opens room measurement microphone calibration file

    Args:
        estimator: ImpulseResponseEstimator instance
        dir_path: Path to directory
        mic_calibration: Path to explicitly given (if any) room measurement microphone calibration file

    Returns:
        Microphone calibration FrequencyResponse
    """
    if mic_calibration is None:
        # Room mic calibration file path not given, try csv first then txt
        mic_calibration = os.path.join(dir_path, 'room-mic-calibration.csv')
        if not os.path.isfile(mic_calibration):
            mic_calibration = os.path.join(dir_path,
                                           'room-mic-calibration.txt')
    elif not os.path.isfile(mic_calibration):
        # Room mic calibration file path given, but the file doesn't exist
        raise FileNotFoundError(
            f'Room mic calibration file doesn\'t exist at "{mic_calibration}"')
    if os.path.isfile(mic_calibration):
        # File found, create frequency response
        mic_calibration = FrequencyResponse.read_from_csv(mic_calibration)
        mic_calibration.interpolate(f_step=1.01,
                                    f_min=10,
                                    f_max=estimator.fs / 2)
        mic_calibration.center()
    else:
        # File not found, skip calibration
        mic_calibration = None
    return mic_calibration
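Usage sketch; the paths below are hypothetical. Auto-discovery tries room-mic-calibration.csv and then room-mic-calibration.txt, while a missing explicitly given path raises FileNotFoundError:

estimator = ImpulseResponseEstimator.from_pickle('my-measurement/test.pkl')
# Auto-discovery: returns a centered FrequencyResponse, or None if no calibration file exists
mic_cal = open_mic_calibration(estimator, 'my-measurement')
# Explicit file: raises FileNotFoundError if 'my-measurement/umik-1.txt' does not exist
mic_cal = open_mic_calibration(estimator, 'my-measurement', mic_calibration='my-measurement/umik-1.txt')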
Example #4
def open_room_target(estimator, dir_path, target=None):
    """Opens room frequency response target file.

    Args:
        estimator: ImpulseResponseEstimator instance
        dir_path: Path to directory
        target: Path to explicitly given (if any) room response target file

    Returns:
        Room response target FrequencyResponse
    """
    # Room target
    if target is None:
        target = os.path.join(dir_path, 'room-target.csv')
    if os.path.isfile(target):
        # File exists, create frequency response
        target = FrequencyResponse.read_from_csv(target)
        target.interpolate(f_step=1.01, f_min=10, f_max=estimator.fs / 2)
        target.center()
    else:
        # No room target specified, use flat
        target = FrequencyResponse(name='room-target')
        target.raw = np.zeros(target.frequency.shape)
        target.interpolate(f_step=1.01, f_min=10, f_max=estimator.fs / 2)
    return target
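Usage sketch (hypothetical paths): without room-target.csv in the directory, the returned target is flat (0 dB) from 10 Hz to estimator.fs / 2:

estimator = ImpulseResponseEstimator.from_pickle('my-measurement/test.pkl')
target = open_room_target(estimator, 'my-measurement')  # auto-discovers room-target.csv or falls back to flat
custom = open_room_target(estimator, 'my-measurement', target='my-measurement/x-curve.csv')  # explicit target file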
Example #5
def main():
    # Open files
    loudspeaker = FrequencyResponse.read_from_csv(
        os.path.join(DIR_PATH, 'harman-in-room-loudspeaker-target.csv'))
    headphone = FrequencyResponse.read_from_csv(
        os.path.join(DIR_PATH, 'harman-in-room-headphone-target.csv'))
    headphone.raw += headphone.create_target(bass_boost_gain=2.2,
                                             bass_boost_fc=105,
                                             bass_boost_q=0.76)

    fig, ax = loudspeaker.plot_graph(show=False)
    headphone.plot_graph(fig=fig, ax=ax, color='blue')
    plt.show()

    for fr in [loudspeaker, headphone]:
        #for fr in [loudspeaker]:
        fr.interpolate(f_step=1.01, f_min=10, f_max=20000)
        fr.center()
        smooth = fr.copy()
        smooth.smoothen_fractional_octave(window_size=1, treble_window_size=1)
        smooth.raw = smooth.smoothed.copy()
        smooth.smoothed = np.array([])
        smooth.write_to_csv(
            os.path.join(DIR_PATH, os.pardir, os.pardir, 'data',
                         f'{fr.name}.csv'))
        smooth.plot_graph()
        fr.raw[fr.frequency < 300] = fr.raw[np.argmin(
            np.abs(fr.frequency - 300))]
        fr.smoothen_fractional_octave(window_size=1, treble_window_size=1)
        fr.raw = fr.smoothed.copy()
        fr.smoothed = np.array([])
        #fr.raw += fr.create_target(bass_boost_gain=6.8, bass_boost_fc=105, bass_boost_q=0.76)
        fr.write_to_csv(
            os.path.join(DIR_PATH, os.pardir, os.pardir, 'data',
                         f'{fr.name}-wo-bass.csv'))
        fig, ax = fr.plot_graph(show=False)
        #smooth.plot_graph(fig=fig, ax=ax, show=False, color='blue')
        #ax.legend(['Shelf', 'Original'])
        plt.show()
Example #6
def create_target(estimator, bass_boost_gain, bass_boost_fc, bass_boost_q, tilt):
    """Creates target frequency response with bass boost, tilt and high pass at 20 Hz"""
    target = FrequencyResponse(
        name='bass_and_tilt',
        frequency=FrequencyResponse.generate_frequencies(f_min=10, f_max=estimator.fs / 2, f_step=1.01)
    )
    target.raw = target.create_target(
        bass_boost_gain=bass_boost_gain,
        bass_boost_fc=bass_boost_fc,
        bass_boost_q=bass_boost_q,
        tilt=tilt
    )
    high_pass = FrequencyResponse(
        name='high_pass',
        frequency=[10, 18, 19, 20, 21, 22, 20000],
        raw=[-80, -5, -1.6, -0.6, -0.2, 0, 0]
    )
    high_pass.interpolate(f_min=10, f_max=estimator.fs / 2, f_step=1.01)
    target.raw += high_pass.raw
    return target
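A usage sketch for composing the target; the gain and tilt values here are illustrative, only fc=105 and q=0.76 appear elsewhere in these examples:

estimator = ImpulseResponseEstimator.from_pickle('my-measurement/test.pkl')  # hypothetical path
target = create_target(estimator, bass_boost_gain=6.0, bass_boost_fc=105, bass_boost_q=0.76, tilt=-0.2)
# target.raw now holds the bass/tilt shelf summed with the 20 Hz high-pass ramp defined above
target.plot_graph()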
Example #7
def open_generic_room_measurement(estimator,
                                  dir_path,
                                  mic_calibration,
                                  target,
                                  method='average',
                                  limit=1000,
                                  plot=False):
    """Opens generic room measurment file

    Args:
        estimator: ImpulseResponseEstimator instance
        dir_path: Path to directory
        mic_calibration: Measurement microphone calibration FrequencyResponse
        target: Room response target FrequencyResponse
        method: Combination method. "average" or "conservative"
        limit: Upper limit in Hertz for equalization. Gain will ramp down to 0 dB in the octave leading to this.
               0 disables limit.
        plot: Plot frequency response?

    Returns:
        Generic room measurement FrequencyResponse, or None if room.wav doesn't exist
    """
    file_path = os.path.join(dir_path, 'room.wav')
    if not os.path.isfile(file_path):
        return None

    # Read the file
    fs, data = read_wav(file_path, expand=True)

    if fs != estimator.fs:
        raise ValueError(f'Sampling rate of "{file_path}" doesn\'t match!')

    # Average frequency responses of all tracks of the generic room measurement file
    irs = []
    for track in data:
        n_cols = int(
            round((len(track) / estimator.fs - 2) / (estimator.duration + 2)))
        for i in range(n_cols):
            # Current sweep starts after the initial 2 seconds plus the previous sweeps and their 2 second tails
            start = int(2 * estimator.fs + i *
                        (2 * estimator.fs + len(estimator)))
            # Ends at start plus one more (current) sweep
            end = int(start + 2 * estimator.fs + len(estimator))
            end = min(end, len(track))
            # Select current sweep
            sweep = track[start:end]
            # Deconvolve as impulse response
            ir = ImpulseResponse(estimator.estimate(sweep), estimator.fs,
                                 sweep)
            # Crop harmonic distortion from the head
            # Noise in the tail should not affect frequency response so it doesn't have to be cropped
            ir.crop_head(head_ms=1)
            irs.append(ir)

    # Frequency response for the generic room measurement
    room_fr = FrequencyResponse(
        name='generic_room',
        frequency=FrequencyResponse.generate_frequencies(f_min=10,
                                                         f_max=estimator.fs /
                                                         2,
                                                         f_step=1.01),
        raw=0,
        error=0,
        target=target.raw)

    # Calculate and stack errors
    raws = []
    errors = []
    for ir in irs:
        fr = ir.frequency_response()
        if mic_calibration is not None:
            fr.raw -= mic_calibration.raw
        fr.center([100, 10000])
        room_fr.raw += fr.raw
        raws.append(fr.copy())
        fr.compensate(target, min_mean_error=True)
        if method == 'conservative' and len(irs) > 1:
            fr.smoothen_fractional_octave(window_size=1 / 3,
                                          treble_window_size=1 / 3)
            errors.append(fr.error_smoothed)
        else:
            errors.append(fr.error)
    room_fr.raw /= len(irs)
    errors = np.vstack(errors)

    if errors.shape[0] > 1:
        # Combine errors
        if method == 'conservative':
            # Conservative error curve is zero everywhere except at indices where all curves share
            # the same sign; at those indices the value with the smallest absolute value is selected.
            # This ensures that no curve will be adjusted to the other side of zero
            mask = np.mean(errors > 0,
                           axis=0)  # Average from boolean values per column
            positive = mask == 1  # Mask for columns with only positive values
            negative = mask == 0  # Mask for columns with only negative values
            # Minimum value for columns with only positive values
            room_fr.error[positive] = np.min(errors[:, positive], axis=0)
            # Maximum value for columns with only negative values (minimum absolute value)
            room_fr.error[negative] = np.max(errors[:, negative], axis=0)
            # Smoothen out kinks
            room_fr.smoothen_fractional_octave(window_size=1 / 6,
                                               treble_window_size=1 / 6)
            room_fr.error = room_fr.error_smoothed.copy()
        elif method == 'average':
            room_fr.error = np.mean(errors, axis=0)
            room_fr.smoothen_fractional_octave(window_size=1 / 3,
                                               treble_window_size=1 / 3)
        else:
            raise ValueError(
                f'Invalid value "{method}" for method. Supported values are "conservative" and "average"'
            )
    else:
        room_fr.error = errors[0, :]
        room_fr.smoothen_fractional_octave(window_size=1 / 3,
                                           treble_window_size=1 / 3)

    if limit > 0:
        # Zero error above limit
        start = np.argmax(room_fr.frequency > limit / 2)
        end = np.argmax(room_fr.frequency > limit)
        mask = np.concatenate([
            np.ones(start if start > 0 else 0),
            signal.windows.hann(end - start),
            np.zeros(len(room_fr.frequency) - end)
        ])
        room_fr.error *= mask
        room_fr.error_smoothed *= mask

    if plot:
        # Create dir
        room_plots_dir = os.path.join(dir_path, 'plots', 'room')
        os.makedirs(room_plots_dir, exist_ok=True)

        # Create generic FR plot
        fr = room_fr.copy()
        fr.name = 'Generic room measurement'
        fr.raw = fr.smoothed.copy()
        fr.error = fr.error_smoothed.copy()

        # Create figure and axes
        fig, ax = plt.subplots()
        fig.set_size_inches(15, 9)
        config_fr_axis(ax)
        ax.set_title('Generic room measurement')

        # Plot target, raw and error
        ax.plot(fr.frequency,
                fr.target,
                color=COLORS['lightpurple'],
                linewidth=5,
                label='Target')
        for raw in raws:
            raw.smoothen_fractional_octave(window_size=1 / 3,
                                           treble_window_size=1 / 3)
            ax.plot(raw.frequency, raw.smoothed, color='grey', linewidth=0.5)
        ax.plot(fr.frequency,
                fr.raw,
                color=COLORS['blue'],
                label='Raw smoothed')
        ax.plot(fr.frequency,
                fr.error,
                color=COLORS['red'],
                label='Error smoothed')
        ax.legend()

        # Set y limits
        sl = np.logical_and(fr.frequency >= 20, fr.frequency <= 20000)
        stack = np.vstack([fr.raw[sl], fr.error[sl], fr.target[sl]])
        ax.set_ylim(get_ylim(stack, padding=0.1))

        # Save FR figure
        save_fig_as_png(os.path.join(room_plots_dir, 'room.png'), fig)
        plt.close(fig)

    return room_fr
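A usage sketch combining the helpers from the earlier examples (paths hypothetical); room.wav must exist in the directory or None is returned:

estimator = ImpulseResponseEstimator.from_pickle('my-measurement/test.pkl')
target = open_room_target(estimator, 'my-measurement')
mic_cal = open_mic_calibration(estimator, 'my-measurement')
room_fr = open_generic_room_measurement(
    estimator, 'my-measurement', mic_cal, target,
    method='conservative', limit=1000, plot=True)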
Example #8
def main(test_signal):
    estimator = ImpulseResponseEstimator.from_pickle(test_signal)

    # Room mic calibration
    room_mic_calibration = os.path.join(DIR_PATH, 'room-mic-calibration.csv')
    if not os.path.isfile(room_mic_calibration):
        room_mic_calibration = os.path.join(DIR_PATH,
                                            'room-mic-calibration.txt')
    if os.path.isfile(room_mic_calibration):
        # File found, create frequency response
        room_mic_calibration = FrequencyResponse.read_from_csv(
            room_mic_calibration)
        room_mic_calibration.interpolate(f_step=1.01, f_min=10, f_max=20e3)
        room_mic_calibration.center()
    else:
        room_mic_calibration = None

    # Room measurement mic
    rooms = []
    for file_path in glob(os.path.join(DIR_PATH, 'room*.wav')):
        room = HRIR(estimator)
        room.open_recording(file_path, speakers=['FL'], side='left')
        fr = room.irs['FL']['left'].frequency_response()
        fr.interpolate(f_step=1.01, f_min=10, f_max=20e3)
        rooms.append(fr)
        if room_mic_calibration is not None:
            # Adjust by calibration data
            rooms[-1].raw -= room_mic_calibration.raw

    # Binaural mics
    lefts = []
    rights = []
    for file_path in glob(os.path.join(DIR_PATH, 'binaural*.wav')):
        binaural = HRIR(estimator)
        binaural.open_recording(file_path, speakers=['FL'])
        lefts.append(binaural.irs['FL']['left'].frequency_response())
        rights.append(binaural.irs['FL']['right'].frequency_response())

    # Setup plot
    fig, ax = plt.subplots()
    fig.set_size_inches(18, 9)
    ax.set_title('Microphone calibration')
    ax.set_xlabel('Frequency (Hz)')
    ax.semilogx()
    ax.set_xlim([20, 20e3])
    ax.set_ylabel('Amplitude (dB)')
    ax.grid(True, which='major')
    ax.grid(True, which='minor')
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))

    # Room measurement mic
    room = FrequencyResponse(name='Room measurement mic',
                             frequency=rooms[0].frequency,
                             raw=np.mean(np.vstack([x.raw for x in rooms]),
                                         axis=0))
    room.interpolate(f_step=1.01, f_min=10, f_max=20e3)
    room.smoothen_fractional_octave(window_size=1 / 6,
                                    treble_window_size=1 / 6)
    room.raw = room.smoothed.copy()
    room.smoothed = []
    room.center([60, 10000])
    ax.plot(room.frequency, room.raw, color='#680fb9', linewidth=0.5)

    # Left binaural mic
    left = FrequencyResponse(name='Left binaural mic',
                             frequency=lefts[0].frequency,
                             raw=np.mean(np.vstack([x.raw for x in lefts]),
                                         axis=0))
    left.interpolate(f_step=1.01, f_min=10, f_max=20e3)
    left.smoothen_fractional_octave(window_size=1 / 6,
                                    treble_window_size=1 / 6)
    left.raw = left.smoothed.copy()
    left.smoothed = []
    gain = left.center([60, 10000])
    ax.plot(left.frequency, left.raw, color='#7db4db', linewidth=0.5)
    ax.plot(left.frequency, left.raw - room.raw, color='#1f77b4')
    left.write_to_csv(os.path.join(DIR_PATH, 'left-mic-calibration.csv'))

    # Right binaural mic
    right = FrequencyResponse(name='Right binaural mic',
                              frequency=rights[0].frequency,
                              raw=np.mean(np.vstack([x.raw for x in rights]),
                                          axis=0))
    right.interpolate(f_step=1.01, f_min=10, f_max=20e3)
    right.smoothen_fractional_octave(window_size=1 / 6,
                                     treble_window_size=1 / 6)
    right.raw = right.smoothed.copy()
    right.smoothed = []
    right.raw += gain
    ax.plot(right.frequency, right.raw, color='#dd8081', linewidth=0.5)
    ax.plot(right.frequency, right.raw - room.raw, color='#d62728')
    right.write_to_csv(os.path.join(DIR_PATH, 'right-mic-calibration.csv'))

    ax.legend(
        ['Room', 'Left', 'Left calibration', 'Right', 'Right calibration'])

    # Save figure
    file_path = os.path.join(DIR_PATH, 'Results.png')
    fig.savefig(file_path, bbox_inches='tight')
    optimize_png_size(file_path)

    plt.show()
Example #9
def headphone_compensation(estimator, dir_path):
    """Equalizes HRIR tracks with headphone compensation measurement.

    Args:
        estimator: ImpulseResponseEstimator instance
        dir_path: Path to output directory

    Returns:
        Left and right side compensated FrequencyResponse instances
    """
    # Read WAV file
    hp_irs = HRIR(estimator)
    hp_irs.open_recording(os.path.join(dir_path, 'headphones.wav'), speakers=['FL', 'FR'])
    hp_irs.write_wav(os.path.join(dir_path, 'headphone-responses.wav'))

    # Frequency responses
    left = hp_irs.irs['FL']['left'].frequency_response()
    right = hp_irs.irs['FR']['right'].frequency_response()

    # Center by left channel
    gain = left.center([100, 10000])
    right.raw += gain

    # Compensate
    zero = FrequencyResponse(name='zero', frequency=left.frequency, raw=np.zeros(len(left.frequency)))
    left.compensate(zero, min_mean_error=False)
    right.compensate(zero, min_mean_error=False)

    # Headphone plots
    fig = plt.figure()
    gs = fig.add_gridspec(2, 3)
    fig.set_size_inches(22, 10)
    fig.suptitle('Headphones')

    # Left
    axl = fig.add_subplot(gs[0, 0])
    left.plot_graph(fig=fig, ax=axl, show=False)
    axl.set_title('Left')
    # Right
    axr = fig.add_subplot(gs[1, 0])
    right.plot_graph(fig=fig, ax=axr, show=False)
    axr.set_title('Right')
    # Sync axes
    sync_axes([axl, axr])

    # Combined
    _left = left.copy()
    _right = right.copy()
    gain_l = _left.center([100, 10000])
    gain_r = _right.center([100, 10000])
    ax = fig.add_subplot(gs[:, 1:])
    ax.plot(_left.frequency, _left.raw, linewidth=1, color='#1f77b4')
    ax.plot(_right.frequency, _right.raw, linewidth=1, color='#d62728')
    ax.plot(_left.frequency, _left.raw - _right.raw, linewidth=1, color='#680fb9')
    sl = np.logical_and(_left.frequency > 20, _left.frequency < 20000)
    stack = np.vstack([_left.raw[sl], _right.raw[sl], _left.raw[sl] - _right.raw[sl]])
    ax.set_ylim([np.min(stack) * 1.1, np.max(stack) * 1.1])
    axl.set_ylim([np.min(stack) * 1.1, np.max(stack) * 1.1])
    axr.set_ylim([np.min(stack) * 1.1, np.max(stack) * 1.1])
    ax.set_title('Comparison')
    ax.legend([f'Left raw {gain_l:+.1f} dB', f'Right raw {gain_r:+.1f} dB', 'Difference'], fontsize=8)
    ax.set_xlabel('Frequency (Hz)')
    ax.semilogx()
    ax.set_xlim([20, 20000])
    ax.set_ylabel('Amplitude (dBr)')
    ax.grid(True, which='major')
    ax.grid(True, which='minor')
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))

    # Save headphone plots
    file_path = os.path.join(dir_path, 'plots', 'headphones.png')
    os.makedirs(os.path.split(file_path)[0], exist_ok=True)
    save_fig_as_png(file_path, fig)
    plt.close(fig)

    return left, right
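A small sketch of the shared-gain centering used above: only the left channel defines the gain and the same shift is added to the right channel, so the level difference between the two ears is preserved. The values are made up, and center() is assumed to return the dB shift it applied, as the code above relies on:

import numpy as np

frequency = FrequencyResponse.generate_frequencies(f_min=20, f_max=20000, f_step=1.1)
left = FrequencyResponse(name='left', frequency=frequency, raw=np.full(len(frequency), 6.0))
right = FrequencyResponse(name='right', frequency=frequency, raw=np.full(len(frequency), 5.0))
gain = left.center([100, 10000])  # shifts left to 0 dB and returns the applied gain (-6 dB here)
right.raw += gain                 # right ends up at -1 dB: the 1 dB inter-channel difference is preserved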
Example #10
def main(dir_path=None,
         test_signal=None,
         room_target=None,
         room_mic_calibration=None,
         fs=None,
         plot=False,
         channel_balance=None,
         decay=None,
         target_level=None,
         fr_combination_method='average',
         specific_limit=20000,
         generic_limit=1000,
         bass_boost_gain=0.0,
         bass_boost_fc=105,
         bass_boost_q=0.76,
         tilt=0.0,
         do_room_correction=True,
         do_headphone_compensation=True,
         do_equalization=True):
    """"""
    if dir_path is None or not os.path.isdir(dir_path):
        raise NotADirectoryError(f'Given dir path "{dir_path}" is not a directory.')

    # Dir path as absolute
    dir_path = os.path.abspath(dir_path)

    # Impulse response estimator
    print('Creating impulse response estimator...')
    estimator = open_impulse_response_estimator(dir_path, file_path=test_signal)

    # Room correction frequency responses
    room_frs = None
    if do_room_correction:
        print('Running room correction...')
        _, room_frs = room_correction(
            estimator, dir_path,
            target=room_target,
            mic_calibration=room_mic_calibration,
            fr_combination_method=fr_combination_method,
            specific_limit=specific_limit,
            generic_limit=generic_limit,
            plot=plot
        )

    # Headphone compensation frequency responses
    hp_left, hp_right = None, None
    if do_headphone_compensation:
        print('Running headphone compensation...')
        hp_left, hp_right = headphone_compensation(estimator, dir_path)

    # Equalization
    eq_left, eq_right = None, None
    if do_equalization:
        print('Creating headphone equalization...')
        eq_left, eq_right = equalization(estimator, dir_path)

    # Bass boost and tilt
    print('Creating frequency response target...')
    target = create_target(estimator, bass_boost_gain, bass_boost_fc, bass_boost_q, tilt)

    # HRIR measurements
    print('Opening binaural measurements...')
    hrir = open_binaural_measurements(estimator, dir_path)

    # Write info and stats in readme
    write_readme(os.path.join(dir_path, 'README.md'), hrir, fs)

    if plot:
        # Plot graphs pre processing
        os.makedirs(os.path.join(dir_path, 'plots', 'pre'), exist_ok=True)
        print('Plotting BRIR graphs before processing...')
        hrir.plot(dir_path=os.path.join(dir_path, 'plots', 'pre'))

    # Crop noise and harmonics from the beginning
    print('Cropping impulse responses...')
    hrir.crop_heads()

    # Crop noise from the tail
    hrir.crop_tails()

    # Write multi-channel WAV file with sine sweeps for debugging
    hrir.write_wav(os.path.join(dir_path, 'responses.wav'))

    # Equalize all
    if do_headphone_compensation or do_room_correction or do_equalization:
        print('Equalizing...')
        for speaker, pair in hrir.irs.items():
            for side, ir in pair.items():
                fr = FrequencyResponse(
                    name=f'{speaker}-{side} eq',
                    frequency=FrequencyResponse.generate_frequencies(f_step=1.01, f_min=10, f_max=estimator.fs / 2),
                    raw=0, error=0
                )

                if room_frs is not None and speaker in room_frs and side in room_frs[speaker]:
                    # Room correction
                    fr.error += room_frs[speaker][side].error

                hp_eq = hp_left if side == 'left' else hp_right
                if hp_eq is not None:
                    # Headphone compensation
                    fr.error += hp_eq.error

                eq = eq_left if side == 'left' else eq_right
                if eq is not None and type(eq) == FrequencyResponse:
                    # Equalization
                    fr.error += eq.error

                # Remove bass and tilt target from the error
                fr.error -= target.raw

                # Smoothen and equalize
                fr.smoothen_heavy_light()
                fr.equalize(max_gain=40, treble_f_lower=10000, treble_f_upper=estimator.fs / 2)

                # Create FIR filter and equalize
                fir = fr.minimum_phase_impulse_response(fs=estimator.fs, normalize=False, f_res=5)
                ir.equalize(fir)

    # Adjust decay time
    if decay:
        print('Adjusting decay time...')
        for speaker, pair in hrir.irs.items():
            for side, ir in pair.items():
                if speaker in decay:
                    ir.adjust_decay(decay[speaker])

    # Correct channel balance
    if channel_balance is not None:
        print('Correcting channel balance...')
        hrir.correct_channel_balance(channel_balance)

    # Normalize gain
    print('Normalizing gain...')
    hrir.normalize(peak_target=None if target_level is not None else -0.1, avg_target=target_level)

    if plot:
        print('Plotting BRIR graphs after processing...')
        # Convolve test signal, re-plot waveform and spectrogram
        for speaker, pair in hrir.irs.items():
            for side, ir in pair.items():
                ir.recording = ir.convolve(estimator.test_signal)
        # Plot post processing
        hrir.plot(os.path.join(dir_path, 'plots', 'post'))

    # Plot results, always
    print('Plotting results...')
    hrir.plot_result(os.path.join(dir_path, 'plots'))

    # Re-sample
    if fs is not None and fs != hrir.fs:
        print(f'Resampling BRIR to {fs} Hz')
        hrir.resample(fs)
        hrir.normalize(peak_target=None if target_level is not None else -0.1, avg_target=target_level)

    # Write multi-channel WAV file with standard track order
    print('Writing BRIRs...')
    hrir.write_wav(os.path.join(dir_path, 'hrir.wav'))

    # Write multi-channel WAV file with HeSuVi track order
    hrir.write_wav(os.path.join(dir_path, 'hesuvi.wav'), track_order=HESUVI_TRACK_ORDER)
Example #11
    def channel_balance_firs(self, left_fr, right_fr, method):
        """Creates FIR filters for correcting channel balance

        Args:
            left_fr: Left side FrequencyResponse instance
            right_fr: Right side FrequencyResponse instance
            method: "trend" equalizes right side by the difference trend of right and left side. "left" equalizes
                    right side to left side fr, "right" equalizes left side to right side fr, "avg" equalizes both
                    to the average fr, "min" equalizes both to the minimum of left and right side frs. Number
                    values will boost or attenuate right side relative to left side by the number of dBs. "mids" is
                    the same as the numerical values but guesses the value automatically from mid frequency levels.

        Returns:
            List of two FIR filters as numpy arrays, first for left and second for right
        """
        if method == 'mids':
            # Find gain for right side
            # R diff - L diff = L mean - R mean
            gain = right_fr.copy().center([100, 3000]) - left_fr.copy().center(
                [100, 3000])
            gain = 10**(gain / 20)
            n = int(round(self.fs * 0.1))  # 100 ms
            firs = [signal.unit_impulse(n), signal.unit_impulse(n) * gain]

        elif method == 'trend':
            trend = FrequencyResponse(name='trend',
                                      frequency=left_fr.frequency,
                                      raw=left_fr.raw - right_fr.raw)
            trend.smoothen_fractional_octave(window_size=2,
                                             treble_f_lower=20000,
                                             treble_f_upper=int(
                                                 round(self.fs / 2)))
            # Trend is the equalization target
            right_fr.equalization = trend.smoothed
            # Unit impulse for left side and equalization FIR filter for right side
            fir = right_fr.minimum_phase_impulse_response(fs=self.fs,
                                                          normalize=False)
            firs = [signal.unit_impulse((len(fir))), fir]

        elif method == 'left' or method == 'right':
            if method == 'left':
                ref = left_fr
                subj = right_fr
            else:
                ref = right_fr
                subj = left_fr

            # Smoothen reference
            ref.smoothen_fractional_octave(window_size=1 / 3,
                                           treble_f_lower=20000,
                                           treble_f_upper=int(
                                               round(self.fs / 2)))
            # Center around 0 dB
            gain = ref.center([100, 10000])
            subj.raw += gain
            # Compensate and equalize to reference
            subj.target = ref.smoothed
            subj.error = subj.raw - subj.target
            subj.smoothen_heavy_light()
            subj.equalize(max_gain=15,
                          treble_f_lower=20000,
                          treble_f_upper=self.fs / 2)
            # Unit impulse for left side and equalization FIR filter for right side
            fir = subj.minimum_phase_impulse_response(fs=self.fs,
                                                      normalize=False)
            if method == 'left':
                firs = [signal.unit_impulse((len(fir))), fir]
            else:
                firs = [fir, signal.unit_impulse((len(fir)))]

        elif method == 'avg' or method == 'min':
            # Center around 0 dB
            left_gain = left_fr.copy().center([100, 10000])
            right_gain = right_fr.copy().center([100, 10000])
            gain = (left_gain + right_gain) / 2
            left_fr.raw += gain
            right_fr.raw += gain

            # Smoothen
            left_fr.smoothen_fractional_octave(window_size=1 / 3,
                                               treble_f_lower=20000,
                                               treble_f_upper=23999)
            right_fr.smoothen_fractional_octave(window_size=1 / 3,
                                                treble_f_lower=20000,
                                                treble_f_upper=23999)

            # Target
            if method == 'avg':
                # Target is the average between the two FRs
                target = (left_fr.raw + right_fr.raw) / 2
            else:
                # Target is the frequency-wise minimum of the two FRs
                target = np.min([left_fr.raw, right_fr.raw], axis=0)

            # Compensate and equalize both to the target
            firs = []
            for fr in [left_fr, right_fr]:
                fr.target = target.copy()
                fr.error = fr.raw - fr.target
                fr.smoothen_fractional_octave(window_size=1 / 3,
                                              treble_f_lower=20000,
                                              treble_f_upper=23999)
                fr.equalize(max_gain=15,
                            treble_f_lower=2000,
                            treble_f_upper=self.fs / 2)
                firs.append(
                    fr.minimum_phase_impulse_response(fs=self.fs,
                                                      normalize=False))

        else:
            # Must be numerical value
            try:
                gain = 10**(float(method) / 20)
                n = int(round(self.fs * 0.1))  # 100 ms
                firs = [signal.unit_impulse(n), signal.unit_impulse(n) * gain]
            except ValueError:
                raise ValueError(
                    f'"{method}" is not valid value for channel balance method.'
                )

        return firs
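A sketch of the numeric fallback branch above: passing the string '3' as the channel balance method boosts the right side by 3 dB relative to the left via a scaled unit impulse (the sampling rate is an assumption):

from scipy import signal

fs = 48000                      # assumed sampling rate
gain = 10 ** (float('3') / 20)  # +3 dB as a linear factor, ~1.413
n = int(round(fs * 0.1))        # 100 ms of samples -> 4800
firs = [signal.unit_impulse(n), signal.unit_impulse(n) * gain]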
def main():
    # Flat loudspeaker in room responses
    flat_in_room = FrequencyResponse.read_from_csv(
        os.path.join(DIR_PATH, 'harman-flat-loudspeaker-in-room.csv'))
    flat_in_room.interpolate(f_step=1.01, f_min=10, f_max=20000)

    # Harman room target
    room_target = FrequencyResponse.read_from_csv(
        os.path.join(DIR_PATH, 'harman-room-target-original.csv'))
    room_target.interpolate(f_step=1.01, f_min=10, f_max=20000)
    room_target.center()
    room_target.smoothen_fractional_octave(window_size=1 / 3)
    room_target.raw = room_target.smoothed
    room_target.smoothed = []
    # Drop infra bass
    drop = room_target._sigmoid(f_lower=12,
                                f_upper=24,
                                a_normal=-70,
                                a_treble=0)
    room_target.raw += drop

    # Harman 2018 over-ear headphone target
    over_ear = FrequencyResponse.read_from_csv(
        os.path.join(DIR_PATH, 'harman-over-ear-2018-without-bass.csv'))
    over_ear.interpolate(f_step=1.01, f_min=10, f_max=20000)
    over_ear.compensate(flat_in_room)
    over_ear.smoothen_fractional_octave(window_size=1 / 3)
    over_ear.raw = over_ear.smoothed.copy()
    over_ear.smoothed = []
    over_ear.error = over_ear.error_smoothed.copy()
    over_ear.error_smoothed = []

    # Virtual room target is Harman room target and the difference between Harman flat speaker in room and over-ear
    virtual_room_target = room_target.copy()
    virtual_room_target.raw += over_ear.error
    virtual_room_target_light = room_target.copy()
    virtual_room_target_light.raw[
        virtual_room_target.frequency > 1000] += over_ear.error[
            over_ear.frequency > 1000]

    # Save room targets
    room_target.write_to_csv(os.path.join(DIR_PATH, 'harman-room-target.csv'))
    virtual_room_target.write_to_csv(
        os.path.join(DIR_PATH, 'virtual-room-target.csv'))
    virtual_room_target_light.write_to_csv(
        os.path.join(DIR_PATH, 'virtual-room-target-light.csv'))

    # Plot
    fig, ax = over_ear.plot_graph(show=False)
    room_target.plot_graph(fig=fig, ax=ax, show=False, color='#1f77b4')
    virtual_room_target.plot_graph(fig=fig, ax=ax, show=False, color='#680fb9')
    virtual_room_target_light.plot_graph(fig=fig,
                                         ax=ax,
                                         show=False,
                                         color='#c17dff')
    plt.legend([
        'Harman flat loudspeaker in room', 'Harman over-ear 2018',
        'Difference', 'Harman room target', 'Virtual room target',
        'Virtual room target light'
    ])
    plt.xlim([10, 20000])
    plt.ylim([-65, 15])
    plt.title('Virtual Room Target')

    # Save figure
    figure_path = os.path.join(DIR_PATH, 'Results.png')
    fig.savefig(figure_path)
    optimize_png_size(figure_path)

    plt.show()
def main():
    estimator = ImpulseResponseEstimator.from_pickle(TEST_SIGNAL)

    # Open feedback measurement
    feedback = HRIR(estimator)
    feedback.open_recording(os.path.join(DIR_PATH, 'headphones-FL,FR.wav'), speakers=['FL', 'FR'])
    feedback.crop_heads()
    feedback.crop_tails()

    # Open feedforward measurement
    # Only FL-left and FR-right are needed here
    feedforward = HRIR(estimator)
    feedforward.open_recording(os.path.join(DIR_PATH, 'headphones.wav'), speakers=['FL', 'FR'])
    ffl = feedforward.irs['FL']['left'].frequency_response()
    ff_gain = ffl.center([100, 10000])
    zero = FrequencyResponse(name='zero', frequency=ffl.frequency, raw=np.zeros(ffl.frequency.shape))
    ffl.compensate(zero)
    ffl.smoothen_heavy_light()
    ffr = feedforward.irs['FR']['right'].frequency_response()
    ffr.raw += ff_gain
    ffr.compensate(zero)
    ffr.smoothen_heavy_light()
    feedforward_errors = {'left': ffl, 'right': ffr}

    # Open HRIR measurement
    hrir = HRIR(estimator)
    hrir.open_recording(os.path.join(DIR_PATH, 'FL,FR.wav'), speakers=['FL', 'FR'])
    hrir.crop_heads()
    hrir.crop_tails()
    fllfr = hrir.irs['FL']['left'].frequency_response()
    gain = fllfr.center([100, 10000])

    # Feedback vs HRIR
    fig, ax = plt.subplots(3, 2)
    fig.set_size_inches(18, 12)
    fig.suptitle('Feedback Compensation')
    i = 0
    feedback_errors = {'left': None, 'right': None}
    for speaker, pair in feedback.irs.items():
        j = 0
        for side, ir in pair.items():
            # HRIR is the target
            target = hrir.irs[speaker][side].frequency_response()
            target.raw += gain
            target.smoothen_fractional_octave(window_size=1/3, treble_window_size=1/3)

            # Frequency response of the headphone feedback measurement
            fr = ir.frequency_response()
            fr.raw += gain
            fr.error = fr.raw - target.raw
            fr.smoothen_heavy_light()
            # Add to this side average
            if feedback_errors[side] is None:
                feedback_errors[side] = fr.error_smoothed
            else:
                feedback_errors[side] += fr.error_smoothed

            # Plot
            ir.plot_fr(fr=fr, fig=fig, ax=ax[i, j])
            ax[i, j].set_title(f'{speaker}-{side}')
            ax[i, j].set_ylim([np.min(fr.error_smoothed), np.max(fr.error_smoothed)])

            j += 1
        i += 1

    for i, side in enumerate(['left', 'right']):
        feedback_errors[side] = FrequencyResponse(
            name=side,
            frequency=fllfr.frequency.copy(),
            error=feedback_errors[side] / 2
        )
        feedback_errors[side].plot_graph(fig=fig, ax=ax[2, i], show=False)

    sync_axes([ax[i, j] for i in range(ax.shape[0]) for j in range(ax.shape[1])])
    save_fig_as_png(os.path.join(DIR_PATH, 'feedback.png'), fig)

    # Feedforward
    fig, ax = plt.subplots(1, 2)
    fig.set_size_inches(18, 9)
    fig.suptitle('Feedforward Compensation')
    ffl.plot_graph(fig=fig, ax=ax[0], show=False)
    ffr.plot_graph(fig=fig, ax=ax[1], show=False)
    save_fig_as_png(os.path.join(DIR_PATH, 'feedforward.png'), fig)

    # Feedback compensation vs Feedforward compensation
    feedback_errors['left'].raw = feedback_errors['left'].error
    fbg = feedback_errors['left'].center([200, 2000])
    feedback_errors['left'].error = feedback_errors['left'].raw
    feedback_errors['left'].raw = []
    feedback_errors['right'].error += fbg

    feedforward_errors['left'].raw = feedforward_errors['left'].error_smoothed
    ffg = feedforward_errors['left'].center([200, 2000])
    feedforward_errors['left'].error_smoothed = feedforward_errors['left'].raw
    feedforward_errors['left'].raw = []
    feedforward_errors['right'].error_smoothed += ffg

    fig, ax = plt.subplots(1, 2)
    fig.set_size_inches(18, 9)
    fig.suptitle('Feedback vs Feedforward')
    sl = np.logical_and(feedback_errors['left'].frequency > 20, feedback_errors['left'].frequency < 20000)
    stack = [
        feedback_errors['left'].error[sl],
        feedback_errors['right'].error[sl],
        feedforward_errors['left'].error_smoothed[sl],
        feedforward_errors['right'].error_smoothed[sl],
    ]
    for i, side in enumerate(['left', 'right']):
        config_fr_axis(ax[i])
        ax[i].plot(feedback_errors[side].frequency, feedback_errors[side].error)
        ax[i].plot(feedforward_errors[side].frequency, feedforward_errors[side].error_smoothed)
        difference = feedback_errors[side].error - feedforward_errors[side].error_smoothed
        stack.append(difference[sl])
        ax[i].plot(feedback_errors[side].frequency, difference, color=COLORS['red'])
        ax[i].set_title(side)
        ax[i].legend(['Feedback', 'Feedforward', 'Difference'])

    stack = np.concatenate(stack)
    ax[0].set_ylim([np.min(stack), np.max(stack)])
    ax[1].set_ylim([np.min(stack), np.max(stack)])

    save_fig_as_png(os.path.join(DIR_PATH, 'comparison.png'), fig)
    plt.show()