Example #1
def main():
    iem19 = FrequencyResponse.read_from_csv(os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2019v2_orig.csv'))
    iem19.interpolate()
    iem19.center()
    iem19.name = 'Harman in-ear 2019v2'
    iem19.write_to_csv(os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2019v2.csv'))

    iem19_wo_bass = iem19.copy()
    iem19_wo_bass.name = 'Harman in-ear 2019v2 without bass boost'
    # Flatten the response below its minimum point under 1 kHz to remove the bass boost
    ind = np.argmin(iem19_wo_bass.raw[iem19_wo_bass.frequency < 1000])
    iem19_wo_bass.raw[:ind] = iem19_wo_bass.raw[ind]
    iem19_wo_bass.plot_graph(
        show=False, color='C0',
        file_path=os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2019v2_wo_bass.png')
    )
    iem19_wo_bass.write_to_csv(os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2019v2_wo_bass.csv'))

    iem17 = FrequencyResponse.read_from_csv(os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2017-1.csv'))
    usound = FrequencyResponse.read_from_csv(os.path.join(ROOT_DIR, 'compensation', 'usound.csv'))
    oe18 = FrequencyResponse.read_from_csv(os.path.join(ROOT_DIR, 'compensation', 'harman_over-ear_2018.csv'))

    fig, ax = iem19.plot_graph(
        show=False, color='C0',
        file_path=os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2019v2.png')
    )
    iem17.plot_graph(fig=fig, ax=ax, show=False, color='C1')
    usound.plot_graph(fig=fig, ax=ax, color='C2', show=False)
    oe18.plot_graph(fig=fig, ax=ax, color='C3', show=False)

    ax.legend(['Harman in-ear 2019v2', 'Harman in-ear 2017-1', 'Usound', 'Harman over-ear 2018'])
    ax.set_title('In-ear Headphone Targets')
    plt.show()
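The snippets on this page are taken from the AutoEq project and omit their module-level imports. A preamble along the following lines is assumed throughout; the project-internal module paths are a guess based on the names used, and helpers such as fft, digital_coeffs, low_shelf, MOD_REGEX and form_url are project-specific and only sketched where noted.

import os
import re
import math
import colorsys
from glob import glob

import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
from matplotlib import ticker
from tabulate import tabulate
from PIL import ImageDraw

# Project-internal imports (module paths guessed from the names used in the snippets)
from frequency_response import FrequencyResponse
from constants import ROOT_DIR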
Example #2
    def limited_delta(cls, x, y, limit):
        # Smooth a copy of the curve and find its dips (peaks of the negated curve)
        peak_finder = cls(name='peak_finder', frequency=x, raw=y)
        peak_finder.smoothen_fractional_octave(window_size=1 / 12,
                                               treble_window_size=1 / 3,
                                               treble_f_lower=9000,
                                               treble_f_upper=11000)
        peaks = scipy.signal.find_peaks(-peak_finder.smoothed)[0]
        # Slope limiting starts from the first dip when walking forward and from the
        # last dip when walking backward over the reversed arrays
        forward_start = peaks[0]
        backward_start = len(x) - peaks[-1] - 1

        limited_forward = cls.limited_forward_delta(x,
                                                    y,
                                                    limit,
                                                    start_index=forward_start)
        limited_backward = cls.limited_forward_delta(
            np.flip(x), np.flip(y), -limit, start_index=backward_start)
        limited_backward = np.flip(limited_backward)

        # Element-wise minimum of the forward- and backward-limited deltas, smoothed once more
        _fr = FrequencyResponse(name='limiter',
                                frequency=x.copy(),
                                raw=np.min(np.vstack(
                                    [limited_forward, limited_backward]),
                                           axis=0))
        _fr.smoothen_fractional_octave(window_size=1 / 6,
                                       treble_window_size=1 / 6)
        return _fr.smoothed.copy()
Example #3
    def parse_json(json_data):
        header = json_data['header']
        data = np.array(json_data['data'])
        frequency = data[:, header.index('Frequency')]
        target = data[:, header.index('Target Response')]
        left_col = header.index('Left')
        right_col = header.index('Right')

        for row in data:
            if row[left_col] == np.array(None):
                row[left_col] = row[len(header) - header[::-1].index('Left') -
                                    1]
            if row[right_col] == np.array(None):
                row[right_col] = row[len(header) -
                                     header[::-1].index('Right') - 1]

        left = data[:, left_col]
        right = data[:, right_col]

        raw = np.mean([left, right], axis=0)
        if np.std(target) > 0:
            raw += target
        fr = FrequencyResponse(name='fr', frequency=frequency, raw=raw)
        target = FrequencyResponse(name='target',
                                   frequency=frequency,
                                   raw=target)
        return fr, target
Example #4
def main():
    serious = FrequencyResponse.read_from_csv(
        'resources/rtings_compensation_sbaf-serious.csv')
    native = FrequencyResponse.read_from_csv(
        'resources/rtings_compensation.csv')
    avg = measurements_avg()
    hd650 = FrequencyResponse.read_from_csv(
        'data/onear/Sennheiser HD 650/Sennheiser HD 650.csv')

    fig, ax = native.plot_graph(show=False, color=None)
    serious.plot_graph(fig=fig, ax=ax, show=False, color=None)
    avg.plot_graph(fig=fig, ax=ax, show=False, color=None)
    hd650.plot_graph(fig=fig, ax=ax, show=False, color=None)

    plt.legend([
        'Rtings native', 'SBAF Serious', 'Measurements Avg',
        'Sennheiser HD 650'
    ])
    plt.title('Rtings Targets')
    plt.ylim([-10.0, 12.0])
    plt.show()
    fig.savefig('resources/rtings_targets_comparison.png', dpi=120)

    avg.raw[avg.frequency < 2500] = native.raw[native.frequency < 2500]
    avg.write_to_csv('resources/rtings_compensation_avg.csv')
    avg.plot_graph(show=False,
                   file_path='resources/rtings_compensation_avg.png',
                   color=None)
Example #5
def main():
    harman = FrequencyResponse.read_from_csv(
        os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2019v2.csv'))
    harman.name = 'oratory1990 vs Harman in-ear 2019'
    oratory1990 = FrequencyResponse.read_from_csv(
        os.path.join(ROOT_DIR, 'compensation', 'usound.csv'))
    harman.compensate(oratory1990)
    harman.smoothen_fractional_octave(window_size=1 / 6,
                                      treble_window_size=1 / 6)
    harman.equalize()
    filters, _, _ = harman.optimize_parametric_eq(max_filters=2, fs=48000)
    filters_formatted = []
    for i in range(len(filters)):
        filters_formatted.append(['Peaking'] +
                                 [f'{x:.2f}' for x in filters[i]])
    filters_table_str = tabulate(filters_formatted,
                                 headers=['Type', 'Fc', 'Q', 'Gain'],
                                 tablefmt='orgtbl').replace('+', '|').replace(
                                     '|-', '|:')
    print(filters_table_str)
    fig, ax = harman.plot_graph(show=False)
    ax.legend([
        'oratory1990', 'Harman in-ear 2019 1/6 oct smoothed',
        'Difference 1/6 oct smoothed', 'Harman in-ear 2019',
        'Difference 1/6 oct smoothed', 'Parametric EQ with 2 filters',
        'Equalization target', 'Equalized'
    ])
    plt.show()
Example #6
def write_ranking_table():
    harman_inear = os.path.join(ROOT_DIR, 'compensation', 'harman_in-ear_2019v2.csv')
    harman_inear = FrequencyResponse.read_from_csv(harman_inear)
    harman_overear = os.path.join(ROOT_DIR, 'compensation', 'harman_over-ear_2018.csv')
    harman_overear = FrequencyResponse.read_from_csv(harman_overear)

    onear_rows = []
    # Over-ear
    files = dict()
    for fp in glob(os.path.join(ROOT_DIR, 'results', 'crinacle', 'gras_43ag-7_harman_over-ear_2018', '*', '*.csv')):
        files[os.path.split(fp)[1]] = fp
    for fp in glob(os.path.join(ROOT_DIR, 'results', 'oratory1990', 'harman_over-ear_2018', '*', '*.csv')):
        files[os.path.split(fp)[1]] = fp
    for fp in files.values():
        row = ranking_row(fp, harman_overear, 'onear')
        if row:
            onear_rows.append(row)
    onear_rows = sorted(onear_rows, key=lambda row: float(row[1]), reverse=True)
    onear_str = tabulate(onear_rows, headers=['Name', 'Score', 'STD (dB)', 'Slope'], tablefmt='orgtbl')
    onear_str = onear_str.replace('+', '|').replace('|-', '|:')

    inear_rows = []
    # In-ear
    files = dict()
    for fp in glob(os.path.join(ROOT_DIR, 'results', 'crinacle', 'harman_in-ear_2019v2', '*', '*.csv')):
        files[os.path.split(fp)[1]] = fp
    for fp in glob(os.path.join(ROOT_DIR, 'results', 'oratory1990', 'harman_in-ear_2019v2', '*', '*.csv')):
        files[os.path.split(fp)[1]] = fp
    for fp in files.values():
        row = ranking_row(fp, harman_inear, 'inear')
        if row:
            inear_rows.append(row)
    inear_rows = sorted(inear_rows, key=lambda row: float(row[1]), reverse=True)
    inear_str = tabulate(inear_rows, headers=['Name', 'Score', 'STD (dB)', 'Slope', 'Average (dB)'], tablefmt='orgtbl')
    inear_str = inear_str.replace('-+-', '-|-').replace('|-', '|:')

    s = f'''# Headphone Ranking
    Headphones ranked by Harman headphone listener preference scores.

    Tables include the preference score (Score), standard deviation of the error (STD), slope of the logarithmic
    regression fit of the error (Slope) for both headphone types and average of the absolute error (Average) for in-ear
    headphones. STD tells how much the headphone deviates from neutral and slope tells if the headphone is warm (< 0) or
    bright (> 0).

    Keep in mind that these numbers are calculated with deviations from Harman targets. The linked results use different
    levels of bass boost so the slope numbers here won't match the error curves you see in the linked results.

    Over-ear table includes headphones measured by oratory1990 and Crinacle using GRAS systems. Measurements from
    other databases and systems are not included because they are not compatible with measurements, targets and
    preference scoring developed by Sean Olive et al.
    
    ## Over-ear Headphones    
    {onear_str}

    ## In-ear Headphones
    {inear_str}

    '''
    with open(os.path.join(ROOT_DIR, 'results', 'RANKING.md'), 'w', encoding='utf-8') as fh:
        fh.write(re.sub('\n[ \t]+', '\n', s).strip())
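The .replace() chain above converts tabulate's orgtbl output into a GitHub-flavored markdown table. A standalone sketch of the same trick, with a made-up row:

from tabulate import tabulate

rows = [['[Hypothetical Headphone](https://example.com)', '85', '2.10', '-0.05']]
s = tabulate(rows, headers=['Name', 'Score', 'STD (dB)', 'Slope'], tablefmt='orgtbl')
# orgtbl marks column intersections with '+'; turning '+' into '|' and '|-' into '|:'
# produces a valid markdown header separator row with left-aligned columns
s = s.replace('+', '|').replace('|-', '|:')
print(s)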
Example #7
    def parse_image(im, model):
        """Parses graph image downloaded from innerfidelity.com"""
        # Crop by left and right edges
        box = (69, 31, 550, 290)
        im = im.crop(box)

        px_a_max = 0
        px_a_min = im.size[1]
        # im.show()

        # X axis
        f_min = 20
        f_max = 20000
        f_step = (f_max / f_min)**(1 / im.size[0])
        f = [f_min]
        for _ in range(1, im.size[0]):
            f.append(f[-1] * f_step)

        # Y axis
        a_max = 150
        a_min = 66
        a_res = (a_max - a_min) / (px_a_min - px_a_max)

        # Try blue curve
        _im = im.copy()
        inspection = _im.load()
        amplitude, _im, _inspection = ReferenceAudioAnalyzerCrawler.find_curve(
            _im, inspection, 203, 206, 0.8, 1.0, a_max, a_res)
        if len([x for x in amplitude if x is None]) >= 0.5 * len(amplitude):
            # At least half of the pixels were discarded, try the green curve
            _im = im.copy()
            inspection = _im.load()
            amplitude, _im, _inspection = ReferenceAudioAnalyzerCrawler.find_curve(
                _im, inspection, 119, 121, 0.8, 1.0, a_max, a_res)

        # Inspection image
        draw = ImageDraw.Draw(_im)
        x0 = np.log(30 / f_min) / np.log(f_step)
        x1 = np.log(10000 / f_min) / np.log(f_step)
        y_0 = px_a_max + 12 / a_res
        y_1 = px_a_min - 12 / a_res
        draw.rectangle(((x0, y_0), (x1, y_1)), outline='magenta')
        draw.rectangle(((x0 + 1, y_0 + 1), (x1 - 1, y_1 - 1)),
                       outline='magenta')

        # Create frequency response
        fr = FrequencyResponse(model, f, amplitude)
        fr.interpolate()
        if len(fr.frequency) < 2:
            im.show()
            raise ValueError(f'Failed to parse image for {fr.name}')
        fr.smoothen_fractional_octave(window_size=1 / 3,
                                      treble_window_size=1 / 3)
        fr.raw = fr.smoothed.copy()
        fr.smoothed = np.array([])
        fr.center()

        return fr, _im
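The parsers above assign each pixel column a frequency by repeated multiplication with f_step. The same mapping in vectorized form, shown only to make the geometry explicit (481 is the width of the crop box used above):

import numpy as np

width, f_min, f_max = 481, 20, 20000
# Column x maps to f_min * (f_max / f_min) ** (x / width), i.e. log-spaced frequencies
f = f_min * (f_max / f_min) ** (np.arange(width) / width)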
Example #8
def main():
    fr = FrequencyResponse.read_from_csv(
        'my_data/HiFiMAN HE400S/HiFiMAN HE400S.csv')
    fr.compensate(FrequencyResponse.read_from_csv(
        'innerfidelity/resources/innerfidelity_compensation_sbaf-serious.csv'),
                  bass_boost=4.0,
                  bass_boost_f_lower=35,
                  bass_boost_f_upper=280)
    fr.equalize()
    ir = fr.minimum_phase_impulse_response(fs=48000, filter_length=2**14)
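The snippet computes the impulse response but does nothing with it; a hypothetical follow-up (not part of the original) could write it to a WAV file with SciPy:

import numpy as np
from scipy.io import wavfile

# 'ir' is the minimum phase impulse response computed above; the file name is made up
wavfile.write('HiFiMAN HE400S minimum phase.wav', 48000, ir.astype(np.float32))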
Example #9
def pop_frequency_distribution():
    # Approximation of pop music frequency distribution
    fr = FrequencyResponse(name='Pop Music Frequency Distribution',
                           frequency=[
                               20, 100, 100 * math.sqrt(10), 1000,
                               1000 * math.sqrt(10), 10000, 20000
                           ],
                           raw=[-30, 0, -3, -6, -10, -20, -50])
    fr.raw += 80
    fr.interpolate(pol_order=3, f_min=20, f_max=12500)
    #fr.center()
    return fr
Example #10
def main():
    compensation = FrequencyResponse.read_from_csv(
        os.path.join('resources', 'innerfidelity_compensation_2016.csv'))
    compensation.center()
    for file in glob(os.path.join('data', 'adhoc', '**', '*.csv'),
                     recursive=True):
        fr = FrequencyResponse.read_from_csv(file)
        print(fr.name)
        fr.interpolate()
        fr.center()
        fr.raw += compensation.raw
        fr.write_to_csv(file)
Example #11
def parse_json(json_data, name):
    header = json_data['header']
    data = np.array(json_data['data'])
    frequency = data[:, header.index('Frequency')]
    left = data[:, header.index('Left')]
    right = data[:, header.index('Right')]
    target = data[:, header.index('Target Response')]
    raw = np.mean([left, right], axis=0)
    if np.std(target) > 0:
        raw += target
    fr = FrequencyResponse(name=name, frequency=frequency, raw=raw)
    target = FrequencyResponse(name='', frequency=frequency, raw=target)
    return fr, target
Example #12
def main():
    models = {}
    for file_path in glob(os.path.join(DIR, '*')):
        model = os.path.split(file_path)[-1]
        if not (re.search(' sample [a-zA-Z0-9]$', model)
                or re.search(' sn[a-zA-Z0-9]+$', model)):
            # Skip measurements without a sample or serial number suffix; those are already averaged results
            continue
        norm = re.sub(' sample [a-zA-Z0-9]$', '', model)
        norm = re.sub(' sn[a-zA-Z0-9]+$', '', norm)
        try:
            models[norm].append(model)
        except KeyError as err:
            models[norm] = [model]

    for norm, origs in models.items():
        if len(origs) > 1:
            print(norm, origs)
            avg = np.zeros(613)
            f = FrequencyResponse.generate_frequencies()
            for model in origs:
                fr = FrequencyResponse.read_from_csv(
                    os.path.join(DIR, model, model + '.csv'))
                fr.interpolate()
                fr.center()
                avg += fr.raw
            avg /= len(origs)
            fr = FrequencyResponse(name=norm, frequency=f, raw=avg)
            d = os.path.join(OUT_DIR, norm)
            if not os.path.isdir(d):
                os.makedirs(d)
            fr.write_to_csv(os.path.join(d, norm + '.csv'))
            fr.plot_graph()
Example #13
def main():
    for file in glob('oratory1990/data/*/*/*.csv'):
        fr = FrequencyResponse.read_from_csv(file)
        fr.interpolate()
        fr.center()
        fr.write_to_csv(file)
        print(fr.name)
Example #14
def peq2fr(fc, q, gain, filts):
    if type(fc) in [float, int]:
        fc = np.array([fc])
    if type(q) in [float, int]:
        q = np.array([q])
    if type(gain) in [float, int]:
        gain = np.array([gain])
    if type(filts) == str:
        filts = [filts] * len(fc)
    fr = FrequencyResponse(name='PEQ')
    c = np.zeros(fr.frequency.shape)
    for i, filt in enumerate(filts):
        a0, a1, a2, b0, b1, b2 = fns[filt](fc[i], q[i], gain[i], fs=fs)
        c += digital_coeffs(fr.frequency, fs, a0, a1, a2, b0, b1, b2)
    fr.raw = c
    return fr
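peq2fr relies on module-level globals fs and fns that the snippet does not show. A guess at what they look like, followed by a hypothetical call; the filter type names and the peaking/high_shelf helpers are assumptions (low_shelf and digital_coeffs do appear in Example #26):

fs = 48000
# fns maps a filter type name to a function returning biquad coefficients
# (a0, a1, a2, b0, b1, b2) for a given fc, Q, gain and sampling rate
fns = {'LOW_SHELF': low_shelf, 'PEAKING': peaking, 'HIGH_SHELF': high_shelf}

# Hypothetical usage: two peaking filters, +6 dB at 100 Hz and -3 dB at 3 kHz
fr = peq2fr([100.0, 3000.0], [0.7, 1.4], [6.0, -3.0], 'PEAKING')
fr.plot_graph()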
Example #15
def main():
    with open('results/README.md', 'r') as f:
        lines = f.read().split('\n')[1:]

    models = []
    rmses = []

    for line in lines:
        # Parse file path
        model = re.search(r'\[.+\]', line)[0][1:-1]
        url = re.search(r'\]\(.+\)', line)[0][2:-1]
        path = os.path.abspath(url[52:].replace('%20', ' '))
        specific_model = os.path.split(path)[-1]
        path = os.path.join(path, specific_model + '.csv')
        # Record model and RMSE of frequency response
        fr = FrequencyResponse.read_from_csv(path)
        models.append(model)
        rmses.append(np.sqrt(np.mean(np.square(fr.error))))

    # Sort models by RMSE
    models = np.array(models)
    rmses = np.array(rmses)
    sorted_inds = np.argsort(rmses)
    rmses = rmses[sorted_inds]
    models = models[sorted_inds]

    data = np.transpose(np.vstack((models, rmses)))
    print(data.shape)
    s = '# Hall of Fame\nHeadphones with smallest deviation (RMSE) from ideal frequency target.\n'
    for row in data:
        s += '- {model}: {rmse:.2f}dB\n'.format(model=row[0], rmse=float(row[1]))
    with open('Hall of Fame.md', 'w') as f:
        f.write(s)
Example #16
def main():
    fs = 48000
    f_res = 5
    input_dir = os.path.join('oratory1990', 'data', 'onear')
    glob_files = glob(os.path.join(input_dir, '**', '*.csv'), recursive=True)
    for input_file_path in glob_files:
        fr = FrequencyResponse.read_from_csv(input_file_path)
        fr.equalization = fr.raw
        fr.raw = np.array([])

        mp = fr.minimum_phase_impulse_response(fs=fs, f_res=f_res)
        f_mp, mp = fft(mp, fs)

        lp = fr.linear_phase_impulse_response(fs=fs, f_res=f_res)
        f_lp, lp = fft(lp, fs)

        plt.plot(f_lp, lp)
        plt.plot(f_mp, mp)
        plt.legend(['Linear phase', 'Minimum phase'])
        plt.semilogx()
        plt.xlabel('Frequency (Hz)')
        plt.xlim([20, 20000])
        plt.ylabel('Gain (dBr)')
        plt.ylim([-40, 0])
        plt.title(fr.name)
        plt.show()
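This snippet and Example #25 below call an fft(x, fs) helper that is not shown; judging from how the result is plotted (dB magnitude against frequency), it behaves roughly like this sketch:

import numpy as np

def fft(x, fs):
    """Frequency bins and magnitude in dB of a real signal x sampled at fs (a guess at the helper)."""
    mag = np.abs(np.fft.rfft(x))
    mag_db = 20 * np.log10(np.maximum(mag, 1e-9))  # clamp to avoid log(0)
    f = np.fft.rfftfreq(len(x), d=1 / fs)
    return f, mag_db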
Example #17
def ranking_row(file_path, target, form='onear'):
    dir_path = os.path.abspath(os.path.join(file_path, os.pardir))
    rel_path = os.path.relpath(dir_path, os.path.join(ROOT_DIR, 'results'))
    url = form_url(rel_path)
    fr = FrequencyResponse.read_from_csv(file_path)
    if re.search(MOD_REGEX, fr.name):
        return None
    fr.interpolate()
    # Pre-computed results are with the Harman target without bass boost
    fr.compensate(target, bass_boost_gain=0.0)
    if form == 'onear':
        score, std, slope = fr.harman_onear_preference_score()
        return [
            f'[{fr.name}]({url})', f'{score:.0f}', f'{std:.2f}', f'{slope:.2f}'
        ]
    elif form == 'inear':
        score, std, slope, mean = fr.harman_inear_preference_score()
        return [
            f'[{fr.name}]({url})', f'{score:.0f}', f'{std:.2f}',
            f'{slope:.2f}', f'{mean:.2f}'
        ]
    if '|' in f'[{fr.name}]({url})':
        print(file_path)
        print(fr.name)
        print(f'[{fr.name}]({url})')
Example #18
    def parse_cropped(im,
                      name='fr',
                      f_min=20,
                      f_max=20000,
                      a_min=-20,
                      a_max=20):
        """Parses an image which has been cropped tightly to given boundaries. Image left boundary must be cropped
        to f_min, right boundary to f_max, bottom boundary to a_min and top boundary to a_max. Only colored pixels will
        be scanned.

        Args:
            im: Image
            name: Name of the image / produced FrequencyResponse
            f_min: Frequency at left boundary of the image
            f_max: Frequency at right boundary of the image
            a_min: Amplitude at bottom boundary of the image
            a_max: Amplitude at top boundary of the image

        Returns:
            FrequencyResponse created from colored pixels in the image
        """
        # X axis (frequencies)
        f_step = (f_max / f_min)**(1 / im.size[0])
        f = [f_min]
        for _ in range(1, im.size[0]):
            f.append(f[-1] * f_step)

        # Y axis (amplitude)
        a_res = (a_max - a_min) / im.size[1]  # dB / px

        _im = im.copy()
        pix = _im.load()
        amplitude = []
        for x in range(im.size[0]):
            pxs = []  # Graph pixels
            # Iterate each row (pixel in column)
            for y in range(im.size[1]):
                # Read the RGB pixel values and convert to HSV
                h, s, v = colorsys.rgb_to_hsv(
                    *[v / 255.0 for v in im.getpixel((x, y))])
                # Graph pixels are colored
                if s > 0.8:
                    pxs.append(float(y))
                else:
                    p = im.getpixel((x, y))
                    pix[x, y] = (int(0.9 * p[0]), int(255 * 0.1 + 0.9 * p[1]),
                                 int(0 + 0.9 * p[2]))
            if not pxs:
                # No graph pixels found on this column
                amplitude.append(None)
            else:
                # Mean of recorded pixels
                v = np.mean(pxs)
                # Convert to dB value
                v = a_max - v * a_res
                amplitude.append(v)
        return FrequencyResponse(name=name, frequency=f, raw=amplitude)
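A hypothetical usage of parse_cropped with Pillow, assuming an image already cropped exactly to the graph area (in the project the function is defined as a method, so it would normally be called on its class):

from PIL import Image

im = Image.open('cropped_graph.png').convert('RGB')  # hypothetical input file
fr = parse_cropped(im, name='Some Headphone', f_min=20, f_max=20000, a_min=-20, a_max=20)
fr.interpolate()
fr.plot_graph()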
Example #19
def main():
    paths = list(
        glob(os.path.join(ROOT_DIR, 'measurements', '**', '*.csv'),
             recursive=True))
    paths += list(
        glob(os.path.join(ROOT_DIR, 'compensation', '*.csv'), recursive=True))
    for file_path in paths:
        fr = FrequencyResponse.read_from_csv(file_path)
        fr.interpolate()
        fr.write_to_csv(file_path)
Example #20
    def parse_json(json_data):
        """Parses Rtings.com JSON data

        The columns should be "Frequency", "Left", "Right", "Target Response", "Left", "Right". Some rows might have
        data in the first Left and Right, some might have it in the second Left and Right.

        Args:
            json_data: JSON data object as returned by Rtings API

        Returns:
            - Parsed raw frequency response as FrequencyResponse
            - Parsed target response as FrequencyResponse
        """
        header = json_data['header']
        data = np.array(json_data['data'])
        frequency = data[:, header.index('Frequency')]
        target = data[:, header.index('Target Response')]
        left_col = header.index('Left')
        right_col = header.index('Right')

        for row in data:
            if row[left_col] == np.array(None):
                # Data missing at the first "Left" column, use the last "Left" column
                row[left_col] = row[len(header) - header[::-1].index('Left') -
                                    1]
            if row[right_col] == np.array(None):
                # Data missing at the first "Right" column, use the last "Right" column
                row[right_col] = row[len(header) -
                                     header[::-1].index('Right') - 1]

        left = data[:, left_col]
        right = data[:, right_col]

        raw = np.mean([left, right], axis=0)
        if np.std(target) > 0:
            raw += target
        fr = FrequencyResponse(name='fr', frequency=frequency, raw=raw)
        target = FrequencyResponse(name='target',
                                   frequency=frequency,
                                   raw=target)
        return fr, target
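To make the column layout described in the docstring concrete, a hypothetical payload of the kind parse_json expects could look like this (all values made up):

json_data = {
    'header': ['Frequency', 'Left', 'Right', 'Target Response', 'Left', 'Right'],
    'data': [
        [20.0, -1.2, -1.0, 0.0, None, None],  # data in the first Left/Right pair
        [25.0, None, None, 0.0, -1.1, -0.9],  # data only in the second Left/Right pair
    ],
}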
Example #21
def average_measurements(input_dir=None, output_dir=None):
    if input_dir is None:
        raise TypeError('Input directory path is required!')
    if output_dir is None:
        output_dir = os.path.abspath(input_dir)
    input_dir = os.path.abspath(input_dir)
    output_dir = os.path.abspath(output_dir)

    models = {}
    for file_path in glob(os.path.join(input_dir, '*')):
        model = os.path.split(file_path)[-1]
        if not re.search(MOD_REGEX, model, re.IGNORECASE):
            continue
        norm = re.sub(MOD_REGEX, '', model, 0, flags=re.IGNORECASE)
        try:
            models[norm].append(model)
        except KeyError as err:
            models[norm] = [model]

    for norm, origs in models.items():
        if len(origs) > 1:
            f = FrequencyResponse.generate_frequencies()
            avg = np.zeros(len(f))
            for model in origs:
                fr = FrequencyResponse.read_from_csv(os.path.join(input_dir, model, model + '.csv'))
                fr.interpolate()
                fr.center()
                avg += fr.raw
            avg /= len(origs)
            fr = FrequencyResponse(name=norm, frequency=f, raw=avg)
            d = os.path.join(output_dir, norm)
            os.makedirs(d, exist_ok=True)
            file_path = os.path.join(d, norm + '.csv')
            fr.write_to_csv(file_path)
Example #22
def main():
    if_compensation = os.path.join(ROOT_DIR, 'innerfidelity', 'resources', 'innerfidelity_compensation_sbaf-serious.csv')
    hp_compensation = os.path.join(ROOT_DIR, 'headphonecom', 'resources', 'headphonecom_compensation_sbaf-serious.csv')

    # Innerfidelity on-ear SBAF-Serious
    FrequencyResponse.main(
        input_dir=os.path.join(ROOT_DIR, 'innerfidelity', 'data', 'onear'),
        output_dir=os.path.join(ROOT_DIR, 'results', 'innerfidelity', 'sbaf-serious'),
        compensation=if_compensation,
        equalize=True,
        parametric_eq=True,
        max_filters=[5, 5],
        bass_boost=4.0
    )

    # Innerfidelity in-ear SBAF-Serious
    FrequencyResponse.main(
        input_dir=os.path.join(ROOT_DIR, 'innerfidelity', 'data', 'inear'),
        output_dir=os.path.join(ROOT_DIR, 'results', 'innerfidelity', 'sbaf-serious'),
        compensation=if_compensation,
        equalize=True,
        parametric_eq=True,
        max_filters=[5, 5],
        iem_bass_boost=6.0
    )

    # Innerfidelity earbud SBAF-Serious
    FrequencyResponse.main(
        input_dir=os.path.join(ROOT_DIR, 'innerfidelity', 'data', 'earbud'),
        output_dir=os.path.join(ROOT_DIR, 'results', 'innerfidelity', 'sbaf-serious'),
        compensation=if_compensation,
        equalize=True,
        parametric_eq=True,
        max_filters=[5, 5],
    )
Example #23
    def parse_headphonecom(im, model, scale=40):
        """Parses graph image downloaded from headphone.com"""
        # Crop out everything but graph area
        px_top = 24  # Pixels from top to +30dB
        px_bottom = 125  # Pixels from bottom to -30dB
        px_left = 51  # Pixels from left to 10Hz
        px_right = 50  # Pixels from right edge
        box = (px_left, px_top, im.size[0] - px_right, im.size[1] - px_bottom)
        im = im.crop(box)

        # X axis
        f_min = 10
        f_max = 20000
        px_f_max = 71
        f_step = (f_max / f_min)**(1 / (im.size[0] - (px_f_max - px_right)))
        f = [f_min]
        for _ in range(1, im.size[0]):
            f.append(f[-1] * f_step)

        # Y axis
        a_max = scale
        a_min = -scale
        a_res = (a_max - a_min) / im.size[1]  # dB / px

        amplitude = []
        # Iterate each column
        for x in range(im.size[0]):
            pxs = []  # Graph pixels
            # Iterate each row (pixel in column)
            for y in range(im.size[1]):
                # Read the RGB pixel values and convert to HLS
                h, l, s = colorsys.rgb_to_hls(
                    *[v / 255.0 for v in im.getpixel((x, y))])
                # Scale hue to 0-255
                h *= 255
                # Graph pixels are blue
                if s > 0.5 and 140 < h < 160:
                    pxs.append(float(y))
            if not pxs:
                # No graph pixels found on this column
                amplitude.append(None)
            else:
                # Mean of recorded pixels
                v = sum(pxs) / len(pxs)
                # Convert to dB value
                v = a_max - v * a_res
                amplitude.append(v)

        fr = FrequencyResponse(model, f, amplitude)
        return fr
Example #24
def main():
    ht18 = FrequencyResponse.read_from_csv(
        'compensation/harman_over-ear_2018_wo_bass.csv')
    df = FrequencyResponse.read_from_csv('compensation/diffuse_field.csv')
    df.raw += df._tilt(-1)
    eht = ht18.copy()
    eht.raw -= df.raw

    ifss = FrequencyResponse.read_from_csv(
        'innerfidelity/resources/innerfidelity_compensation_sbaf-serious.csv')
    #ifdf = FrequencyResponse.read_from_csv('innerfidelity/resources/innerfidelity_compensation_2016.csv')
    ifdf = FrequencyResponse.read_from_csv(
        'headphonecom/resources/headphonecom_compensation.csv')
    ifdf.raw += df._tilt(-1)
    #ifdf.raw -= 2.5
    eif = ifss.copy()
    eif.raw -= ifdf.raw

    fig, axs = plt.subplots(1, 2)
    fig.set_size_inches(18, 9)

    ht18.plot_graph(fig=fig, ax=axs[0], show=False, color='blue')
    df.plot_graph(fig=fig, ax=axs[0], show=False, color='orange')
    eht.plot_graph(fig=fig, ax=axs[0], show=False, color='red')
    axs[0].legend(
        ['Harman target 2018', 'Diffuse field (-1 dB/oct)', 'Difference'])
    axs[0].set_title('Harman target vs Diffuse field')

    ifss.plot_graph(fig=fig, ax=axs[1], show=False, color='blue')
    ifdf.plot_graph(fig=fig, ax=axs[1], show=False, color='orange')
    eif.plot_graph(fig=fig, ax=axs[1], show=False, color='red')
    #axs[1].legend(['SBAF-Serious', 'Innerfidelity diffuse field (-1 dB/oct)', 'Difference'])
    axs[1].legend(
        ['SBAF-Serious', 'Headphonecom target (-1 dB/oct)', 'Difference'])
    axs[1].set_title('Innerfidelity SBAF-Serious vs Headphonecom')

    plt.show()
Example #25
def main():
    fs = 48000
    f_res = 60
    input_dir = os.path.join('oratory1990', 'data', 'onear')
    glob_files = glob(os.path.join(input_dir, '**', '*.csv'), recursive=True)
    for input_file_path in glob_files:
        fr = FrequencyResponse.read_from_csv(input_file_path)
        fr.equalization = fr.raw
        fr.raw = np.array([])

        mp = fr.minimum_phase_impulse_response(fs=fs, f_res=f_res, normalize=False)
        mp = np.concatenate([mp, np.zeros(fs//10 - len(mp))])
        f_mp, mp = fft(mp, fs)
        f_mp[0] = 0.1
        mp = FrequencyResponse(name='Minimum phase', frequency=f_mp, raw=mp)
        mp.center()

        lp = fr.linear_phase_impulse_response(fs=fs, f_res=f_res, normalize=False)
        lp = np.concatenate([lp, np.zeros(fs//10 - len(lp))])
        f_lp, lp = fft(lp, fs)
        f_lp[0] = 0.1
        lp = FrequencyResponse(name='Linear phase', frequency=f_lp, raw=lp)
        lp.center()

        fig, ax = plt.subplots()
        fig.set_size_inches(15, 10)
        plt.plot(fr.frequency, fr.equalization)
        plt.plot(mp.frequency, mp.raw, '.-')
        plt.plot(lp.frequency, lp.raw, '.-')
        plt.legend(['Raw', 'Minimum phase', 'Linear phase'])
        plt.semilogx()
        plt.xlabel('Frequency (Hz)')
        plt.xlim([1, 20000])
        plt.ylabel('Gain (dBr)')
        plt.ylim([-20, 20])
        plt.title(fr.name)
        plt.grid(True, which='major')
        plt.grid(True, which='minor')
        plt.show()
Example #26
def main():
    oe18 = FrequencyResponse.read_from_csv(
        'compensation/harman_over-ear_2018.csv')
    oe18_nobass = FrequencyResponse.read_from_csv(
        'compensation/harman_over-ear_2018_wo_bass.csv')
    oe18_bass = oe18_nobass.copy()
    oe18_bass.raw += digital_coeffs(oe18_bass.frequency, 48000,
                                    *low_shelf(100, 0.65, 6, 48000))

    iem19 = FrequencyResponse.read_from_csv(
        'compensation/harman_in-ear_2019v2.csv')
    iem19_nobass = FrequencyResponse.read_from_csv(
        'compensation/harman_in-ear_2019v2_wo_bass.csv')
    iem19_bass = iem19_nobass.copy()
    iem19_bass.raw += digital_coeffs(iem19_bass.frequency, 48000,
                                     *low_shelf(100, 0.65, 9.5, 48000))

    usound = FrequencyResponse.read_from_csv('compensation/usound.csv')
    usound_nobass = FrequencyResponse.read_from_csv(
        'compensation/usound_wo_bass.csv')
    usound_bass = usound_nobass.copy()
    usound_bass.raw += digital_coeffs(usound_bass.frequency, 48000,
                                      *low_shelf(150, 0.65, 9.4, 48000))

    fig, ax = oe18.plot_graph(show=False, color='C0')
    oe18_nobass.plot_graph(show=False, color='C1', fig=fig, ax=ax)
    oe18_bass.plot_graph(show=False, color='C2', fig=fig, ax=ax)
    ax.legend(['Original', 'Without bass', 'Shelf bass'])

    fig, ax = iem19.plot_graph(show=False, color='C0')
    iem19_nobass.plot_graph(show=False, color='C1', fig=fig, ax=ax)
    iem19_bass.plot_graph(show=False, color='C2', fig=fig, ax=ax)
    ax.legend(['Original', 'Without bass', 'Shelf bass'])

    fig, ax = usound.plot_graph(show=False, color='C0')
    usound_nobass.plot_graph(show=False, color='C1', fig=fig, ax=ax)
    usound_bass.plot_graph(show=False, color='C2', fig=fig, ax=ax)
    ax.legend(['Original', 'Without bass', 'Shelf bass'])

    plt.show()
Example #27
def main(input_dir=None, output_dir=None):
    if input_dir is None:
        raise TypeError('Input directory path is required!')
    if output_dir is None:
        raise TypeError('Output directory path is required!')
    input_dir = os.path.abspath(input_dir)
    output_dir = os.path.abspath(output_dir)

    models = {}
    for file_path in glob(os.path.join(input_dir, '*')):
        model = os.path.split(file_path)[-1]
        if not (re.search(' sample [a-zA-Z0-9]$', model, re.IGNORECASE)
                or re.search(' sn[a-zA-Z0-9]+$', model, re.IGNORECASE)):
            continue
        norm = re.sub(' sample [a-zA-Z0-9]$', '', model, 0, re.IGNORECASE)
        norm = re.sub(' sn[a-zA-Z0-9]+$', '', norm, 0, re.IGNORECASE)
        try:
            models[norm].append(model)
        except KeyError as err:
            models[norm] = [model]

    for norm, origs in models.items():
        if len(origs) > 1:
            print(norm, origs)
            avg = np.zeros(613)
            f = FrequencyResponse.generate_frequencies()
            for model in origs:
                fr = FrequencyResponse.read_from_csv(
                    os.path.join(input_dir, model, model + '.csv'))
                fr.interpolate()
                fr.center()
                avg += fr.raw
            avg /= len(origs)
            fr = FrequencyResponse(name=norm, frequency=f, raw=avg)
            d = os.path.join(output_dir, norm)
            if not os.path.isdir(d):
                os.makedirs(d)
            fr.write_to_csv(os.path.join(d, norm + '.csv'))
Example #28
def equal_loudness_contour(phon):
    f = [
        20, 25, 31.5, 40, 50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500,
        630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000, 5000, 6300, 8000,
        10000, 12500
    ]
    af = [
        0.532, 0.506, 0.480, 0.455, 0.432, 0.409, 0.387, 0.367, 0.349, 0.330,
        0.315, 0.301, 0.288, 0.276, 0.267, 0.259, 0.253, 0.250, 0.246, 0.244,
        0.243, 0.243, 0.243, 0.242, 0.242, 0.245, 0.254, 0.271, 0.301
    ]
    Lu = [
        -31.6, -27.2, -23.0, -19.1, -15.9, -13.0, -10.3, -8.1, -6.2, -4.5,
        -3.1, -2.0, -1.1, -0.4, 0.0, 0.3, 0.5, 0.0, -2.7, -4.1, -1.0, 1.7, 2.5,
        1.2, -2.1, -7.1, -11.2, -10.7, -3.1
    ]
    Tf = [
        78.5, 68.7, 59.5, 51.1, 44.0, 37.5, 31.5, 26.5, 22.1, 17.9, 14.4, 11.4,
        8.6, 6.2, 4.4, 3.0, 2.2, 2.4, 3.5, 1.7, -1.3, -4.2, -6.0, -5.4, -1.5,
        6.0, 12.6, 13.9, 12.3
    ]

    Ln = phon
    Lps = []
    for i in range(0, len(f)):
        Af = 0.00447 * (10.0**(Ln / 40.0) - 1.15) + (10.0**((
            (Tf[i] + Lu[i]) / 10.0) - 9.0) / 2.5)**af[i]
        Lp = ((10.0 / af[i]) * math.log10(Af)) - Lu[i] + 94.0
        Lps.append(Lp)
    Lps = np.array(Lps)

    fr = FrequencyResponse(name='{} phon Equal Loudness Contour'.format(phon),
                           frequency=f,
                           raw=Lps)
    fr.interpolate(pol_order=3, f_min=20, f_max=12500)
    fr.center()
    return fr
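The tables and the Af/Lp formula above appear to follow the ISO 226:2003 equal-loudness definition. A quick sketch to visualize a few contours, assuming the function above:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for phon in [20, 40, 60, 80]:
    elc = equal_loudness_contour(phon)
    ax.plot(elc.frequency, elc.raw, label=f'{phon} phon')
ax.set_xscale('log')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Amplitude (dB, centered)')
ax.legend()
plt.show()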
Example #29
    def process(self, item, file_paths, target_dir=None):
        if item.form == 'ignore':
            return

        if target_dir is None:
            raise TypeError('"target_dir" must be given')

        avg_fr = FrequencyResponse(name=item.true_name)
        avg_fr.raw = np.zeros(avg_fr.frequency.shape)
        for fp in file_paths:
            with open(fp, 'r', encoding='utf-8') as fh:
                s = fh.read()

            freq = []
            raw = []
            for line in s.split('\n'):
                if len(line) == 0 or line[0] == '*':
                    # Skip empty lines and comments
                    if 'C-weighting compensation: On' in line:
                        print(f'C-weighted measurement: {item.false_name}')
                    continue

                frp = line.split(', ')
                if len(frp) == 1:
                    frp = line.split('\t')
                if len(frp) == 1:
                    frp = line.split(' ')
                if len(frp) == 2:
                    f, r = frp
                elif len(frp) == 3:
                    f, r, p = frp
                else:
                    # Must be comment line
                    continue

                if f == '?' or r == '?':
                    # Skip lines with missing data
                    continue

                try:
                    freq.append(float(f))
                    raw.append(float(r))
                except ValueError as err:
                    # Failed to convert values to floats, must be header or comment row, skip
                    continue

            # Create standard fr object
            fr = FrequencyResponse(name=item.true_name, frequency=freq, raw=raw)
            fr.interpolate()
            fr.center()
            avg_fr.raw += fr.raw

        avg_fr.raw /= len(file_paths)

        # Save
        dir_path = os.path.join(target_dir, avg_fr.name)
        os.makedirs(dir_path, exist_ok=True)
        file_path = os.path.join(dir_path, f'{avg_fr.name}.csv')
        avg_fr.write_to_csv(file_path)
        print(f'Saved "{avg_fr.name}" to "{file_path}"')
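For reference, the parser above accepts plain text files with '*' comment lines and values separated by ', ', a tab, or a space; a hypothetical input could look like:

# Hypothetical measurement file content accepted by the parser above
example_measurement = '''* comment or header line
20.0, -3.1
25.0\t-2.9
31.5 -2.6 0.0
'''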
Example #30
def main():
    fig, ax = plt.subplots()
    diffs = []
    # Calculate differences for all models
    for file in glob(os.path.join('compensation', 'compensated', '**',
                                  '*.csv'),
                     recursive=True):
        file = os.path.abspath(file)
        comp = FrequencyResponse.read_from_csv(file)
        comp.interpolate()
        comp.center()
        raw_data_path = file.replace('compensated', 'raw')
        raw = FrequencyResponse.read_from_csv(raw_data_path)
        raw.interpolate()
        raw.center()
        diff = FrequencyResponse(name=comp.name,
                                 frequency=comp.frequency,
                                 raw=raw.raw - comp.raw)
        plt.plot(diff.frequency, diff.raw)
        diffs.append(diff.raw)

    # Average and smoothen difference
    f = FrequencyResponse.generate_frequencies()
    diffs = np.vstack(diffs)
    diff = np.mean(diffs, axis=0)
    diff = FrequencyResponse(name='Headphone.com Compensation',
                             frequency=f,
                             raw=diff)
    diff.smoothen_fractional_octave(window_size=1 / 9, iterations=10)
    diff.raw = diff.smoothed
    diff.smoothed = np.array([])

    plt.xlabel('Frequency (Hz)')
    plt.semilogx()
    plt.xlim([20, 20000])
    plt.ylabel('Amplitude (dBr)')
    plt.ylim([-15, 15])
    plt.grid(which='major')
    plt.grid(which='minor')
    plt.title('Headphone.com Compensation Function')
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))
    plt.show()

    diff.write_to_csv('headphonecom_compensation.csv')
    diff.plot_graph(show=True,
                    f_min=10,
                    f_max=20000,
                    file_path='headphonecom_compensation.png')