def process(self, item, file_paths):
    """Parse measurement files for one item, average them and save as CSV."""
    avg = FrequencyResponse(name=item.true_name)
    avg.raw = np.zeros(avg.frequency.shape)
    for path in file_paths:
        if re.search(r'\.mdat$', path):
            # Reading mdat files for Gras headphone measurements is not implemented
            raise TypeError('Crinacle\'s Gras measurements are not supported yet!')
        # Read text file for IEM and Ears-711 headphone measurements
        with open(path, 'r', encoding='utf-8') as fh:
            contents = fh.read()
        freq = []
        raw = []
        for line in contents.split('\n'):
            if not line or line.startswith('*'):
                # Empty line or comment; C-weighted measurements are reported
                if 'C-weighting compensation: On' in line:
                    print(f'C-weighted measurement: {item.false_name}')
                continue
            parts = line.split(' ')
            if len(parts) == 1:
                parts = line.split('\t')
            if len(parts) == 2:
                f, r = parts
            elif len(parts) == 3:
                f, r, p = parts
            else:
                # Must be comment line
                continue
            if '?' in (f, r):
                # Skip lines with missing data
                continue
            try:
                freq.append(float(f))
                raw.append(float(r))
            except ValueError:
                # Not numeric, must be header or comment row, skip
                continue
        # Standardize onto the shared frequency grid and accumulate
        measurement = FrequencyResponse(name=item.true_name, frequency=freq, raw=raw)
        measurement.interpolate()
        measurement.center()
        avg.raw += measurement.raw
    avg.raw /= len(file_paths)
    # Save
    dir_path = os.path.join(DIR_PATH, 'data', item.form, avg.name)
    os.makedirs(dir_path, exist_ok=True)
    file_path = os.path.join(dir_path, f'{avg.name}.csv')
    avg.write_to_csv(file_path)
    print(f'Saved "{avg.name}" to "{file_path}"')
def process(self, item, file_paths, target_dir=None):
    """Parse measurement text files for one item, average them and save a CSV
    under ``target_dir``. Items whose form is 'ignore' are skipped."""
    if item.form == 'ignore':
        return
    if target_dir is None:
        raise TypeError('"target_dir" must be given')
    avg_fr = FrequencyResponse(name=item.true_name)
    avg_fr.raw = np.zeros(avg_fr.frequency.shape)
    for fp in file_paths:
        with open(fp, 'r', encoding='utf-8') as fh:
            text = fh.read()
        freq = []
        raw = []
        for row in text.split('\n'):
            if len(row) == 0 or row[0] == '*':
                # Empty row or comment; C-weighted measurements are reported
                if 'C-weighting compensation: On' in row:
                    print(f'C-weighted measurement: {item.false_name}')
                continue
            # Try the known column separators in order
            cells = row.split(', ')
            if len(cells) == 1:
                cells = row.split('\t')
            if len(cells) == 1:
                cells = row.split(' ')
            if len(cells) == 2:
                f, r = cells
            elif len(cells) == 3:
                f, r, p = cells
            else:
                # Must be comment line
                continue
            if f == '?' or r == '?':
                # Skip rows with missing data
                continue
            try:
                freq.append(float(f))
                raw.append(float(r))
            except ValueError:
                # Not numeric, must be header or comment row, skip
                continue
        # Standardize onto the shared frequency grid and accumulate
        fr = FrequencyResponse(name=item.true_name, frequency=freq, raw=raw)
        fr.interpolate()
        fr.center()
        avg_fr.raw += fr.raw
    avg_fr.raw /= len(file_paths)
    # Save
    dir_path = os.path.join(target_dir, avg_fr.name)
    os.makedirs(dir_path, exist_ok=True)
    file_path = os.path.join(dir_path, f'{avg_fr.name}.csv')
    avg_fr.write_to_csv(file_path)
    print(f'Saved "{avg_fr.name}" to "{file_path}"')
def average_measurements(input_dir=None, output_dir=None):
    """Average measurement groups whose names differ only by a MOD_REGEX suffix
    and write the averaged responses under ``output_dir``."""
    if input_dir is None:
        raise TypeError('Input directory path is required!')
    if output_dir is None:
        output_dir = os.path.abspath(input_dir)
    input_dir = os.path.abspath(input_dir)
    output_dir = os.path.abspath(output_dir)
    # Group measurement directories by their normalized (suffix-stripped) name
    models = {}
    for file_path in glob(os.path.join(input_dir, '*')):
        model = os.path.split(file_path)[-1]
        if not re.search(MOD_REGEX, model, re.IGNORECASE):
            continue
        norm = re.sub(MOD_REGEX, '', model, 0, flags=re.IGNORECASE)
        models.setdefault(norm, []).append(model)
    # Average each group that has more than one measurement
    for norm, origs in models.items():
        if len(origs) <= 1:
            continue
        f = FrequencyResponse.generate_frequencies()
        avg = np.zeros(len(f))
        for model in origs:
            fr = FrequencyResponse.read_from_csv(os.path.join(input_dir, model, model + '.csv'))
            fr.interpolate()
            fr.center()
            avg += fr.raw
        avg /= len(origs)
        fr = FrequencyResponse(name=norm, frequency=f, raw=avg)
        d = os.path.join(output_dir, norm)
        os.makedirs(d, exist_ok=True)
        fr.write_to_csv(os.path.join(d, norm + '.csv'))
def main():
    """Average measurements that differ only by a sample/serial-number suffix.

    Groups directories under ``DIR`` by normalized model name and writes the
    per-group average response under ``OUT_DIR``.
    """
    models = {}
    for file_path in glob(os.path.join(DIR, '*')):
        model = os.path.split(file_path)[-1]
        if not (re.search(' sample [a-zA-Z0-9]$', model, re.IGNORECASE)
                or re.search(' sn[a-zA-Z0-9]+$', model, re.IGNORECASE)):
            # Skip measurements WITHOUT a sample or serial number suffix;
            # those are already averaged results (original comment was inverted)
            continue
        norm = re.sub(' sample [a-zA-Z0-9]$', '', model, 0, re.IGNORECASE)
        norm = re.sub(' sn[a-zA-Z0-9]+$', '', norm, 0, re.IGNORECASE)
        models.setdefault(norm, []).append(model)
    for norm, origs in models.items():
        if len(origs) <= 1:
            continue
        print(norm, origs)
        f = FrequencyResponse.generate_frequencies()
        # Size the accumulator from the actual frequency grid instead of the
        # hard-coded magic number 613 (sibling average_measurements uses len(f))
        avg = np.zeros(len(f))
        for model in origs:
            fr = FrequencyResponse.read_from_csv(os.path.join(DIR, model, model + '.csv'))
            fr.interpolate()
            fr.center()
            avg += fr.raw
        avg /= len(origs)
        fr = FrequencyResponse(name=norm, frequency=f, raw=avg)
        d = os.path.join(OUT_DIR, norm)
        os.makedirs(d, exist_ok=True)
        fr.write_to_csv(os.path.join(d, norm + '.csv'))
def main():
    """Combine the SBAF-Serious transformation with the 2016 Innerfidelity
    compensation curve, plot all curves for comparison and save the result.

    Bug fix: the original used backslash path literals ('resources\\innerfidelity...')
    which contain invalid escape sequences (e.g. '\\i') and only work on Windows;
    forward slashes are portable and escape-free.
    """
    trans = FrequencyResponse.read_from_csv('resources/innerfidelity_transformation_SBAF-Serious.csv')
    comp16 = FrequencyResponse.read_from_csv('resources/innerfidelity_compensation_2016.csv')
    comp17 = FrequencyResponse.read_from_csv('resources/innerfidelity_compensation_2017.csv')
    trans.interpolate()
    trans.center()
    # New compensation = 2016 compensation + SBAF-Serious transformation
    comp = FrequencyResponse(name='Serious Compensation', frequency=comp16.frequency, raw=comp16.raw + trans.raw)
    fig, ax = trans.plot_graph(show=False)
    comp16.plot_graph(fig=fig, ax=ax, show=False)
    comp.plot_graph(fig=fig, ax=ax, show=False)
    comp17.plot_graph(fig=fig, ax=ax, show=False)
    fig.legend(['Serious', '2016', '2016+Serious', '2017'])
    plt.show()
    comp.write_to_csv('resources/innerfidelity_compensation_SBAF-Serious.csv')
    comp.plot_graph(show=False, close=True, file_path='resources/innerfidelity_compensation_SBAF_Serious.png')
def main():
    """Estimate HiFiMAN HE400S response with Focus pads and write EQ files.

    Applies the measured stock-vs-focus pad difference on top of the
    Innerfidelity HE400S measurement, smoothens, equalizes and saves
    CSV / PNG / EqAPO outputs.
    """
    stock = FrequencyResponse.read_from_csv('data/HE400S stock.csv')
    focus = FrequencyResponse.read_from_csv('data/HE400S focus.csv')
    base = FrequencyResponse.read_from_csv('innerfidelity/data/HiFiMAN HE400S/HiFiMAN HE400S.csv')
    stock.interpolate(f_min=20, f_max=20000)
    stock.center()
    focus.interpolate(f_min=20, f_max=20000)
    focus.center()
    base.interpolate(f=stock.frequency)
    base.center()
    # Pad swap effect = focus pads response minus stock pads response
    delta = FrequencyResponse(name='Diff', frequency=stock.frequency, raw=focus.raw - stock.raw)
    # Apply the pad difference on top of the Innerfidelity base measurement
    fr = FrequencyResponse(name='HE400S with Focus Pads', frequency=stock.frequency, raw=base.raw + delta.raw)
    # Debug plot showing base, difference and combined curves together
    _fr = FrequencyResponse(name='debug', frequency=stock.frequency, raw=base.raw, smoothed=delta.raw, equalization=fr.raw)
    _fr.plot_graph()
    fr.smoothen_fractional_octave(window_size=1 / 5, iterations=10, treble_window_size=1 / 2, treble_iterations=100)
    os.makedirs('innerfidelity/data/HiFiMAN HE400S with Focus Pads', exist_ok=True)
    fr.write_to_csv(file_path='innerfidelity/data/HiFiMAN HE400S with Focus Pads/HiFiMAN HE400S with Focus Pads ORIG.csv')
    fr.equalize(max_gain=12, smoothen=True, window_size=1 / 5, bass_target=4)
    fr.write_to_csv(file_path='innerfidelity/data/HiFiMAN HE400S with Focus Pads/HiFiMAN HE400S with Focus Pads.csv')
    fig, ax = fr.plot_graph(show=False, file_path='innerfidelity/data/HiFiMAN HE400S with Focus Pads/HiFiMAN HE400S with Focus Pads.png')
    plt.close(fig)
    fr.write_eqapo_graphic_eq('innerfidelity/data/HiFiMAN HE400S with Focus Pads/HiFiMAN HE400S with Focus Pads EqAPO.txt')
def main():
    """Plot pop-music loudness contour against V- and A-shaped responses.

    Bug fix: the mean-level prints called ``np.power(x / 20, 10)`` — base and
    exponent swapped — which raised dB values to the 10th power (and produced
    NaN via log10 of negative means). dB-to-linear conversion is 10**(x/20).
    """
    pop = pop_frequency_distribution()
    elc = equal_loudness_contour(80)
    elc.center()
    elc.raw = -elc.raw
    # Perceived loudness: music spectrum weighted by the inverted 80 phon contour
    loudness = FrequencyResponse('Loudness', frequency=pop.frequency, raw=pop.raw + elc.raw)
    fig, ax = pop.plot_graph(show=False)
    loudness.plot_graph(fig=fig, ax=ax, show=False)
    v = FrequencyResponse(name='V', frequency=[20, 1000, 20000], raw=[10, -10, 10])
    v.interpolate(pol_order=2)
    v.interpolate(f=loudness.frequency)
    v.plot_graph(fig=fig, ax=ax, show=False)
    v_l = FrequencyResponse(name='V Loudness', frequency=v.frequency, raw=v.raw + loudness.raw)
    v_l.plot_graph(fig=fig, ax=ax, show=False)
    a = FrequencyResponse(name='A', frequency=[20, 1000, 20000], raw=[-10, 5, -10])
    a.interpolate(pol_order=2)
    a.interpolate(f=loudness.frequency)
    a.plot_graph(fig=fig, ax=ax, show=False)
    a_l = FrequencyResponse(name='A Loudness', frequency=a.frequency, raw=a.raw + loudness.raw)
    a_l.plot_graph(fig=fig, ax=ax, show=False, a_min=-20, a_max=90)
    plt.legend([
        'Pop Music Frequency Distribution', 'Loudness', 'V', 'V+L', 'A', 'A+L'
    ])

    def mean_db(x):
        # Mean level in dB: dB -> linear amplitude -> mean -> dB
        return 20 * np.log10(np.mean(np.power(10, x / 20)))

    print('V: {}'.format(mean_db(v.raw)))
    print('V loudness: {}'.format(mean_db(v_l.raw)))
    print('A: {}'.format(mean_db(a.raw)))
    print('A loudness: {}'.format(mean_db(a_l.raw)))
    plt.show()
    loudness.write_to_csv('music_loudness_contour.csv')
    plt.close(fig)
def main():
    """Derive the Headphone.com compensation curve from the average difference
    between raw and compensated measurements, then save it as CSV and PNG."""
    fig, ax = plt.subplots()
    deltas = []
    # Difference between raw and compensated measurement for every model
    for csv_path in glob(os.path.join('compensation', 'compensated', '**', '*.csv'), recursive=True):
        csv_path = os.path.abspath(csv_path)
        comp = FrequencyResponse.read_from_csv(csv_path)
        comp.interpolate()
        comp.center()
        raw = FrequencyResponse.read_from_csv(csv_path.replace('compensated', 'raw'))
        raw.interpolate()
        raw.center()
        delta = FrequencyResponse(name=comp.name, frequency=comp.frequency, raw=raw.raw - comp.raw)
        plt.plot(delta.frequency, delta.raw)
        deltas.append(delta.raw)
    # Average all per-model differences and smoothen the result
    f = FrequencyResponse.generate_frequencies()
    mean_delta = np.mean(np.vstack(deltas), axis=0)
    diff = FrequencyResponse(name='Headphone.com Compensation', frequency=f, raw=mean_delta)
    diff.smoothen_fractional_octave(window_size=1 / 9, iterations=10)
    diff.raw = diff.smoothed
    diff.smoothed = np.array([])
    # Style the comparison plot of all individual differences
    plt.xlabel('Frequency (Hz)')
    plt.semilogx()
    plt.xlim([20, 20000])
    plt.ylabel('Amplitude (dBr)')
    plt.ylim([-15, 15])
    plt.grid(which='major')
    plt.grid(which='minor')
    plt.title('Headphone.com Compensation Function')
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))
    plt.show()
    diff.write_to_csv('headphonecom_compensation.csv')
    diff.plot_graph(show=True, f_min=10, f_max=20000, file_path='headphonecom_compensation.png')
def save(df, column, name):
    """Save one compensation-curve column of *df* as CSV and PNG, plus a
    variant with the region below the bass minimum flattened."""
    response = FrequencyResponse(name=name, frequency=df['Frequency'], raw=df[column])
    response.interpolate()
    response.center()
    name = name.lower().replace(' ', '_')
    response.write_to_csv('compensation/{}.csv'.format(name))
    response.plot_graph(file_path='compensation/{}.png'.format(name), show=False, color=None)
    # Flatten the curve below its minimum in the low end
    # (first 400 points of the interpolated grid — presumably the bass region)
    low_point = np.argmin(response.raw[:400])
    response.raw[:low_point] = response.raw[low_point]
    response.write_to_csv('compensation/{}_wo_bass.csv'.format(name))
    response.plot_graph(file_path='compensation/{}_wo_bass.png'.format(name), show=False, color=None)
def main(input_dir=None, output_dir=None):
    """Average measurements that differ only by a sample/serial-number suffix.

    Args:
        input_dir: Directory containing per-model measurement directories.
        output_dir: Directory to write averaged results into.

    Raises:
        TypeError: If either directory path is missing.
    """
    if input_dir is None:
        raise TypeError('Input directory path is required!')
    if output_dir is None:
        raise TypeError('Output directory path is required!')
    input_dir = os.path.abspath(input_dir)
    output_dir = os.path.abspath(output_dir)
    models = {}
    for file_path in glob(os.path.join(input_dir, '*')):
        model = os.path.split(file_path)[-1]
        # Only measurements WITH a sample or serial number suffix get averaged
        if not (re.search(' sample [a-zA-Z0-9]$', model, re.IGNORECASE)
                or re.search(' sn[a-zA-Z0-9]+$', model, re.IGNORECASE)):
            continue
        norm = re.sub(' sample [a-zA-Z0-9]$', '', model, 0, re.IGNORECASE)
        norm = re.sub(' sn[a-zA-Z0-9]+$', '', norm, 0, re.IGNORECASE)
        models.setdefault(norm, []).append(model)
    for norm, origs in models.items():
        if len(origs) <= 1:
            continue
        print(norm, origs)
        f = FrequencyResponse.generate_frequencies()
        # Accumulator sized from the frequency grid, not the hard-coded 613
        avg = np.zeros(len(f))
        for model in origs:
            fr = FrequencyResponse.read_from_csv(os.path.join(input_dir, model, model + '.csv'))
            fr.interpolate()
            fr.center()
            avg += fr.raw
        avg /= len(origs)
        fr = FrequencyResponse(name=norm, frequency=f, raw=avg)
        d = os.path.join(output_dir, norm)
        os.makedirs(d, exist_ok=True)
        fr.write_to_csv(os.path.join(d, norm + '.csv'))
def main():
    """Convert raw measurement text files into standardized per-model CSVs,
    resolving model names through name_index.tsv and averaging L/R channels.

    Bug fixes: the ``full_names`` duplicate-detection set was never populated,
    so the duplicate warning could never fire; the regex literal '\\.txt$' used
    an invalid escape sequence (now a raw string); the open() file handle ``f``
    was shadowed by the parse loop's ``f, r = frp`` unpacking (renamed).
    """
    # Read name index
    name_index = dict()
    df = pd.read_csv(os.path.join(DIR_PATH, 'name_index.tsv'), sep='\t', header=None)
    # Replace empty cells with empty strings
    df = df.fillna('')
    df.columns = ['name', 'full_name', 'comment']
    records = df.to_dict('records')
    full_names = set()
    for record in records:
        if record['full_name'] in full_names:
            warnings.warn('Duplicate entry in name index with full name: "{}".'.format(record['full_name']))
            continue
        # Fix: populate the set so duplicate full names are actually detected
        full_names.add(record['full_name'])
        name_index[record['name']] = record
    data = dict()
    for file_path in glob(os.path.join(DIR_PATH, 'raw_data', '*.txt')):
        name = os.path.split(file_path)[1]
        # Remove ".txt" and " L" or " R" suffix
        name = re.sub(r'\.txt$', '', name)
        # NOTE(review): pattern is unanchored, so " L"/" R" is stripped anywhere
        # in the name, not only at the end — confirm whether ' (L|R)$' was meant
        name = re.sub(' (L|R)', '', name)
        if name not in name_index:
            warnings.warn('"{}" missing from name index, skipping.'.format(name))
            continue
        if name_index[name]['comment'] in ['ignore', 'onear'] or not name_index[name]['full_name']:
            warnings.warn('Skipping "{}".'.format(name))
            continue
        name = name_index[name]['full_name']
        if name not in data:
            data[name] = []
        # Read file
        with open(file_path, 'r') as fh:
            s = fh.read()
        freq = []
        raw = []
        for line in s.split('\n'):
            if len(line) == 0 or line[0] == '*':
                # Skip empty lines and comments
                if 'C-weighting compensation: On' in line:
                    warnings.warn('C-weighted measurement: ' + name)
                continue
            frp = line.split(' ')
            if len(frp) == 1:
                frp = line.split('\t')
            if len(frp) == 2:
                f, r = frp
            elif len(frp) == 3:
                f, r, p = frp
            else:
                # Must be comment line
                continue
            if f == '?' or r == '?':
                # Skip lines with missing data
                continue
            try:
                freq.append(float(f))
                raw.append(float(r))
            except ValueError:
                # Failed to convert values to floats, must be header or comment row, skip
                continue
        # Create standard fr object
        fr = FrequencyResponse(name=name, frequency=freq, raw=raw)
        fr.interpolate()
        fr.center()
        data[name].append(fr)
    os.makedirs(os.path.join(DIR_PATH, 'inspection'), exist_ok=True)
    # Iterate all models
    for name, frs in data.items():
        # Average SPL data from all measurements for this model (so Left and Right)
        raw = np.mean([fr.raw for fr in frs], axis=0)
        # Save as CSV
        fr = FrequencyResponse(name=name, frequency=frs[0].frequency, raw=raw)
        dir_path = os.path.join(DIR_PATH, 'data', 'inear', name)
        os.makedirs(dir_path, exist_ok=True)
        fr.write_to_csv(os.path.join(dir_path, name + '.csv'))
        # Save inspection image
        fr.plot_graph(show=False, file_path=os.path.join(DIR_PATH, 'inspection', name + '.png'))
        plt.close()
def main():
    """Compute the transfer function between Rtings and Innerfidelity raw data.

    Finds headphone models measured by both sites, averages the raw-to-raw
    differences, smoothens the average and writes calibration CSVs and plots
    in both directions (Rtings->Innerfidelity and the negated inverse).
    """
    # Filenames
    if_files = list(glob(os.path.join('innerfidelity', 'data', '**', '*.csv'), recursive=True))
    if_file_names = [os.path.split(os.path.abspath(f))[-1] for f in if_files]
    normalized_if_files = [normalize(s) for s in if_file_names]
    hp_files = list(glob(os.path.join('rtings', 'data', '**', '*.csv'), recursive=True))
    # Find matching files by comparing normalized file names
    matching_if_files = []
    matching_hp_files = []
    for hp_file in hp_files:
        file_name = os.path.split(os.path.abspath(hp_file))[-1]
        for i in range(len(normalized_if_files)):
            if normalized_if_files[i] == normalize(file_name):
                matching_hp_files.append(hp_file)
                matching_if_files.append(if_files[i])
    # Write matches to file for manual inspection
    df = pd.DataFrame(np.array([matching_hp_files, matching_if_files]).transpose())
    df.to_csv('matches.csv', index=False, header=False)
    fig, ax = plt.subplots()
    diffs = []
    # Calculate differences for all models
    # NOTE(review): compensation curves are loaded but the compensate() calls
    # below are commented out — the comparison is raw-to-raw only
    if_compensation = FrequencyResponse.read_from_csv(os.path.join('innerfidelity', 'resources', 'innerfidelity_compensation_2017.csv'))
    if_compensation.interpolate()
    hp_compensation = FrequencyResponse.read_from_csv(os.path.join('rtings', 'resources', 'rtings_compensation.csv'))
    hp_compensation.interpolate()
    for i in range(len(matching_if_files)):
        if_fr = FrequencyResponse.read_from_csv(matching_if_files[i])
        if_fr.interpolate()
        if_fr.center()
        #if_fr.compensate(if_compensation)
        hp_fr = FrequencyResponse.read_from_csv(matching_hp_files[i])
        hp_fr.interpolate()
        hp_fr.center()
        #hp_fr.compensate(hp_compensation)
        #diff = FrequencyResponse(name=if_fr.name, frequency=if_fr.frequency, raw=hp_fr.error - if_fr.error)
        diff = FrequencyResponse(name=if_fr.name, frequency=if_fr.frequency, raw=hp_fr.raw - if_fr.raw)
        plt.plot(diff.frequency, diff.raw)
        diffs.append(diff.raw)
    # Average and smoothen difference
    f = FrequencyResponse.generate_frequencies()
    diffs = np.vstack(diffs)
    diff = np.mean(diffs, axis=0)
    std = np.std(diffs, axis=0)
    diff = FrequencyResponse(name='Rtings Raw to Innerfidelity Raw', frequency=f, raw=diff)
    #diff.smoothen(window_size=1/7, iterations=10)
    diff.smoothen_fractional_octave(window_size=1 / 5, iterations=100)
    diff.raw = diff.smoothed
    diff.smoothed = np.array([])
    # Style the overview plot of all per-model differences
    plt.xlabel('Frequency (Hz)')
    plt.semilogx()
    plt.xlim([20, 20000])
    plt.ylabel('Amplitude (dBr)')
    plt.ylim([-15, 15])
    plt.grid(which='major')
    plt.grid(which='minor')
    plt.title('Rtings Raw to Innerfidelity Raw')
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter('{x:.0f}'))
    plt.show()
    # Plot averaged curve with +/- one standard deviation band and save
    fig, ax = diff.plot_graph(f_min=20, f_max=20000, show=False, color=None)
    ax.fill_between(diff.frequency, diff.raw + std, diff.raw - std, facecolor='lightblue')
    plt.legend(['Rtings Raw to Innerfidelity Raw', 'Standard Deviation'])
    plt.ylim([-10, 10])
    fig.savefig(os.path.join('calibration', 'rtings_to_innerfidelity.png'), dpi=240)
    plt.show()
    diff.write_to_csv(os.path.join('calibration', 'rtings_to_innerfidelity.csv'))
    # The inverse direction is simply the negated curve
    diff.raw *= -1
    diff.name = 'Innerfidelity Raw to Rtings Raw'
    fig, ax = diff.plot_graph(f_min=20, f_max=20000, show=False, color=None)
    ax.fill_between(diff.frequency, diff.raw + std, diff.raw - std, facecolor='lightblue')
    plt.legend(['Innerfidelity Raw to Rtings Raw', 'Standard Deviation'])
    plt.ylim([-10, 10])
    fig.savefig(os.path.join('calibration', 'innerfidelity_to_rtings.png'), dpi=240)
    plt.show()
    diff.write_to_csv(os.path.join('calibration', 'innerfidelity_to_rtings.csv'))
def process(self, item, url):
    """Download Rtings measurement JSON for *item*, reconstruct the raw
    frequency response (un-compensating with a target curve) and save it
    as a CSV plus an inspection plot."""
    json_file = Crawler.download(url, item.true_name, os.path.join(DIR_PATH, 'json'))
    if json_file is not None:
        # Full frequency response JSON is available
        with open(os.path.join(DIR_PATH, 'json', f'{item.true_name}.json'), 'r', encoding='utf-8') as fh:
            json_data = json.load(fh)
        fr, target = RtingsCrawler.parse_json(json_data)
        fr.name = item.true_name
    else:
        # No frequency response available, download bass, mid and treble
        # Bass
        Crawler.download(url.replace('frequency-response-14.json', 'bass.json'), f'{item.true_name}-bass', os.path.join(DIR_PATH, 'json'))
        with open(os.path.join(DIR_PATH, 'json', f'{item.true_name}-bass.json'), 'r', encoding='utf-8') as fh:
            bass_fr, bass_target = self.parse_json(json.load(fh))
        # Mid
        Crawler.download(url.replace('frequency-response-14.json', 'mid.json'), f'{item.true_name}-mid', os.path.join(DIR_PATH, 'json'))
        with open(os.path.join(DIR_PATH, 'json', f'{item.true_name}-mid.json'), 'r', encoding='utf-8') as fh:
            mid_fr, mid_target = self.parse_json(json.load(fh))
        # Treble
        Crawler.download(url.replace('frequency-response-14.json', 'treble.json'), f'{item.true_name}-treble', os.path.join(DIR_PATH, 'json'))
        with open(os.path.join(DIR_PATH, 'json', f'{item.true_name}-treble.json'), 'r', encoding='utf-8') as fh:
            treble_fr, treble_target = self.parse_json(json.load(fh))
        # Concatenate the three bands into single response and target curves
        fr = FrequencyResponse(
            name=item.true_name,
            frequency=np.concatenate([bass_fr.frequency, mid_fr.frequency, treble_fr.frequency]),
            raw=np.concatenate([bass_fr.raw, mid_fr.raw, treble_fr.raw]))
        target = FrequencyResponse(
            name=item.true_name,
            frequency=np.concatenate([bass_target.frequency, mid_target.frequency, treble_target.frequency]),
            raw=np.concatenate([bass_target.raw, mid_target.raw, treble_target.raw]))
    fr.interpolate()
    if np.std(fr.raw) == 0:
        # Use the target that came with the frequency response data
        # NOTE(review): the condition tests fr.raw but the original comment talks
        # about a "non-zero target response" — verify this shouldn't be
        # np.std(target.raw) > 0 instead
        target.interpolate()
        target = target  # NOTE(review): no-op self-assignment, likely leftover
        print(f'Using target for {fr.name}')
    elif item.form == 'inear':
        # Using in-ear target response
        target = INEAR_TARGET
    else:
        # Using on-ear or earbud target response
        target = ONEAR_TARGET
    target.center()
    # Add target back onto the curve — presumably undoing Rtings' compensation;
    # assumes target is sampled on the same frequency grid as fr — TODO confirm
    fr.raw += target.raw
    fr.center()
    # Inspection
    dir_path = os.path.join(DIR_PATH, 'inspection')
    os.makedirs(dir_path, exist_ok=True)
    file_path = os.path.join(dir_path, f'{fr.name}.png')
    fig, ax = fr.plot_graph(file_path=file_path, show=False)
    plt.close(fig)
    # Write to file
    dir_path = os.path.join(DIR_PATH, 'data', item.form, fr.name)
    os.makedirs(dir_path, exist_ok=True)
    file_path = os.path.join(dir_path, fr.name + '.csv')
    fr.write_to_csv(file_path)
    print(f'Saved "{fr.name}" to "{file_path}"')
def main():
    """Parse raw IEM measurement text files, average per-model channels and
    save CSVs plus inspection plots.

    Bug fix: the inspection directory was created only when it already existed
    (``if os.path.exists(...)`` instead of ``if not ...``), so on a fresh
    checkout it was never created; also ensures the data output directory
    exists, and renames the open() handle ``f`` which was shadowed by the
    parse loop's ``f, r = frp`` unpacking.
    """
    data = dict()
    for file_path in glob(os.path.join(DIR_PATH, 'raw_data', '*.txt')):
        # Read name of the IEM from file path
        _, file_name = os.path.split(file_path)
        name = '.'.join(file_name.split('.')[:-1])
        name = name[:-2]  # Remove channel from name
        if name not in data:
            data[name] = []
        # Read file
        with open(file_path, 'r') as fh:
            s = fh.read()
        freq = []
        raw = []
        for line in s.split('\n'):
            if len(line) == 0 or line[0] == '*':
                # Skip empty lines and comments
                if 'C-weighting compensation: On' in line:
                    warnings.warn('C-weighted measurement: ' + name)
                continue
            frp = line.split(' ')
            if len(frp) == 1:
                frp = line.split('\t')
            if len(frp) == 2:
                f, r = frp
            elif len(frp) == 3:
                f, r, p = frp
            else:
                # Must be comment line
                continue
            if f == '?' or r == '?':
                # Skip lines with missing data
                continue
            try:
                freq.append(float(f))
                raw.append(float(r))
            except ValueError:
                # Failed to convert values to floats, must be header or comment row, skip
                continue
        # Create standard fr object
        fr = FrequencyResponse(name=name, frequency=freq, raw=raw)
        fr.interpolate()
        fr.center()
        data[name].append(fr)
    # Ensure output directories exist (fixes inverted existence check)
    os.makedirs(os.path.join(DIR_PATH, 'inspection'), exist_ok=True)
    os.makedirs(os.path.join(DIR_PATH, 'data'), exist_ok=True)
    # Iterate all models
    for name, frs in data.items():
        # Average SPL data from all measurements for this model (so Left and Right)
        raw = np.mean([fr.raw for fr in frs], axis=0)
        # Save as CSV
        fr = FrequencyResponse(name=name, frequency=frs[0].frequency, raw=raw)
        fr.write_to_csv(os.path.join(DIR_PATH, 'data', name + '.csv'))
        # Save inspection image
        fr.plot_graph(show=False, file_path=os.path.join(DIR_PATH, 'inspection', name + '.png'))
def main():
    """Parse Rtings graph images into frequency response CSVs.

    Reads PNG graphs from --input_dir, extracts left/right channel curves,
    averages them, adds the compensation curve and writes per-model CSVs,
    saving intermediate inspection images along the way.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input_dir', type=str, default='images', help='Path to images directory.')
    arg_parser.add_argument('--inspection_dir', type=str, default='inspection', help='Path to inspection directory.')
    arg_parser.add_argument('--output_dir', type=str, default='data', help='Path to data directory.')
    arg_parser.add_argument('--compensation', type=str, default='resources/rtings_compensation_w_bass.csv', help='Path to compensation file.')
    cli_args = arg_parser.parse_args()
    input_dir = os.path.abspath(cli_args.input_dir)
    inspection_dir = os.path.abspath(cli_args.inspection_dir)
    output_dir = os.path.abspath(cli_args.output_dir)
    compensation_path = os.path.abspath(cli_args.compensation)
    # Ensure inspection and output directory trees exist
    if not os.path.isdir(inspection_dir):
        os.makedirs(inspection_dir)
    if not os.path.isdir(os.path.join(inspection_dir, 'left')):
        os.makedirs(os.path.join(inspection_dir, 'left'))
    if not os.path.isdir(os.path.join(inspection_dir, 'right')):
        os.makedirs(os.path.join(inspection_dir, 'right'))
    if not os.path.isdir(os.path.join(inspection_dir, 'fr')):
        os.makedirs(os.path.join(inspection_dir, 'fr'))
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    # Compensation
    comp = FrequencyResponse.read_from_csv(compensation_path)
    for file_path in glob(os.path.join(input_dir, '*.png')):
        print(file_path)
        name = os.path.split(file_path)[-1].replace('.png', '')
        # Read and parse image
        im = Image.open(file_path)
        fr_left, inspection_left = parse_image(im, name, 'left')
        fr_right, inspection_right = parse_image(im, name, 'right')
        # Save inspection images
        inspection_left.save(os.path.join(inspection_dir, 'left', name + '.png'))
        inspection_right.save(os.path.join(inspection_dir, 'right', name + '.png'))
        # Create directory
        dir_path = os.path.join(output_dir, name)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        # Average channels; both curves must share the same frequency grid
        if not np.array_equal(fr_left.frequency, fr_right.frequency):
            warnings.warn('Left and right channel frequency data of "{}" don\'t match!'.format(name))
            continue
        # NOTE(review): adding comp.raw assumes the compensation curve is sampled
        # on the same frequency grid as the parsed curves — confirm
        raw = np.mean(np.vstack((fr_left.raw, fr_right.raw)), axis=0) + comp.raw
        fr = FrequencyResponse(name=name, frequency=fr_left.frequency, raw=raw)
        fr.plot_graph(show=False, file_path=os.path.join(inspection_dir, 'fr', name + '.png'))
        # Write to CSV
        fr.write_to_csv(os.path.join(output_dir, name, name + '.csv'))