def main(model, data, params):
    """Train the named model on a dataset and report train/test scores.

    Loads train and test splits via get_data, instantiates the model with
    the (optionally string-encoded) params, fits it, plots the negative
    log-likelihood when the model exposes one, and pretty-prints scores.
    """
    X, y, metadata = get_data(data)
    X_test, y_test, _ = get_data(data, test=True)
    print('Data shape, Train: %s, Test: %s' % (X.shape, X_test.shape))

    # NOTE(review): eval() on a caller-supplied string is unsafe for
    # untrusted input -- if params is always a dict literal, ast.literal_eval
    # would be the safer choice.
    params = eval(params) if params is not None else {}

    model_name = model
    model = get_model(model, metadata['regression'])
    params['metadata'] = metadata

    clf = model(**params)
    clf.fit(X, y)
    if hasattr(clf, '_negative_log_likelihood'):
        plot_nll(clf, model_name, data)

    # Score both splits with the dataset's configured metric.
    for split_name, features, labels in (('Train', X, y), ('Test', X_test, y_test)):
        print('%s:' % split_name)
        pprint(scores(clf.predict(features), labels, scoring=metadata['scoring']))
def test_dot_changes_different_contents_and_identical_files(dot_changes_differences_different_contents_and_identical_files):
    """Both differences exist and each matches its stored expected diff."""
    differences = dot_changes_differences_different_contents_and_identical_files
    assert differences[0]
    assert differences[1]
    assert differences[0].unified_diff == get_data('dot_changes_description_expected_diff')
    assert differences[1].unified_diff == get_data('dot_changes_different_contents_and_identical_files_expected_diff')
def rates(self):
    """Plot hit (left) and false-alarm (right) rates vs. threshold index.

    One curve per dataset in ``self.data``; the figure is saved as a PDF
    when ``params.save_figs`` is set.
    """
    name = 'rates'
    # BUG FIX: the original indexed an undefined name ``ax``; the subplot
    # array returned by plt.subplots() is bound to ``axes``.
    fig, axes = plt.subplots(1, 2, figsize=(8, 8))
    params = self.params
    for ind, item in enumerate(self.data):
        N = params.N_t
        target = get_data(item[self.p], 'target', params.offset[self.o], params.noise[self.nn])
        lure = get_data(item[self.p], 'lure', params.offset[self.o], params.noise[self.nn])
        # drop the last threshold bin from both rate curves
        hit = calc_rates(target, N)[:-1]
        fa = calc_rates(lure, N)[:-1]
        x_values = np.arange(1, len(fa) + 1)
        axes[0].plot(x_values, hit, 'o-', color=self.colors[ind], alpha=self.alphas[ind],
                     marker=self.markers[ind], label=f"{self.aux}= {self.labels[ind]}")
        axes[1].plot(x_values, fa, 'o-', color=self.colors[ind], alpha=self.alphas[ind],
                     marker=self.markers[ind], label=f"{self.aux}= {self.labels[ind]}")
        custom_axis(axes[0], {'xlabel': {'text': '#Threshold'},
                              'ylabel': {'text': 'Hit rate'},
                              'xlim': {'xlimit': (min(x_values), max(x_values))}})
        custom_axis(axes[1], {'xlabel': {'text': '#Threshold'},
                              'ylabel': {'text': 'False alarm rate'},
                              'xlim': {'xlimit': (min(x_values), max(x_values))},
                              'legend': {'size': 14, 'bbox_to_anchor': (1.03, 1.03)}})
    fig.tight_layout()
    fig.suptitle(self.title, fontsize=14, y=.7)
    if params.save_figs:
        fig.savefig(f"{self.output_dir}{self.effect}-{name}-{self.params.noise[self.nn]}-{params.simID}.pdf")
def roc_hist(self, noise, row=9, figsize=(8, 8), save=False):
    """Draw the ROC curve (left panel) and target/lure distance histograms
    (right panel) for one noise level, optionally saving the figure."""
    rcParams["figure.figsize"] = figsize
    # ROC panel
    target_yes = get_data(self.data, 'target', row, noise)
    lure_yes = get_data(self.data, 'lure', row, noise)
    hit = calc_rates(target_yes, self.params.N_t)
    fa = calc_rates(lure_yes, self.params.N_t)
    fig, axes = plt.subplots(1, 2)
    roc_ax = axes[0]
    roc_ax.plot(fa, hit, 'o-')
    roc_ax.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    axis_default(roc_ax, 'False alarm rate', 'Hit rate',
                 limit=[[0, 1], [0, 1]], aspect=True)
    # histogram panel
    hist_ax = axes[1]
    target_dist = get_data(self.data, 'min-dist-target', row, noise)
    lure_dist = get_data(self.data, 'min-dist-lure', row, noise)
    thr_range = get_data(self.data, 'threshold_range', row, noise)[:-1]
    hist_ax.hist(np.ravel(target_dist), color='green', bins=self.params.n_bins,
                 alpha=0.7, label='Target')
    hist_ax.hist(np.ravel(lure_dist), color='red', bins=self.params.n_bins,
                 alpha=0.7, label='Lure')
    y = hist_ax.get_ylim()
    for thr in thr_range:
        hist_ax.plot([thr] * 2, y, 'k--')  # vertical line at each threshold
    axis_default(hist_ax, 'Distance', 'Count', aspect=True, legend=True)
    fig.tight_layout()
    fig.suptitle('Noise=%s' % noise, fontsize=16, y=0.75)
    if save:
        fig.savefig(self.output_dir + 'ROC_hist-' + str(noise) + '-' + self.params.simID + '.png')
def test_lib_differences(lib_differences):
    """The library comparison yields a file-list diff and an objdump diff."""
    assert len(lib_differences) == 2
    metadata_diff = lib_differences[0]
    objdump_diff = lib_differences[1]
    assert metadata_diff.source1 == 'file list'
    assert metadata_diff.unified_diff == get_data('elf_lib_metadata_expected_diff')
    assert 'objdump' in objdump_diff.source1
    assert objdump_diff.unified_diff == get_data('elf_lib_objdump_expected_diff')
def temp_prediction(ts_start):
    """Build a 24-hour temperature prediction starting from ``ts_start``.

    Returns ('OK', {timestamp_str: prediction}) on success, or
    ('ERROR', message) if any stage of the pipeline fails.
    """
    dt_start = datetime.datetime.fromtimestamp(ts_start)
    # truncate to the top of the hour
    start = datetime.datetime(year=dt_start.year, month=dt_start.month,
                              day=dt_start.day, hour=dt_start.hour,
                              minute=0, second=0)

    # Get data from database
    status, result_data = data.get_data(start)
    if status == 'ERROR':
        return ('ERROR', result_data)

    status, result_data = data.get_hourly_data(result_data, 36, ts_start)
    if status == 'ERROR':
        return ('ERROR', result_data)

    # check integrity of returned values
    status, result_data = make_prediction(result_data)
    if status == 'ERROR':
        return ('ERROR', "Error in prediction")

    # key the 24 hourly predictions by timestamp, counting backwards from
    # start + 24h in one-hour steps
    ts = int(time.mktime(start.timetuple())) + (24 * 60 * 60)
    final_result = {str(ts - 3600 * i): result_data[i] for i in range(24)}
    return ("OK", final_result)
def correct_retrieval_list(self, info='targ-match', figsize=(8, 8)):
    """Plot retrieval accuracy (``info``) against list length.

    One subplot per every-other pattern-separation value, one curve per
    noise level.  Data is loaded from pickles in ``self.input_dir`` named
    '<list_length>-<pat_sep>.pkl' -- TODO confirm this naming convention
    against the writer side.
    """
    params = self.params
    noise = params.noise
    # one accumulator dict per list length, all keyed by ``info``
    data_lists = [{info: []} for i in range(len(params.list_length))]
    data_all = list(range(len(params.list_length)))
    # load the data for all list lengths
    for j in range(len(params.list_length)):
        data_all[j] = [
            pd.read_pickle(self.input_dir + str(params.list_length[j]) + '-' +
                           str(params.pat_sep[jj]) + '.pkl')
            for jj in range(len(params.pat_sep))
        ]
    # load the required info for all list lengths (uses the last offset only)
    for dic, dat in zip(data_lists, data_all):
        for key in dic.keys():
            dic[key] = get_data(dat, key, params.offset[-1], params.noise)
    # one panel per every-other pattern-separation value
    fig, axes = plt.subplots(1, len(params.pat_sep[::2]))
    for i in range(len(params.pat_sep[::2])):
        # index of this pat_sep value in the full (unstrided) list
        ind = params.pat_sep.index(params.pat_sep[::2][i])
        ax = axes[i]
        for nn in range(len(params.noise)):
            plot_data = [item[info][ind][nn] for item in data_lists]
            ax.plot(params.list_length, plot_data, 'o-',
                    label=np.round(noise[nn], 2))
        # only the last panel carries the legend
        if ind == len(params.pat_sep) - 1:
            axis_default(
                ax, 'List length', info,
                limit=[[params.list_length[0], params.list_length[-1]], [0.3, 1]],
                legend=True, leg_size=10, aspect=True, r=1)
        else:
            axis_default(
                ax, 'List length', info,
                limit=[[params.list_length[0], params.list_length[-1]], [0.3, 1]],
                legend=False, aspect=True, r=1)
        ax.set_title('Pat Sep=%s' % params.pat_sep[::2][i], fontsize=14,
                     fontweight='bold', y=1.05)
    fig.tight_layout()
    if self.save:
        fig.savefig(self.output_dir + '/Rec_accuracy_list' + "_" + params.simID +
                    '.' + self.fig_format, format=self.fig_format, dpi=300)
def train_center_net(train_df, oof_df):
    """Fit a Resnest50 CenterNet on ``train_df`` with ``oof_df`` as the
    validation fold, then evaluate on the held-out 'valid' split."""
    batch = Config.Train.batch_size
    train_loader = DataLoader(
        centernet.WheatDataset(train_df, transforms=get_train_transforms()),
        batch_size=batch, shuffle=True, num_workers=4,
        drop_last=True, pin_memory=True)
    oof_loader = DataLoader(
        centernet.WheatDataset(oof_df, test=True, transforms=get_valid_transforms()),
        batch_size=batch, shuffle=False, num_workers=4, pin_memory=True)

    model = Resnest50CenterNet(conf=Config)
    # Both callbacks track validation mAP: stop after 10 stagnant epochs and
    # keep only the single best checkpoint.
    early_stop = callbacks.EarlyStopping(monitor='val_map', patience=10,
                                         mode='max', verbose=True)
    checkpoint = callbacks.ModelCheckpoint(str(Config.Train.checkpoint_dir),
                                           monitor='val_map', verbose=True,
                                           mode='max', save_top_k=1)
    trainer = Trainer(gpus=1,
                      early_stop_callback=early_stop,
                      checkpoint_callback=checkpoint,
                      callbacks=[callbacks.LearningRateLogger()],
                      benchmark=True,
                      deterministic=True,
                      max_epochs=Config.Train.epochs)
    trainer.fit(model, train_dataloader=train_loader, val_dataloaders=oof_loader)

    valid_loader = DataLoader(
        centernet.WheatDataset(get_data(mode='valid'), test=True,
                               transforms=get_test_transforms()),
        batch_size=batch, shuffle=False, num_workers=4, pin_memory=True)
    trainer.test(model, test_dataloaders=valid_loader)
def test_difference_in_ascii(ascii1, ascii2):
    """Comparing the two ASCII files yields exactly the stored diff,
    with no comments and no nested details."""
    difference = ascii1.compare(ascii2)
    assert difference is not None
    assert difference.unified_diff == get_data('text_ascii_expected_diff')
    assert not difference.comments
    assert len(difference.details) == 0
def test_fallback_comparisons(monkeypatch):
    """Each Debian fallback comparator produces its stored expected diff."""
    manager = ComparatorManager()
    monkeypatch.setattr(manager, 'COMPARATORS', (
        ('debian_fallback.DotChangesFile',),
        ('debian_fallback.DotDscFile',),
        ('debian_fallback.DotBuildinfoFile',),
    ))
    manager.reload()

    cases = [
        ('test1.changes', 'test2.changes', 'dot_changes_fallback_expected_diff'),
        ('test1.dsc', 'test2.dsc', 'dot_dsc_fallback_expected_diff'),
        ('test1.buildinfo', 'test2.buildinfo', 'dot_buildinfo_fallback_expected_diff'),
    ]
    for name_a, name_b, expected_diff in cases:
        # Re-specialize after reloading our Comparators
        file1 = specialize(FilesystemFile(data(name_a)))
        file2 = specialize(FilesystemFile(data(name_b)))
        assert file1.compare(file1) is None
        assert file2.compare(file2) is None
        assert file1.compare(file2).unified_diff == get_data(expected_diff)
def test_item3_deflate_llvm_bitcode(differences):
    """The fourth difference is the deflated bitcode; its disassembly diff
    matches the stored one once line numbers are stripped."""
    diff = differences[3]
    assert diff.source1 == 'alloc_system-d16b8f0e.0.bytecode.deflate'
    assert diff.source2 == 'alloc_system-d16b8f0e.0.bytecode.deflate'
    actual_diff = diff.details[0].details[1].unified_diff
    expected_diff = get_data('rlib_llvm_dis_expected_diff')
    # compare with line numbers normalized away
    assert diff_ignore_line_numbers(actual_diff) == diff_ignore_line_numbers(expected_diff)
def test_compare_to_symlink(tmpdir):
    """Comparing a directory against a symlink yields the stored diff."""
    link_path = str(tmpdir.join('src'))
    os.symlink('/etc/passwd', link_path)
    directory = specialize(FilesystemFile(str(tmpdir.mkdir('dir'))))
    symlink = specialize(FilesystemFile(link_path))
    assert directory.compare(symlink).unified_diff == get_data('test_directory_symlink_diff')
def false_alarms(self, nn, figsize=(6, 6), c=0, m=4, info='lure'):
    """Plot cumulative false-alarm counts vs. threshold for the cortical
    (index ``c``), hippocampal (index ``m``) and, when present, combined
    models -- one subplot per offset.

    ``nn`` selects the noise level; the figure is saved when ``self.save``.
    """
    params = self.params
    data = self.data
    rcParams["figure.figsize"] = figsize
    plot_data = {info: []}
    for key in plot_data.keys():
        plot_data[key] = get_data(data, key, params.offset, nn)
    # 2-row subplot grid; an odd offset count > 1 drops the final offset
    if np.mod(len(params.offset), 2) == 0:
        loop = len(params.offset)
    elif len(params.offset) == 1:
        loop = 1
    else:
        loop = len(params.offset) - 1
    fig = plt.figure()
    for o in range(loop):
        ax = fig.add_subplot(2, int(np.ceil(loop / 2)), o + 1)
        cort = plot_data[info][c][o]
        hip = plot_data[info][m][o]
        # BUG FIX: the original used two bare ``except: pass`` guards.  If the
        # combined-model lookup failed on a later iteration, a stale ``comb``
        # from a previous subplot was silently re-plotted.  We now reset comb
        # each iteration and only plot it when the lookup succeeded; the
        # except clause is narrowed so KeyboardInterrupt/SystemExit propagate.
        try:
            comb = plot_data['lure'][m + params.hip][o]
        except Exception:
            comb = None
        threshold = np.arange(1, params.N_thr + 1)
        ax.plot(threshold, np.cumsum(cort), 'ro-', label='Cort')
        ax.plot(threshold, np.cumsum(hip), 'bo-', label='Hip')
        if comb is not None:
            ax.plot(threshold, np.cumsum(comb), 'go-', label='Comb')
        ax.set_title('Offset=%s' % params.offset[o])
        axis_default(ax, 'Threshold', 'CumSum',
                     limit=[[min(threshold), max(threshold)], [0, self.N]],
                     legend=True, leg_size=10, aspect=True)
    fig.tight_layout()
    fig.suptitle('Noise: ' + str(nn), fontsize=14, fontweight='bold', y=1.05)
    if self.save:
        fig.savefig(self.output_dir + '/Lures' + "_" + params.cond + "_" +
                    str(nn) + "_" + params.simID + '.' + self.fig_format,
                    format=self.fig_format, dpi=300)
def test_with_compare_details_and_fallback():
    """When compare_details reports nothing, comparison falls back to the
    binary diff and notes that the data still differs."""
    class MockFile(FilesystemFile):
        def compare_details(self, other, source=None):
            # no detailed differences -> forces the binary fallback path
            return []

    difference = MockFile(TEST_FILE1_PATH).compare(MockFile(TEST_FILE2_PATH))
    assert 'yet data differs' in difference.comment
    assert normalize_zeros(difference.unified_diff) == get_data('binary_expected_diff')
def correct_retrieval(self, info='targ-match'):
    """Plot correct-retrieval accuracy (``info``) against noise, one subplot
    per offset, with one curve per scale value up to ``params.hip``.

    The figure is saved as a PDF when ``params.save_figs`` is set.
    """
    params = self.params
    name = 'correct-retrieval'
    fig = plt.figure(figsize=(6, 6))
    # 2-row subplot grid; an odd offset count > 1 drops the final offset
    if np.mod(len(params.offset), 2) == 0:
        loop = len(params.offset)
    elif len(params.offset) == 1:
        loop = 1
    else:
        loop = len(params.offset) - 1
    for o in range(loop):
        ax = fig.add_subplot(2, int(np.ceil(loop / 2)), o + 1)
        plot_data = {info: []}
        for key in plot_data.keys():
            plot_data[key] = get_data(self.data[:params.hip + 1], key,
                                      params.offset[o], params.noise)
        # IDIOM FIX: the original built a throwaway list comprehension purely
        # for the plotting side effect; a plain loop expresses the intent.
        for ind, item in enumerate(params.scale[:params.hip + 1]):
            ax.plot(params.noise,
                    plot_data[key][params.scale.index(item)],
                    'o-', color='k', alpha=0.7, marker=markers[ind],
                    label='p=%s' % item)
        custom_axis(
            ax, {
                'xlabel': {'text': omega},
                'ylabel': {'text': 'Correct retrieval'},
                'xlim': {'xlimit': (min(params.noise), max(params.noise))},
                'ylim': {'ylimit': (0.3, 1)},
                'xticks': {'round': 2},
                'legend': {'size': 14, 'bbox_to_anchor': None}
            })
    fig.tight_layout()
    fig.suptitle(self.title, fontsize=14, y=1.1)
    if params.save_figs:
        fig.savefig(
            f"{self.output_dir}/{name}-{self.params.noise[self.nn]}-{params.simID}.pdf"
        )
def correct_retrieval(self, nn, info='targ-match', figsize=(6, 6)):
    """Plot retrieval accuracy (``info``) against the noise values ``nn``,
    one subplot per offset, one curve per pattern-separation value up to
    ``params.hip``.  Saves the figure when ``self.save`` is set.
    """
    params = self.params
    data = self.data[:params.hip + 1]
    rcParams["figure.figsize"] = figsize
    # 2-row subplot grid; an odd offset count > 1 drops the final offset
    if np.mod(len(params.offset), 2) == 0:
        loop = len(params.offset)
    elif len(params.offset) == 1:
        loop = 1
    else:
        loop = len(params.offset) - 1
    fig = plt.figure()
    # NOTE: unused ``alphas``/``linewidths`` np.linspace locals removed.
    for o in range(loop):
        ax = fig.add_subplot(2, int(np.ceil(loop / 2)), o + 1)
        plot_data = {info: []}
        for key in plot_data.keys():
            plot_data[key] = get_data(data, key, params.offset[o], nn)
        # IDIOM FIX: side-effect-only list comprehension replaced by a loop.
        for ind, item in enumerate(params.pat_sep[:params.hip + 1]):
            ax.plot(nn,
                    plot_data[key][params.pat_sep.index(item)],
                    'o-', color='slategray', marker=markers[ind],
                    label='p=%s' % item)
        ax.set_ylim(0.3, 1)
        axis_default(ax, 'Noise', 'Correct retrieval', labelsize=16,
                     limit=[[min(nn), max(nn)], [0.3, 1]], ticksize=14, r=2)
        ax.legend(loc='lower left', prop={'size': 12})
        set_aspect(ax)
        ax.set_title('Offset=%s' % params.offset[o], fontsize=14)
    fig.tight_layout()
    fig.suptitle('Intended retrieval', fontsize=14, fontweight='bold', y=1.05)
    if self.save:
        fig.savefig(self.output_dir + '/Ret_accuracy' + "_" + params.cond +
                    "_" + params.simID + '.' + self.fig_format,
                    format=self.fig_format, dpi=300)
def train():
    """Split the data 90/10 by image id and dispatch training to the model
    selected by ``Config.model_type``."""
    data_df = get_data()
    train_ids, oof_ids = train_test_split(data_df['image_id'].unique(),
                                          test_size=0.10,
                                          shuffle=True,
                                          random_state=Config.seed)
    train_df = data_df.loc[data_df['image_id'].isin(train_ids)]
    oof_df = data_df.loc[data_df['image_id'].isin(oof_ids)]

    # dispatch table keeps the mapping model-name -> trainer in one place
    trainers = {
        'faster_rcnn': train_faster_rcnn,
        'center_net': train_center_net,
    }
    trainer = trainers.get(Config.model_type)
    if trainer is not None:
        trainer(train_df, oof_df)
def test_compare_to_file(tmpdir):
    """Comparing a directory against a regular file yields the stored diff."""
    file_path = str(tmpdir.join('file'))
    with open(file_path, 'w') as handle:
        handle.write("content")
    directory = specialize(FilesystemFile(str(tmpdir.mkdir('dir'))))
    regular_file = specialize(FilesystemFile(file_path))
    assert directory.compare(regular_file).unified_diff == get_data('test_directory_file_diff')
def bps_distances(self, nn, figsize=(8, 8), save=True):
    """Histogram target/lure distance distributions for the two models
    (one panel each), marking the decision thresholds as vertical lines.

    ``nn`` selects the noise level; saves the figure when ``save`` is True.
    """
    params = self.params
    data = self.data
    rcParams["figure.figsize"] = figsize
    fig, axes = plt.subplots(1, 2)
    # CLARITY FIX: the original wrote ``range(len([0, params.hip]))``, which
    # is always range(2) -- one panel per model.
    for m in range(2):
        dat1 = data[m]
        ax = axes[m]
        plot_data = {
            'targ-hist-freq': [],
            'lure-hist-freq': [],
            'targ-hist-bins': [],
            'lure-hist-bins': [],
            'threshold_range': [],
            'min-distances': [],
            'target': [],
            'lure': []
        }
        for key in plot_data.keys():
            plot_data[key] = get_data(dat1, key, params.offset, nn)
        width = np.diff(plot_data['targ-hist-bins'][0])[0]
        ax.bar(plot_data['targ-hist-bins'][0][:-1],
               plot_data['targ-hist-freq'][0],
               width=width, color='g', alpha=0.7)
        ax.bar(plot_data['lure-hist-bins'][0][:-1],
               plot_data['lure-hist-freq'][0],
               width=width, color='orange', alpha=0.7)
        ax.bar(plot_data['lure-hist-bins'][-1][:-1],
               plot_data['lure-hist-freq'][-1],
               width=width, color='r', alpha=0.7)
        y = ax.get_ylim()
        # IDIOM FIX: side-effect-only list comprehension replaced by a loop.
        for item in plot_data['threshold_range'][0]:
            ax.plot([item] * 2, y, 'k-')
        ax.set_title('Pat_sep=%s' % params.pat_sep[m], fontsize=16)
        set_aspect(ax)
    fig.suptitle('Noise: ' + str(nn), fontsize=14, fontweight='bold', y=0.8)
    if save:
        # NOTE(review): only the last panel's axes get rasterized here, as in
        # the original -- confirm whether both panels were intended.
        ax.set_rasterized(True)
        fig.savefig(self.output_dir + '/bps_hist' + "_" + params.cond + "_" +
                    str(nn) + "_" + params.simID + '.' + self.fig_format,
                    format=self.fig_format, dpi=300)
def test_differences(differences):
    """The four EPUB member diffs carry the right sources and, concatenated,
    equal the stored expected diff."""
    expected_sources = ['zipinfo {}', 'content.opf', 'toc.ncx', 'ch001.xhtml']
    for idx, source in enumerate(expected_sources):
        assert differences[idx].source1 == source
        assert differences[idx].source2 == source
    combined = "".join(d.unified_diff for d in differences)
    assert get_data('epub_expected_diffs') == combined
def test_destination(tmpdir):
    """Symlinks pointing at different destinations yield the stored diff."""
    def make_symlink(label):
        # each link lives in its own directory and points at '/<label>'
        link = os.path.join(str(tmpdir.mkdir(label)), 'src')
        os.symlink('/{}'.format(label), link)
        return specialize(FilesystemFile(link))

    first = make_symlink('a')
    second = make_symlink('b')
    assert first.compare(second).unified_diff == get_data('symlink_expected_destination_diff')
def test_diff(obj_differences):
    """Mach-O comparison yields four differences; the first (arch) diff must
    match the stored expected output."""
    assert len(obj_differences) == 4
    l = [
        'macho_expected_diff_arch', 'macho_expected_diff_headers',
        'macho_expected_diff_loadcommands', 'macho_expected_diff_disassembly'
    ]
    # NOTE(review): this loop WRITES the produced diffs back into the test
    # data directory on every run -- i.e. it regenerates the fixtures instead
    # of comparing against them.  Looks like debug/bootstrap residue; confirm
    # whether it should be removed.
    for idx, diff in enumerate(obj_differences):
        with open(os.path.join(os.path.dirname(__file__), '../data', l[idx]),
                  'w') as f:
            print(diff.unified_diff, file=f)
    expected_diff = get_data('macho_expected_diff')
    assert obj_differences[0].unified_diff == expected_diff
def test_differences(differences):
    """The JAR diff exposes zipinfo output and the decompiled class diff,
    which together equal the stored expected diff."""
    top = differences[0]
    assert top.source1 == 'test1.jar'
    assert top.source2 == 'test2.jar'
    zipinfo = top.details[0]
    classdiff = top.details[1]
    for source in (zipinfo.source1, zipinfo.source2):
        assert source == 'zipinfo -v {}'
    for source in (classdiff.source1, classdiff.source2):
        assert source == 'com/example/MainActivity.class'
    found_diff = zipinfo.unified_diff + classdiff.details[0].unified_diff
    assert get_data('dex_expected_diffs') == found_diff
def get_running_data(maclist, name, begin_time, end_time):
    """Fetch dehumidifier data for each MAC address and aggregate it.

    Parameters
    ----------
    maclist : sequence of device MAC addresses
    name : sequence of labels, parallel to ``maclist``
    begin_time, end_time : query window forwarded to ``get_local_data``

    Returns the (Home_Data, Virtual_List, Device_List) triple produced by
    ``get_real_data``.
    """
    raw_data = []
    # IDIOM FIX: iterate the two parallel sequences together instead of
    # indexing with range(len(...)); the redundant begin/end aliases are gone.
    for mac, label in zip(maclist, name):
        temp = get_local_data([mac], begin_time, end_time,
                              DeviceFlag='dehumidifier')
        raw_data = get_data(temp, raw_data, label)
    raw_data = np.array(raw_data)
    Home_Data, Virtual_List, Device_List = get_real_data(raw_data)
    return Home_Data, Virtual_List, Device_List
def test_with_compare_details_and_tool_not_found(monkeypatch):
    """A missing external tool makes the comparison fall back to the binary
    diff and record both the tool and its providing package."""
    monkeypatch.setattr('diffoscope.exc.RequiredToolNotFound.get_package',
                        lambda _: 'some-package')

    class MockFile(FilesystemFile):
        @tool_required('nonexistent')
        def compare_details(self, other, source=None):
            raise Exception('should not be run')

    difference = MockFile(TEST_FILE1_PATH).compare(MockFile(TEST_FILE2_PATH))
    assert 'nonexistent' in difference.comment
    assert 'some-package' in difference.comment
    assert normalize_zeros(difference.unified_diff) == get_data('binary_expected_diff')
def test_with_compare_details_and_failed_process():
    """A failing compare_details subprocess makes the comparison fall back to
    the binary diff, recording the process output and exit code."""
    output = 'Free Jeremy Hammond'

    class MockFile(FilesystemFile):
        def compare_details(self, other, source=None):
            subprocess.check_output(
                ['sh', '-c', 'echo "%s"; exit 42' % output], shell=False)
            raise Exception('should not be run')

    difference = MockFile(TEST_FILE1_PATH).compare(MockFile(TEST_FILE2_PATH))
    # CONSISTENCY FIX: sibling tests load this fixture as plain
    # 'binary_expected_diff'; the '../data/' prefix duplicated the data-dir
    # segment that get_data already supplies.
    expected_diff = get_data('binary_expected_diff')
    assert output in difference.comment
    assert '42' in difference.comment
    assert normalize_zeros(difference.unified_diff) == expected_diff
def test_differences(differences):
    """The ext4 tarball diff nests file list, the changed file, and its
    encoding; their concatenation equals the stored diff."""
    assert differences[0].source1 == 'test1.ext4.tar'
    tarinfo = differences[0].details[0]
    tardiff = differences[0].details[1]
    encodingdiff = tardiff.details[0]
    for source in (tarinfo.source1, tarinfo.source2):
        assert source == 'file list'
    for source in (tardiff.source1, tardiff.source2):
        assert source == './date.txt'
    for source in (encodingdiff.source1, encodingdiff.source2):
        assert source == 'encoding'
    found_diff = (tarinfo.unified_diff + tardiff.unified_diff +
                  encodingdiff.unified_diff)
    assert get_data('ext4_expected_diffs') == found_diff
def test_fallback_comparison(monkeypatch):
    """The RPM fallback comparator produces the stored expected diff."""
    manager = ComparatorManager()
    monkeypatch.setattr(manager, 'COMPARATORS', (('rpm_fallback.RpmFile',),))
    manager.reload()

    # Re-specialize after reloading our Comparators
    rpm1 = specialize(FilesystemFile(data('test1.rpm')))
    rpm2 = specialize(FilesystemFile(data('test2.rpm')))
    assert rpm1.compare(rpm1) is None
    assert rpm2.compare(rpm2) is None
    actual = normalize_zeros(rpm1.compare(rpm2).unified_diff)
    assert actual == get_data('rpm_fallback_expected_diff')
def fc_offset_performance(self, figsize=(8, 8), ylabel='Correct', info=None):
    """Plot cortical vs. hippocampal performance against offset, one figure
    per entry in ``info`` and one subplot per noise level.

    ``ylabel`` may be a single string (used for every figure) or a sequence
    parallel to ``info``.  Saves each figure when ``self.save`` is set.
    """
    # BUG FIX: ``info=['target']`` was a mutable default argument.
    if info is None:
        info = ['target']
    params = self.params
    data = self.data
    noise = params.noise
    rcParams["figure.figsize"] = figsize
    for i in range(len(info)):
        # BUG FIX: with the default ylabel='Correct' (a string), the original
        # ``ylabel[i]`` silently produced single-character labels ('C', ...).
        # A plain string now labels every figure; sequences are indexed.
        if isinstance(ylabel, str):
            y_lab = ylabel
        else:
            try:
                y_lab = ylabel[i]
            except IndexError:
                pass  # reuse the previous label, as the original did
        fig = plt.figure()
        for nn in range(len(noise)):
            ax = fig.add_subplot(1, len(noise), nn + 1)
            plot_data = {info[i]: []}
            for key in plot_data.keys():
                plot_data[key] = get_data(data, key, params.offset, noise[nn])
            ax.plot(params.offset, plot_data[info[i]][0], 'ro-', label='Cort')
            ax.plot(params.offset, plot_data[info[i]][-1], 'go-', label='Hip')
            axis_default(ax, 'Offset', y_lab,
                         limit=[[params.offset[0], params.offset[-1]], [0.5, 1]],
                         legend=True, aspect=True, r=1)
            ax.set_title('Noise=%s' % noise[nn], fontsize=14,
                         fontweight='bold', y=1.05)
        fig.tight_layout()
        if self.save:
            fig.savefig(self.output_dir + '/fc-offset' + "_" + params.cond +
                        "_" + params.simID + '.' + self.fig_format,
                        format=self.fig_format, dpi=300)
def get_cities_in_radius(radius, origin):
    """Print, alphabetically, every known city within ``radius`` km of
    ``origin`` (the heading text assumes the origin is Dublin)."""
    within = [
        city for city in get_data()
        if distance(origin['lat'], origin['lon'],
                    city['lat'], city['lon']) <= radius
    ]
    sorted_names = sorted(map_city_names(within))
    print('Cities in a {:d} km radius from Dublin:'.format(radius))
    print('------------------------------------------')
    for name in sorted_names:
        print(name)
def distance_histograms(self, param=''):
    """Plot target vs. lure distance histograms for every other
    pattern-separation value (one panel each), repeated per offset, with the
    decision-threshold range marked as vertical lines.

    ``param`` is a suffix appended to every data key, selecting an alternate
    set of stored histograms.  The figure is saved as a PDF when
    ``params.save_figs`` is set.
    """
    params = self.params
    name = 'distance_histograms'
    # fig,axes = self.empty_figure(name)
    fig, axes = plt.subplots(1, 3, figsize=(8, 8))
    plot_data = {'targ-hist-freq' + param: [], 'lure-hist-freq' + param: [],
                 'targ-hist-bins' + param: [], 'lure-hist-bins' + param: [],
                 'threshold_range' + param: []}
    # NOTE(review): looks like a leftover debug print -- confirm and remove.
    print(plot_data.keys())
    for key in plot_data.keys():
        plot_data[key] = get_data(self.data[:params.hip + 1], key,
                                  params.offset, params.noise[self.nn])
    for o in range(len(params.offset)):
        count = 0
        # every other pattern-separation value gets a panel
        for m in params.pat_sep[::2]:
            target_bins = plot_data['targ-hist-bins' + param][params.pat_sep.index(m)][o]
            target_freq = plot_data['targ-hist-freq' + param][params.pat_sep.index(m)][o]
            lure_bins = plot_data['lure-hist-bins' + param][params.pat_sep.index(m)][o]
            lure_freq = plot_data['lure-hist-freq' + param][params.pat_sep.index(m)][o]
            # axes is a scalar Axes (not an array) when there is one panel
            try:
                ax = axes[count]
            except:
                ax = axes
            count += 1
            ax.bar(target_bins[:-1], target_freq,
                   width=np.diff(target_bins)[0],
                   color='tab:green', alpha=.8, label="Target")
            ax.bar(lure_bins[:-1], lure_freq,
                   width=np.diff(target_bins)[0],
                   color='tab:purple', alpha=.8, label="Lure")
            # vertical lines: threshold range, plus a shifted gray copy
            [ax.plot([item] * 2, [0, params.N_t * .7], 'k--')
             for item in plot_data['threshold_range' + param][params.pat_sep.index(m)][o]]
            [ax.plot([item + 0.02] * 2, [0, params.N_t * .7], '--',
                     color='gray', alpha=0.5)
             for item in plot_data['threshold_range' + param][params.pat_sep.index(m)][o]]
            # last panel gets the legend; first panel gets the y-axis label
            if m == params.pat_sep[::2][-1]:
                custom_axis(ax, {'xlabel': {'text': ' '},
                                 'ylabel': {'text': ''},
                                 'xlim': {'xlimit': (-0.03, 1)},
                                 'ylim': {'ylimit': (0, int(params.N_t * .6))},
                                 'legend': {'size': 14, 'bbox_to_anchor': None}})
            elif count == 1:
                custom_axis(ax, {'xlabel': {'text': ''},
                                 'ylabel': {'text': f'{omega}={params.noise[self.nn]}\nCount'},
                                 'xlim': {'xlimit': (-0.03, 1)},
                                 'ylim': {'ylimit': (0, int(params.N_t * .6))}})
            else:
                custom_axis(ax, {'xlabel': {'text': 'Distance'},
                                 'ylabel': {'text': ''},
                                 'xlim': {'xlimit': (-0.03, 1)},
                                 'ylim': {'ylimit': (0, int(params.N_t * .6))}})
            ax.set_title(f'p={m}', loc='center', fontsize=16)
    fig.tight_layout()
    fig.suptitle(self.title, fontsize=14, y=.7)
    if params.save_figs:
        fig.savefig(f"{self.output_dir}/{name}-{params.noise[self.nn]}-{params.simID}.pdf")