'axis': 1, 'thr_low': thr, }) an.add_analysis("get_mean_std", args={'thr_low': thr}) bins = np.arange(-150, 300, 5) an.add_analysis("get_histo_counts", args={'bins': bins}) an.add_analysis(get_line_histos, args={'axis': 0, 'bins': bins}) # set the dataset an.set_dataset(dataset_name) # add preprocess steps #an.add_preprocess("image_set_roi", args={'roi': roi}) #an.add_preprocess("image_set_thr", thr_low=thr) # run the analysis results = an.analyze_images(fname, n=1000) # plot plt.figure(figsize=(7, 7)) plt.plot(np.nansum(results["spectra"], axis=0), label="ADU > " + str(thr)) plt.legend(loc='best') #plt.show() plt.figure(figsize=(7, 7)) plt.bar(bins[:-1], results["histo_counts"], log=True, width=5) #plt.show() plt.figure(figsize=(15, 10)) plt.subplot(121) plt.imshow(results["histo_counts_line"], vmin=0,
selection=sel) # get laser on/off tags is_laser_on_tags = df[df.is_laser == 1].index.tolist() is_laser_off_tags = df[df.is_laser == 0].index.tolist() # get spectra from Von Hamos, using laser on / off tags #roi = [[0, 1024], [325, 335]] # X, Y ap = ImagesProcessor(facility="SACLA") ap.add_analysis('get_projection', args={"axis": 1}) ap.add_analysis('get_mean_std') ap.set_dataset('/run_%s/detector_2d_1' % run) ap.add_preprocess("set_thr", args={"thr_low": 65}) # get the total spectra results_on = ap.analyze_images(fname, tags=is_laser_on_tags) spectrum_on = results_on["get_projection"]["spectra"].sum(axis=0) results_off = ap.analyze_images(fname, tags=is_laser_off_tags) spectrum_off = results_off["get_projection"]["spectra"].sum(axis=0) spectrum_off = spectrum_off / spectrum_off.sum() spectrum_on = spectrum_on / spectrum_on.sum() # this is the average image from the Von Hamos sum_image_on = results_on["get_mean_std"]["images_mean"] # Plot! plt.subplot(1, 2, 1) plt.imshow(sum_image_on) plt.subplot(1, 2, 2) plt.plot(spectrum_off, label="laser off")
# Per-run image analysis: average the detector images of each run and show
# the mean image next to a zoom on the signal ROI.
# NOTE(review): nesting reconstructed from a whitespace-mangled source —
# the plotting statements are assumed to sit inside `if imgAna:`.
for run in runs:
    rname = str(run)
    fname = DIR + rname + ".h5"  # presumably DIR ends with a path separator — verify against caller
    print('\nAnalyzing run ' + rname + '\n')
    """ Analyze images and integrate roi and bkgRoi. Can take a lot of time.
    The results are saved in a pickle file in the folder analyzed_runs/imgAna.
    To add preprocess, analysis functions, open the dataAna.imgAna function.
    """
    if imgAna:
        # datasets are stored per run inside the HDF5 file
        dataset_name = "/run_" + rname + "/detector_2d_1"
        # keep any preprocess steps already configured on `ip`
        ip.set_dataset(dataset_name, remove_preprocess=False)
        # run the analysis (first n images of the run)
        results = ip.analyze_images(fname, n=n)
        # plot results: full mean image on the left, ROI zoom on the right
        imgs = results["images_mean"]
        plt.figure(figsize=(8, 8))
        plt.subplot2grid((2, 2), (0, 0), rowspan=2)
        plt.imshow(imgs)
        # plt.imshow(imgs[bkgRoi[0][0]:bkgRoi[0][1], bkgRoi[1][0]:bkgRoi[1][1]], aspect=0.5,
        #            extent=(bkgRoi[1][0], bkgRoi[1][1], bkgRoi[0][0], bkgRoi[0][1]), interpolation="none")
        plt.subplot2grid((2, 2), (0, 1))
        # extent puts roi[0][1] at the bottom so the zoom is oriented like
        # the full-frame image above
        plt.imshow(imgs[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]], aspect=0.5,
                   extent=(roi[1][0], roi[1][1], roi[0][1], roi[0][0]), interpolation="none")
        plt.title('ROI')
        # FOR DARKFRAME save to npy:
        # np.save('/home/esposiv/python_scripts2016/analysis/dark_2016_388662_testsave.npy', imgs)
# Per-run image analysis: average the detector images of each run and show
# the mean image next to a zoom on the signal ROI.
# FIX: `print((...))` — a redundant double-parenthesis artifact left by the
# 2to3 conversion — reduced to a plain print(...) call.
for run in runs:
    rname = str(run)
    fname = DIR + rname + ".h5"  # presumably DIR ends with a path separator — verify against caller
    print('\nAnalyzing run ' + rname + '\n')
    """ Analyze images and integrate roi and bkgRoi. Can take a lot of time.
    The results are saved in a pickle file in the folder analyzed_runs/imgAna.
    To add preprocess, analysis functions, open the dataAna.imgAna function.
    """
    if imgAna:
        # datasets are stored per run inside the HDF5 file
        dataset_name = "/run_" + rname + "/detector_2d_1"
        # keep any preprocess steps already configured on `ip`
        ip.set_dataset(dataset_name, remove_preprocess=False)
        # run the analysis (first n images of the run)
        results = ip.analyze_images(fname, n=n)
        # plot results: full mean image on the left, ROI zoom on the right
        imgs = results["images_mean"]
        plt.figure(figsize=(8, 8))
        plt.subplot2grid((2, 2), (0, 0), rowspan=2)
        plt.imshow(imgs)
        # plt.imshow(imgs[bkgRoi[0][0]:bkgRoi[0][1], bkgRoi[1][0]:bkgRoi[1][1]], aspect=0.5,
        #            extent=(bkgRoi[1][0], bkgRoi[1][1], bkgRoi[0][0], bkgRoi[0][1]), interpolation="none")
        plt.subplot2grid((2, 2), (0, 1))
        # extent puts roi[0][1] at the bottom so the zoom is oriented like
        # the full-frame image above
        plt.imshow(imgs[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]], aspect=0.5,
                   extent=(roi[1][0], roi[1][1], roi[0][1], roi[0][0]), interpolation="none")
        plt.title('ROI')
        # FOR DARKFRAME save to npy:
        # np.save('/home/esposiv/python_scripts2016/analysis/dark_2016_388662_testsave.npy', imgs)
def bin_tt_COM(df, bin_edges, rname, fname, calibration=0.01, roi=None):
    """Bin shots by the timing-tool-corrected delay and run a center of
    mass (COM) analysis of the detector ROI for each delay bin.

    This script is somewhat redundant with the image analysis, as it loops
    again through all the images.

    Parameters
    ----------
    df : pandas.DataFrame indexed by tag, with columns delay, tt, x_status,
        laser_status, intensity, bkg and I0. A 'dl_corr' column is added
        to it in place.
    bin_edges : uniformly spaced bin edges for the corrected delay.
    rname : run name, used to build the HDF5 dataset path.
    fname : HDF5 file containing the detector images.
    calibration : timing-tool calibration factor (delay units per tt unit).
    roi : [[y0, y1], [x0, x1]] region of interest for the COM analysis;
        defaults to [[235, 270], [500, 540]].

    Returns
    -------
    pandas.DataFrame with one row per bin: bin-center time, the binned
    mean intensity / bkg / I0 for laser on and off, and the COMx / COMy of
    the averaged ROI image (NaN where a bin holds no images).
    """
    # FIX: mutable default argument replaced by a None sentinel.
    if roi is None:
        roi = [[235, 270], [500, 540]]

    # corrected delay: nominal delay plus timing-tool jitter correction
    df["dl_corr"] = df.delay + calibration * df.tt
    bin_size = bin_edges[1] - bin_edges[0]
    # keep only X-ray-on shots, split by laser status
    df_xon = df[df.x_status == 1]
    df_lon = df_xon[df_xon.laser_status == 1]
    df_loff = df_xon[df_xon.laser_status == 0]

    bin_center = bin_edges[:-1] + 0.5 * bin_size
    df_out = pd.DataFrame(bin_center, columns=["time"])

    binned_int_lon = None
    if len(df_lon) != 0:
        binned_int_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.intensity,
                                                bins=bin_edges, statistic="mean")
        binned_bkg_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.bkg,
                                                bins=bin_edges, statistic="mean")
        binned_I0_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.I0,
                                               bins=bin_edges, statistic="mean")
        df_out["intensity_lon"] = binned_int_lon.statistic
        df_out["bkg_lon"] = binned_bkg_lon.statistic
        df_out["I0_lon"] = binned_I0_lon.statistic
    else:
        print("No laser ON shots")

    if len(df_loff) != 0:
        binned_int_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.intensity,
                                                 bins=bin_edges, statistic="mean")
        binned_bkg_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.bkg,
                                                 bins=bin_edges, statistic="mean")
        binned_I0_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.I0,
                                                bins=bin_edges, statistic="mean")
        df_out["I0_loff"] = binned_I0_loff.statistic
        df_out["bkg_loff"] = binned_bkg_loff.statistic
        df_out["intensity_loff"] = binned_int_loff.statistic
    else:
        print("No laser OFF shots")

    # COM analysis: for every delay bin, average the laser-on images that
    # fell into that bin and take the center of mass of the ROI.
    peakCOM = np.full((len(df_out.time), 2), np.nan)  # np.NaN was removed in numpy 2.0

    # FIX: the original read binned_int_lon.binnumber unconditionally and
    # crashed with a NameError when there were no laser ON shots; return
    # NaN COM columns instead.
    if binned_int_lon is None:
        df_out["COMx"] = peakCOM[:, 0]
        df_out["COMy"] = peakCOM[:, 1]
        return df_out

    binnumber = binned_int_lon.binnumber
    dataset_name = "/run_" + rname + "/detector_2d_1"
    ip = ImagesProcessor(facility="SACLA")
    ip.flatten_results = True
    ip.set_dataset(dataset_name)
    ip.add_preprocess("set_roi", args={"roi": roi})
    ip.add_analysis("get_mean_std")
    for ii in range(len(df_out.time)):
        n = ii + 1  # binned_statistic bin numbers are 1-based
        ismember = binnumber == n
        # FIX: binnumber is aligned with df_lon (the array that was binned),
        # so tags must come from df_lon.index; df.index has a different
        # length whenever any shot is X-ray-off or laser-off.
        tagList = df_lon.index[ismember]
        results = ip.analyze_images(fname, n=-1, tags=tagList)
        if "images_mean" in results:
            # scipy.ndimage.measurements namespace was removed; use the
            # public scipy.ndimage.center_of_mass instead.
            peakCOM[ii, :] = ndimage.center_of_mass(results["images_mean"])
        else:
            peakCOM[ii, :] = np.nan
        del results  # free the per-bin images before the next iteration
        print("bin number %s" % n)
    df_out["COMx"] = peakCOM[:, 0]
    df_out["COMy"] = peakCOM[:, 1]
    return df_out
def bin_tt_COM(df, bin_edges, rname, fname, calibration=0.01, roi=None):
    """Bin shots by the timing-tool-corrected delay and run a center of
    mass (COM) analysis of the detector ROI for each delay bin.

    This script is somewhat redundant with the image analysis, as it loops
    again through all the images.

    Parameters
    ----------
    df : pandas.DataFrame indexed by tag, with columns delay, tt, x_status,
        laser_status, intensity, bkg and I0. A 'dl_corr' column is added
        to it in place.
    bin_edges : uniformly spaced bin edges for the corrected delay.
    rname : run name, used to build the HDF5 dataset path.
    fname : HDF5 file containing the detector images.
    calibration : timing-tool calibration factor (delay units per tt unit).
    roi : [[y0, y1], [x0, x1]] region of interest for the COM analysis;
        defaults to [[235, 270], [500, 540]].

    Returns
    -------
    pandas.DataFrame with one row per bin: bin-center time, the binned
    mean intensity / bkg / I0 for laser on and off, and the COMx / COMy of
    the averaged ROI image (NaN where a bin holds no images).
    """
    # FIX: mutable default argument replaced by a None sentinel.
    if roi is None:
        roi = [[235, 270], [500, 540]]

    # corrected delay: nominal delay plus timing-tool jitter correction
    df['dl_corr'] = df.delay + calibration * df.tt
    bin_size = bin_edges[1] - bin_edges[0]
    # keep only X-ray-on shots, split by laser status
    df_xon = df[df.x_status == 1]
    df_lon = df_xon[df_xon.laser_status == 1]
    df_loff = df_xon[df_xon.laser_status == 0]

    bin_center = bin_edges[:-1] + 0.5 * bin_size
    df_out = pd.DataFrame(bin_center, columns=['time'])

    binned_int_lon = None
    if len(df_lon) != 0:
        binned_int_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.intensity,
                                                bins=bin_edges, statistic='mean')
        binned_bkg_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.bkg,
                                                bins=bin_edges, statistic='mean')
        binned_I0_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.I0,
                                               bins=bin_edges, statistic='mean')
        df_out['intensity_lon'] = binned_int_lon.statistic
        df_out['bkg_lon'] = binned_bkg_lon.statistic
        df_out['I0_lon'] = binned_I0_lon.statistic
    else:
        print('No laser ON shots')

    if len(df_loff) != 0:
        binned_int_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.intensity,
                                                 bins=bin_edges, statistic='mean')
        binned_bkg_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.bkg,
                                                 bins=bin_edges, statistic='mean')
        binned_I0_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.I0,
                                                bins=bin_edges, statistic='mean')
        df_out['I0_loff'] = binned_I0_loff.statistic
        df_out['bkg_loff'] = binned_bkg_loff.statistic
        df_out['intensity_loff'] = binned_int_loff.statistic
    else:
        print('No laser OFF shots')

    # COM analysis: for every delay bin, average the laser-on images that
    # fell into that bin and take the center of mass of the ROI.
    peakCOM = np.full((len(df_out.time), 2), np.nan)  # np.NaN was removed in numpy 2.0

    # FIX: the original read binned_int_lon.binnumber unconditionally and
    # crashed with a NameError when there were no laser ON shots; return
    # NaN COM columns instead.
    if binned_int_lon is None:
        df_out['COMx'] = peakCOM[:, 0]
        df_out['COMy'] = peakCOM[:, 1]
        return df_out

    binnumber = binned_int_lon.binnumber
    dataset_name = "/run_" + rname + "/detector_2d_1"
    ip = ImagesProcessor(facility="SACLA")
    ip.flatten_results = True
    ip.set_dataset(dataset_name)
    ip.add_preprocess("set_roi", args={'roi': roi})
    ip.add_analysis("get_mean_std")
    for ii in range(len(df_out.time)):
        n = ii + 1  # binned_statistic bin numbers are 1-based
        ismember = (binnumber == n)
        # FIX: binnumber is aligned with df_lon (the array that was binned),
        # so tags must come from df_lon.index; df.index has a different
        # length whenever any shot is X-ray-off or laser-off.
        tagList = df_lon.index[ismember]
        results = ip.analyze_images(fname, n=-1, tags=tagList)
        if 'images_mean' in results:
            # scipy.ndimage.measurements namespace was removed; use the
            # public scipy.ndimage.center_of_mass instead.
            peakCOM[ii, :] = ndimage.center_of_mass(results['images_mean'])
        else:
            peakCOM[ii, :] = np.nan
        del results  # free the per-bin images before the next iteration
        # FIX: 2to3 double-parenthesis artifact print((...)) removed
        print('bin number %s' % n)
    df_out['COMx'] = peakCOM[:, 0]
    df_out['COMy'] = peakCOM[:, 1]
    return df_out