Example #1
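# Assumed to be defined upstream of this snippet (not shown in the original):
# fname, run, daq_labels, sacla_converter, the selection string sel, and the
# utilities module imported as ut.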
df, filenames = ut.analysis.get_data_daq(fname,
                                         daq_labels,
                                         sacla_converter,
                                         t0=0,
                                         selection=sel)

# get laser on/off tags
is_laser_on_tags = df[df.is_laser == 1].index.tolist()
is_laser_off_tags = df[df.is_laser == 0].index.tolist()

# get spectra from Von Hamos, using laser on / off tags
#roi = [[0, 1024], [325, 335]]  # X, Y
ap = ImagesProcessor(facility="SACLA")
ap.add_analysis('get_projection', args={"axis": 1})
ap.add_analysis('get_mean_std')
ap.set_dataset('/run_%s/detector_2d_1' % run)
ap.add_preprocess("set_thr", args={"thr_low": 65})

# get the total spectra
results_on = ap.analyze_images(fname, tags=is_laser_on_tags)
spectrum_on = results_on["get_projection"]["spectra"].sum(axis=0)
results_off = ap.analyze_images(fname, tags=is_laser_off_tags)
spectrum_off = results_off["get_projection"]["spectra"].sum(axis=0)

spectrum_off = spectrum_off / spectrum_off.sum()
spectrum_on = spectrum_on / spectrum_on.sum()

# this is the average image from the Von Hamos
sum_image_on = results_on["get_mean_std"]["images_mean"]

# Plot!
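# A minimal plotting sketch (added for illustration; not part of the original
# snippet): overlay the normalized laser on/off spectra and show the averaged
# Von Hamos image computed above.
import matplotlib.pyplot as plt

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(spectrum_on, label="laser on")
plt.plot(spectrum_off, label="laser off")
plt.xlabel("pixel")
plt.ylabel("normalized intensity")
plt.legend(loc="best")
plt.subplot(1, 2, 2)
plt.imshow(sum_image_on)
plt.title("Von Hamos, laser on (mean image)")
plt.tight_layout()
plt.show()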
Example #2
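# This example passes a user-defined callable, get_line_histos, to
# add_analysis. The callback signature expected by ImagesProcessor is not
# shown in the original source; the sketch below is only an illustration of
# what such a custom analysis might look like (the (results, temp, image)
# signature is an assumption, not the documented API).
import numpy as np

def get_line_histos(results, temp, image, axis=0, bins=None):
    # histogram every line of the image along the chosen axis and
    # accumulate the counts across images
    histos = np.array([np.histogram(line, bins=bins)[0]
                       for line in np.moveaxis(image, axis, 0)])
    if "histos" in results:
        results["histos"] += histos
    else:
        results["histos"] = histos
    return results, temp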
an = ImagesProcessor(facility="SACLA")
# if you want a flat dict as a result
an.flatten_results = True

# add analysis
an.add_analysis("get_projection", args={
    'axis': 1,
    'thr_low': thr,
})
an.add_analysis("get_mean_std", args={'thr_low': thr})
bins = np.arange(-150, 300, 5)
an.add_analysis("get_histo_counts", args={'bins': bins})
an.add_analysis(get_line_histos, args={'axis': 0, 'bins': bins})

# set the dataset
an.set_dataset(dataset_name)
# add preprocess steps
#an.add_preprocess("image_set_roi", args={'roi': roi})
#an.add_preprocess("image_set_thr", thr_low=thr)

# run the analysis
results = an.analyze_images(fname, n=1000)

# plot
plt.figure(figsize=(7, 7))
plt.plot(np.nansum(results["spectra"], axis=0), label="ADU > " + str(thr))
plt.legend(loc='best')
#plt.show()

plt.figure(figsize=(7, 7))
plt.bar(bins[:-1], results["histo_counts"], log=True, width=5)
Example #3
def compute_rixs_spectra(
    dataset_name,
    df,
    thr_low=0,
    thr_hi=999999,
):
    # In principle, a single run can contain *multiple mono settings*, so we
    # need to load data from all the runs and then group them by mono energy.
    # `pandas` can help us with that: we load all data from the files, place
    # it in a `DataFrame`, add some useful derived quantities, and finally
    # use `tags` as the index of the `DataFrame`.
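    # (Illustration only, not from the original: the upstream loading step is
    # assumed to produce something like
    #     df = pd.concat(per_run_frames).set_index("tags")
    # i.e. a DataFrame indexed by tag with at least the run, delay,
    # photon_mono_energy, is_laser and I0 columns used below.)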

    runs = sorted(df.run.unique())
    print(runs)

    # label for ascii output dump
    out_label = "rixs_%s-%s" % (runs[0], runs[-1])

    delay = df.delay.unique()
    if len(delay) > 1:
        import sys  # local import, consistent with the in-function imports below
        print("More than one delay setting in the selected run range, exiting")
        sys.exit(-1)

    print("\nAvailable energy settings")
    print(df.photon_mono_energy.unique(), "\n")

    # Now we can run the analysis. For each energy value and each run, a
    # *list of tags* is created, such that events share the same mono energy
    # and belong to the same run (each run is stored in a separate file).
    # For each of these lists, we run the `ImagesProcessor` and create the
    # required spectra, for laser on and off.

    # the mono energies contained in the files
    energies_list = sorted(df.photon_mono_energy.unique().tolist())
    fnames = [DIR + str(run) + "_roi.h5" for run in runs]

    # The ImagesProcessor
    an = ImagesProcessor(facility="SACLA")
    # if you want a flat dict as a result
    an.flatten_results = True

    # add analysis
    an.add_analysis("get_projection",
                    args={
                        'axis': 1,
                        'thr_low': thr_low,
                        'thr_hi': thr_hi
                    })
    an.add_analysis("get_mean_std", args={'thr_low': thr_low})
    bins = np.arange(-150, 1000, 5)
    an.add_analysis("get_histo_counts", args={'bins': bins})
    # the dataset path depends on the run number, so it is set per run
    # inside the analysis loop below

    # run the analysis on all tagged events
    n_events = -1

    # multiprocessing import
    from multiprocessing import Pool

    # initialization of the RIXS maps. Element 0 is laser on, element 1 is laser off
    rixs_map = [
        np.zeros((len(energies_list), 1024)),
        np.zeros((len(energies_list), 1024))
    ]
    rixs_map_std = [
        np.zeros((len(energies_list), 1024)),
        np.zeros((len(energies_list), 1024))
    ]

    total_results = {}
    events_per_energy = [{}, {}]

    for i, energy in enumerate(energies_list):
        async_results = []  # list for results

        events_per_energy[0][energy] = 0
        events_per_energy[1][energy] = 0
        energy_masks = []
        # creating the pool
        pool = Pool(processes=8)
        # looping on the runs
        for j, run in enumerate(runs):
            df_run = df[df.run == run]
            energy_masks.append(df_run[df_run.photon_mono_energy == energy])
            # the dataset path contains the run number, so set it per file
            # before submitting; the processor is pickled with the current
            # dataset when apply_async is called
            an.set_dataset("/run_%s/%s" % (run, dataset_name))
            # apply the analysis
            async_results.append(
                pool.apply_async(
                    an, (fnames[j], n_events, energy_masks[j].index.values)))

        # closing the pool
        pool.close()

        # waiting for all results
        results = [r.get() for r in async_results]
        print("Got results for energy", energy)

        # producing the laser on/off maps
        for j, run in enumerate(runs):

            if run not in total_results:
                total_results[run] = {}

            if "spectra" not in results[j]:
                continue

            energy_mask = energy_masks[j]
            laser_masks = [None, None]
            # is_laser may be stored as 0/1 integers; cast to bool so that
            # ~ below gives a logical complement rather than a bitwise one
            if n_events != -1:
                laser_masks[0] = energy_mask.is_laser.values[:n_events].astype(bool)
            else:
                laser_masks[0] = energy_mask.is_laser.values.astype(bool)
            laser_masks[1] = ~laser_masks[0]

            for laser in [0, 1]:
                norm = np.count_nonzero(
                    ~np.isnan(results[j]["spectra"][laser_masks[laser]][:, 0]))
                events_per_energy[laser][energy] += norm
                # normalize each shot by its I0 before summing. The spectra
                # rows correspond to the energy-selected events, so I0 is
                # taken from the same energy-masked selection
                spectrum = np.nansum(
                    (results[j]["spectra"][laser_masks[laser]].T /
                     energy_mask[laser_masks[laser]].I0.values).T,
                    axis=0)
                spectrum_events = np.nansum(
                    results[j]["spectra"][laser_masks[laser]], axis=0)
                rixs_map[laser][energies_list.index(energy)] += spectrum
                rixs_map_std[laser][energies_list.index(
                    energy)] += spectrum_events

            total_results[run][energy] = {}
            total_results[run][energy]["results"] = results[j]
            total_results[run][energy]["laser_on"] = laser_masks[0]

    for laser in [0, 1]:
        for energy in list(events_per_energy[0].keys()):
            rixs_map[laser][energies_list.index(
                energy)] /= events_per_energy[laser][energy]

        rixs_map_std[laser] = rixs_map[laser] / np.sqrt(rixs_map_std[laser])
        # delay is guaranteed to have a single element by the check above
        np.savetxt(
            "%s_map_%s_%dps.txt" %
            (out_label, "on" if laser == 0 else "off", delay[0]),
            rixs_map[laser])

    #np.savetxt("%s_map_%dps_energies.txt" % (out_label, delay), sorted(events_per_energy[0].keys()))

    return rixs_map, rixs_map_std, total_results
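# Hypothetical invocation (the detector name and threshold are assumptions
# for illustration; df must be prepared as described in the comments above):
# rixs_map, rixs_map_std, total_results = compute_rixs_spectra(
#     "detector_2d_1", df, thr_low=65)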
Example #4
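# The snippet assumes bins, roi, bkgRoi, runs, n, imgAna, DIR and an
# ImagesProcessor instance ip defined upstream; hypothetical values for
# illustration only:
# bins = np.arange(-150, 1000, 5)
# roi = [[0, 1024], [325, 335]]      # [[y0, y1], [x0, x1]]
# bkgRoi = [[0, 1024], [600, 615]]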
ip.add_analysis("get_histo_counts", args={'bins': bins, 'roi': roi})
ip.add_analysis("roi_bkgRoi", args={'roi': roi, 'bkg_roi': bkgRoi})

for run in runs:
    rname = str(run)
    fname = DIR + rname + ".h5"
    print('\nAnalyzing run ' + rname + '\n')
    """
    Analyze images and integrate roi and bkgRoi. Can take a lot of time
    The results are saved in a pickle file in the folder analyzed_runs/imgAna.
    To add prepocess, analysis functions, open the dataAna.imgAna function.
    """
    if imgAna:
        dataset_name = "/run_" + rname + "/detector_2d_1"

        ip.set_dataset(dataset_name, remove_preprocess=False)

        # run the analysis
        results = ip.analyze_images(fname, n=n)

        # plot results
        imgs = results["images_mean"]
        plt.figure(figsize=(8, 8))
        plt.subplot2grid((2, 2), (0, 0), rowspan=2)
        plt.imshow(imgs)
        #     plt.imshow(imgs[bkgRoi[0][0]:bkgRoi[0][1], bkgRoi[1][0]:bkgRoi[1][1]], aspect=0.5,
        #                extent=(bkgRoi[1][0], bkgRoi[1][1], bkgRoi[0][0], bkgRoi[0][1]), interpolation="none")
        plt.subplot2grid((2, 2), (0, 1))
        plt.imshow(imgs[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]], aspect=0.5,
                   extent=(roi[1][0], roi[1][1], roi[0][1], roi[0][0]), interpolation="none")
        plt.title('ROI')
Example #5
def bin_tt_COM(df, bin_edges, rname, fname, calibration=0.01, roi=[[235, 270], [500, 540]]):
    """
    Bin data according to the timing tool and perform a center of mass analysis of the roi
    This script is somewhat redundant with the image analysis, as it loops again through all the images.
    """

    # create corrected delay
    df["dl_corr"] = df.delay + calibration * df.tt
    bin_size = bin_edges[1] - bin_edges[0]

    df_xon = df[df.x_status == 1]
    # filter on the already-selected x-ray-on subset, so the boolean masks
    # align with df_xon's index
    df_lon = df_xon[df_xon.laser_status == 1]
    df_loff = df_xon[df_xon.laser_status == 0]

    bin_center = bin_edges[:-1] + 0.5 * bin_size
    df_out = pd.DataFrame(bin_center, columns=["time"])

    if len(df_lon) != 0:
        binned_int_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.intensity, bins=bin_edges, statistic="mean")
        binned_bkg_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.bkg, bins=bin_edges, statistic="mean")
        binned_I0_lon = stats.binned_statistic(df_lon.dl_corr, df_lon.I0, bins=bin_edges, statistic="mean")
        df_out["intensity_lon"] = binned_int_lon.statistic
        df_out["bkg_lon"] = binned_bkg_lon.statistic
        df_out["I0_lon"] = binned_I0_lon.statistic
    else:
        print ("No laser ON shots")

    if len(df_loff) != 0:
        binned_int_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.intensity, bins=bin_edges, statistic="mean")
        binned_bkg_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.bkg, bins=bin_edges, statistic="mean")
        binned_I0_loff = stats.binned_statistic(df_loff.dl_corr, df_loff.I0, bins=bin_edges, statistic="mean")
        df_out["I0_loff"] = binned_I0_loff.statistic
        df_out["bkg_loff"] = binned_bkg_loff.statistic
        df_out["intensity_loff"] = binned_int_loff.statistic
    else:
        print ("No laser OFF shots")

    """
    COM analysis
        COM analysis loops through the bins and load the images corresponding for each bin.
        The COM of the averaged images in the bin is taken and written in the df_out dataframe
    """
    binnumber = binned_int_lon.binnumber
    peakCOM = np.zeros([len(df_out.time), 2])

    dataset_name = "/run_" + rname + "/detector_2d_1"
    ip = ImagesProcessor(facility="SACLA")

    ip.flatten_results = True
    ip.set_dataset(dataset_name)

    ip.add_preprocess("set_roi", args={"roi": roi})
    ip.add_analysis("get_mean_std")

    for ii in range(len(df_out.time)):
        n = ii + 1
        ismember = binnumber == n

        # binnumber was computed on df_lon, so take the tags from df_lon
        tagList = df_lon.index[ismember]
        results = ip.analyze_images(fname, n=-1, tags=tagList)

        if "images_mean" in results:
            peakCOM[ii, :] = ndimage.measurements.center_of_mass(results["images_mean"])
        else:
            peakCOM[ii, :] = np.NaN

        del results
        print ("bin number %s" % n)

    df_out["COMx"] = peakCOM[:, 0]
    df_out["COMy"] = peakCOM[:, 1]

    return df_out
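# Hypothetical usage (bin edges in the same units as df.delay; the run number
# and file path are assumptions for illustration):
# bin_edges = np.arange(-2.0, 2.0, 0.1)
# df_out = bin_tt_COM(df, bin_edges, "206162", DIR + "206162.h5")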