Example #1
def run(config):
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    ff = h5_io.h5_file_pointer(fname=params.data.filename,
                               what=params.data.data_field,
                               path=params.data.data_path)

    N, Nx, Ny = ff.shape
    print N, Nx, Ny
    imgs = ff.value

    mean_img = np.mean(imgs, axis=0)
    eq_mean = panel_tools.equalize(mean_img)
    #plt.imshow(eq_mean);plt.show()

    mask = mask_tools.quick_mask((Nx, Ny), params.mask.mask_def)
    #plt.imshow(eq_mean*mask);plt.show()

    #
    center = (params.geometry.cxcydz[0][0], params.geometry.cxcydz[0][1])
    distance = params.geometry.cxcydz[0][2]
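    # hc ~ 12398 eV*Angstrom: converts photon energy in eV to wavelength in Angstrom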
    wavelength = 12398.0 / params.geometry.energy
    det_panel = geometrized_panel.detector_panel(
        (Nx, Ny), params.geometry.pixel, distance, wavelength, center, 2)
    dxdy = det_panel.mimimum_variance_saxs_center(mean_img, mask, power=0.33)

    saxs_curves = []

    q, mean_polar = det_panel.all_rings(mean_img, mask, dxdy[0], dxdy[1], 129)
    fname = params.data.filename + '_q.dat'
    f = open(fname, 'w')
    for qq in q:
        print >> f, qq
    f.close()

    for ii in range(N):
        img = imgs[ii, :, :]
        s = det_panel.get_saxs(img, mask, False, dxdy[0], dxdy[1])
        Rg, LnI0 = saxs_tools.rg_estimate(s)
        if not np.isnan(Rg):
            print ii, Rg, LnI0
            saxs_curves.append(s.I)
            q, ring = det_panel.all_rings(img, mask, dxdy[0], dxdy[1], 129)
            write_img(ring, params.data.filename + '_%i.dat' % ii)
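
The Guinier relation behind rg_estimate is ln I(q) = LnI0 - (Rg^2 / 3) q^2, valid at low q. A minimal sketch of such an estimator, assuming the saxs object s exposes q and I arrays (saxs_tools.rg_estimate itself is not shown in this excerpt):

def guinier_estimate(q, I, q_max=0.05):
    # fit ln I(q) against q^2 over the low-q, positive-intensity points
    sel = (q > 0) & (q < q_max) & (I > 0)
    slope, LnI0 = np.polyfit(q[sel] ** 2, np.log(I[sel]), 1)
    Rg = np.sqrt(-3.0 * slope) if slope < 0 else np.nan
    return Rg, LnI0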
Example #2
def run(config, interactive=False):
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    # first get all stuff needed for correlation calculations
    tot_ints = h5_io.h5_file_pointer(fname=params.template.filename,
                                     what=params.template.ints,
                                     path='').value

    # this is an array of indices of the frames subject to selection.
    # it will shrink as we remove outliers etc.
    master_indices = np.arange(len(tot_ints))

    tot_scores = h5_io.h5_file_pointer(fname=params.template.filename,
                                       what=params.template.scores,
                                       path='').value

    mask = h5_io.h5_file_pointer(fname=params.template.filename,
                                 what=params.template.mask,
                                 path='').value

    template = h5_io.h5_file_pointer(fname=params.template.filename,
                                     what=params.template.template,
                                     path='').value

    offset = h5_io.h5_file_pointer(fname=params.template.filename,
                                   what=params.template.offset,
                                   path='').value

    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)
    N_imgs, Nx, Ny = data_f.shape

    # equalize the template for display
    equalized_template = panel_tools.equalize(template)  # (optionally: template + offset)

    #-------------------------------------------------
    # Here we do some selection stuff
    #
    print "---- Starting Selection Procedure ---- \n\n"
    # first make a 2d histogram of the data
    errors = np.geterr()
    np.seterr(invalid='ignore')  # suppress negative logs
    log_ints = np.log(tot_ints)
    np.seterr(invalid=errors['invalid'])  # go back to previous settings

    sel = np.isnan(log_ints)
    log_ints = log_ints[~sel]
    these_scores = tot_scores[~sel]
    master_indices = master_indices[~sel]

    sel = these_scores > 0.5  # if worse than 0.5 we have an issue (I guess)
    log_ints = log_ints[sel]
    these_scores = these_scores[sel]
    master_indices = master_indices[sel]

    hist = plt.hist2d(log_ints, these_scores, bins=[500, 500])
    plt.clf()  # no need for the figure to hang around
    dens = hist[0]

    # run a median filter to smooth things a bit
    dens = mf2d(dens, 3)
    log_int_axis = hist[1]
    cc_axis = hist[2]
    log_int_axis = log_int_axis[0:-1]
    cc_axis = cc_axis[0:-1]
    LI, CC = np.meshgrid(log_int_axis, cc_axis)
    ind = np.argmax(dens)

    # normalize the histogram such that the maximum value is 1
    trunc_dens = dens / np.max(dens)
    # select according to specifics
    sel = trunc_dens > params.selection.peak_fraction
    trunc_dens[~sel] = 0
    trunc_dens[sel] = 1.0
    these_log_ints = LI[sel].flatten()
    these_ccs = CC[sel].flatten()

    # calculate which images are within the decent area
    selection_array = in_area(trunc_dens, log_int_axis, cc_axis, log_ints,
                              these_scores)
    leftover_master_indices = master_indices[selection_array]
    print '%i frames left after selection' % len(leftover_master_indices)
    print 'Writing selection array as numpy array, with filename %s' % params.selection.selected_frames
    np.save(params.selection.selected_frames, leftover_master_indices)

    # now that we have selected frames, let's extract rings

    # build a c2_prep object
    # it performs the polar transformation, converts the static mask to polar coordinates, etc.

    c2_prep_obj = correlation.c2_prep(mask, params.geometry)
    p_template = c2_prep_obj.to_polar(template)

    # now build a c2 accumulator
    c2_obj = correlation.correlation_accumulator(c2_prep_obj.geom_def.nq,
                                                 c2_prep_obj.geom_def.nphi,
                                                 c2_prep_obj.final_polar_mask)

    # now we can compute C2's
    for this_image_index in leftover_master_indices:
        print this_image_index
        img = data_f[this_image_index, :, :]
        p_img = c2_prep_obj.to_polar(img)

        c2_obj.update_ac_only(p_img)

    ac = c2_obj.finish_up_ac()
    np.save(params.output.filename, ac)
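
in_area is defined elsewhere in this module; the idea is to map each (log intensity, score) pair onto the 2d histogram grid and keep the frames that land on a nonzero bin of the truncated density. A minimal sketch, assuming the axes are the lower bin edges kept above (a hypothetical reimplementation):

def in_area(binary_map, x_axis, y_axis, x_vals, y_vals):
    # locate the histogram bin of each point and look up the 0/1 density map
    ix = np.clip(np.searchsorted(x_axis, x_vals) - 1, 0, len(x_axis) - 1)
    iy = np.clip(np.searchsorted(y_axis, y_vals) - 1, 0, len(y_axis) - 1)
    return binary_map[ix, iy] > 0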
Example #3
def run(config):
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    f = open(params.selection.filename, 'r')
    index = []
    Rgs = []
    I0s = []
    for line in f:
        keys = line[:-1].split()
        ii = int(keys[0])
        Rg = float(keys[1])
        LnI0 = float(keys[2])
        Rgs.append(Rg)
        I0s.append(LnI0)
        index.append(ii)
    f.close()
    index = np.array(index).astype(np.int)
    scales = np.array(I0s)
    scales = np.exp(scales)
    scales = scales / np.max(scales)

    ff = h5_io.h5_file_pointer(fname=params.data.filename,
                               what=params.data.data_field,
                               path=params.data.data_path)
    imgs = ff[index, :, :]
    N, Nx, Ny = imgs.shape

    mean_img = np.mean(imgs, axis=0)
    eq_mean = panel_tools.equalize(mean_img)
    #plt.imshow(eq_mean);plt.show()

    mask = mask_tools.quick_mask((Nx, Ny), params.mask.mask_def)
    #plt.imshow(eq_mean*mask);plt.show()

    #
    center = (params.geometry.cxcydz[0][0], params.geometry.cxcydz[0][1])
    distance = params.geometry.cxcydz[0][2]
    wavelength = 12398.0 / params.geometry.energy
    det_panel = geometrized_panel.detector_panel(
        (Nx, Ny), params.geometry.pixel, distance, wavelength, center, 0,
        params.geometry.n_q)

    dxdy = det_panel.mimimum_variance_saxs_center(mean_img, mask, power=0.33)

    saxs_curves = []

    q, mean_polar = det_panel.all_rings(mean_img, mask, dxdy[0], dxdy[1],
                                        params.geometry.n_phi)

    # convert the mask itself to polar coordinates
    q, mask_polar = det_panel.all_rings(mask, mask, dxdy[0], dxdy[1],
                                        params.geometry.n_phi)
    sel = mask_polar < 0.9
    mask_polar[sel] = 0

    fname = params.data.output_base + '_q.dat'
    f = open(fname, 'w')
    for qq in q:
        print >> f, qq
    f.close()

    c2_obj = correlation.correlation_accumulator(params.geometry.n_q,
                                                 params.geometry.n_phi,
                                                 mask_polar)

    for ii in range(N):
        print ii, scales[ii]
        img = imgs[ii, :, :]
        s = det_panel.get_saxs(img, mask, False, dxdy[0], dxdy[1])
        saxs_curves.append(s.I)
        q, ring = det_panel.all_rings(img, mask, dxdy[0], dxdy[1],
                                      params.geometry.n_phi)
        scale = 1  #scales[ii]
        write_img(ring, params.data.output_base + '_img_%i.dat' % index[ii])
        c2_obj.update(ring * scale)
    c2_final = c2_obj.finish_up()
    np.save(params.data.output_base + '_c2.npy', c2_final)
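
For reference, the quantity accumulated here is the angular correlation C2(q, dphi) = <I(q, phi) I(q, phi + dphi)>, averaged over phi and over frames. A per-frame sketch of that operation via a circular FFT, assuming ring is the (n_q, n_phi) polar image (the accumulator's internals are not shown here):

def ring_c2(ring):
    # circular autocorrelation along the azimuthal axis, one row per q ring
    fts = np.fft.fft(ring, axis=1)
    return np.fft.ifft(fts * np.conj(fts), axis=1).real / ring.shape[1]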
Example #4
def run(config, interactive=False):
    print config
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()
    # get the total intensity please
    tot_ints = h5_io.h5_file_pointer(fname=params.data.filename,
                                     what=params.data.int_field,
                                     path=params.data.int_path).value
    # make a histogram please
    histogram, bins = np.histogram(tot_ints, bins=50)
    #
    peaks = scipy.signal.find_peaks_cwt(histogram, np.arange(3, 6))
    heights = histogram[peaks]
    norma = 1.0 * np.sum(heights)
    heights = heights / norma
    sel = heights > params.data.minimum_peak_height
    peaks = np.array(peaks)[sel]
    heights = heights[sel]
    this_peak = peaks[-1]
    this_intensity = bins[this_peak]
    if params.data.intensity_selection == 'auto':
        print "We will focus on images with intensity of %4.3e" % this_intensity
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)
    else:
        that_intensity = this_intensity * 1.0
        this_intensity = float(params.data.intensity_selection)
        print "The intensity bin selected by the user is %4.3e" % this_intensity
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)
        print "    The auto-selection would give %4.3e" % that_intensity
        print "    user supplied / auto selection = %4.3e" % (this_intensity /
                                                              that_intensity)

    delta_i = params.data.delta_i_ratio * this_intensity
    sel = (tot_ints > this_intensity - delta_i) & (tot_ints <
                                                   this_intensity + delta_i)
    indices = np.arange(0, len(tot_ints))
    indices = indices[sel]
    M_indices = len(indices)
    print "In total, %i images have been selected for template construction" % M_indices
    plotters.intensity_histogram(tot_ints, 50, 'Integrated Intensity',
                                 'Occurrence', '%s' % params.data.filename,
                                 params.data.filename + '_int_hist.png',
                                 (this_intensity, delta_i), interactive)

    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)
    N_imgs, Nx, Ny = data_f.shape

    median_image = fast_median_calculator.Fast_Median_Image(100, 0)
    var_image = fast_median_calculator.ReservoirSampler(
        (Nx, Ny), min(500, len(indices) // 2))

    for nn in indices:
        print nn
        img = data_f[nn, :, :]
        median_image.update(img)
        var_image.update(img)

    median_img = median_image.current_median()
    sig_image = var_image.sigma()

    # equalize the median image by mapping it through its cumulative histogram
    n_bins = 1000
    image_histogram, image_bins = np.histogram(median_img.flatten(),
                                               bins=n_bins,
                                               normed=True)
    for ii in range(1, n_bins):
        image_histogram[ii] += image_histogram[ii - 1]
    bin_centers = image_bins[0:-1] + image_bins[1:]
    bin_centers = bin_centers / 2.0
    equalized_image = np.interp(median_img, bin_centers, image_histogram)
    low_lim = np.percentile(median_img.flatten(), 10.0)
    high_lim = np.percentile(median_img.flatten(), 85.0)

    np.save(params.output.filename_base + '_equalized', equalized_image)
    np.save(params.output.filename_base + '_median', median_img)
    np.save(params.output.filename_base + '_sigma', sig_image)

    plotters.plot_equalized_template(equalized_image,
                                     params.data.filename + '_eq_template.png',
                                     interactive)
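
ReservoirSampler keeps a fixed-size random subset of the frames it sees and estimates a per-pixel sigma from that subset; a minimal sketch of the idea (the real class lives in fast_median_calculator and is not shown in this excerpt):

class SimpleReservoirSampler(object):
    def __init__(self, shape, k):
        self.k = k          # reservoir size
        self.n = 0          # frames seen so far
        self.sample = []

    def update(self, img):
        # classic reservoir sampling: each frame survives with probability k/n
        self.n += 1
        if len(self.sample) < self.k:
            self.sample.append(img)
        else:
            j = np.random.randint(0, self.n)
            if j < self.k:
                self.sample[j] = img

    def sigma(self):
        return np.std(np.array(self.sample), axis=0)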
Example #5
def run(config, interactive=False):
    print config
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()
    # get the total intensity please
    tot_ints = h5_io.h5_file_pointer(fname=params.data.filename,
                                     what=params.data.int_field,
                                     path=params.data.int_path).value
    # make a histogram please
    histogram, bins = np.histogram(tot_ints, bins=50)
    #
    peaks = scipy.signal.find_peaks_cwt(histogram, np.arange(3, 6))
    heights = histogram[peaks]
    norma = 1.0 * np.sum(heights)
    heights = heights / norma
    sel = heights > params.data.minimum_peak_height
    peaks = np.array(peaks)[sel]
    heights = heights[sel]
    this_peak = peaks[-1]
    this_intensity = bins[this_peak]
    if params.data.intensity_selection == 'auto':
        print "We will focus on images with intensity of %4.3e" % (
            this_intensity)
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)

    else:
        that_intensity = this_intensity * 1.0
        this_intensity = float(params.data.intensity_selection)
        print "The intensity bin selected by the user is %4.3e" % (
            this_intensity)
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)
        print "    The auto-selection would give %4.3e" % that_intensity
        print "    user supplied / auto selection = %4.3e" % (this_intensity /
                                                              that_intensity)

    delta_i = params.data.delta_i_ratio * this_intensity
    sel = (tot_ints > this_intensity - delta_i) & (tot_ints <
                                                   this_intensity + delta_i)
    indices = np.arange(0, len(tot_ints))
    indices = indices[sel]
    M_indices = len(indices)
    print "In total, %i images have been selected for template construction" % M_indices
    plotters.intensity_histogram(tot_ints, 50, 'Integrated Intensity',
                                 'Occurrence', '%s' % params.data.filename,
                                 params.data.filename + '_int_hist.png',
                                 (this_intensity, delta_i), interactive)

    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)
    N_imgs, Nx, Ny = data_f.shape

    median_image = fast_median_calculator.Fast_Median_Image(100, 0)
    var_image = fast_median_calculator.ReservoirSampler(
        (Nx, Ny), min(500, len(indices) // 2))

    for nn in indices:
        print 'Processing image ', nn
        img = data_f[nn, :, :]
        median_image.update(img)
        var_image.update(img)

    median_img = median_image.current_median()
    sig_image = var_image.sigma()

    equalized_image = panel_tools.equalize(median_img)
    equalized_sigma = panel_tools.equalize(sig_image)

    #np.save(params.output.filename_base+'_equalized',equalized_image)
    #np.save(params.output.filename_base+'_median',median_img)
    #np.save(params.output.filename_base+'_sigma',sig_image)

    plotters.plot_equalized_template(equalized_image,
                                     params.data.filename + '_eq_median.png',
                                     interactive)
    plotters.plot_equalized_template(equalized_sigma,
                                     params.data.filename + '_eq_sigma.png',
                                     interactive)

    # Let's make a new data field in the existing h5 file where we store the template and its sigma
    template_h5 = h5py.File(params.output.filename, 'w')
    grp = template_h5.create_group('template')
    grp.create_dataset('median', data=median_img, dtype=median_img.dtype)
    grp.create_dataset('sigma', data=sig_image, dtype=sig_image.dtype)
    int_range_sel = np.array(
        [this_intensity - delta_i, this_intensity + delta_i])
    grp.create_dataset('intensity_selection_range',
                       data=int_range_sel,
                       dtype=int_range_sel.dtype)
    # we now need the mask
    mask = mask_tools.quick_mask(sig_image.shape, params.mask.mask_def)
    grp.create_dataset('mask', data=mask, dtype=mask.dtype)

    # now we can do scoring of all images in the file
    z_scores = []
    indices = np.arange(0, len(tot_ints))
    for ii in indices:
        img = data_f[ii]
        # now we need to score these images
        score, zimg = z_score(img, median_img, sig_image, mask, True)
        print ii, '--->', score
        z_scores.append(score)
        #plt.imshow( zimg, vmin=0, vmax=6, interpolation='none'); plt.colorbar(); plt.show()
    z_scores = np.array(z_scores)

    grp.create_dataset('z_scores', data=z_scores, dtype=z_scores.dtype)
    template_h5.close()
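
z_score is imported from elsewhere; a minimal sketch of a masked z-scoring against the template, with a signature matching the call above (an assumption, not the actual implementation):

def z_score(img, median_img, sig_image, mask, return_zimg=False):
    # per-pixel deviation from the template, in units of sigma
    zimg = np.abs(img - median_img) / (sig_image + 1.0e-8)
    score = np.mean(zimg[mask > 0])
    if return_zimg:
        return score, zimg
    return score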
Example #6
        sel_y = sel[1]
        tmp[sel_x[0]:sel_x[1], sel_y[0]:sel_y[1]] = 1.0
        asic_masks[key] = tmp
    return asic_masks
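
The loop above fills axis-aligned rectangles with ones, one mask per ASIC. A self-contained sketch of the same pattern, assuming each definition is a ((x0, x1), (y0, y1)) tuple (hypothetical; the original function header is not part of this excerpt):

def build_asic_masks(shape, asic_defs):
    asic_masks = {}
    for key, sel in asic_defs.items():
        tmp = np.zeros(shape)
        sel_x = sel[0]
        sel_y = sel[1]
        tmp[sel_x[0]:sel_x[1], sel_y[0]:sel_y[1]] = 1.0
        asic_masks[key] = tmp
    return asic_masks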


def equalize(median_img, M=10000):
    n_bins = M
    image_histogram, image_bins = np.histogram(median_img.flatten(),
                                               bins=n_bins,
                                               normed=True)
    for ii in range(1, n_bins):
        image_histogram[ii] += image_histogram[ii - 1]
    bin_centers = image_bins[0:-1] + image_bins[1:]
    bin_centers = bin_centers / 2.0
    equalized_image = np.interp(median_img, bin_centers, image_histogram)
    return equalized_image


if __name__ == "__main__":
    img = np.load(sys.argv[1])
    plt.imshow(equalize(img), interpolation='none')
    plt.show()
    params = sys.argv[2]
    params = basic_parser.read_and_parse(open(params, 'r'))
    params.show()
    mask = np.load(sys.argv[3])
    img = fix_image(img, params.adjust)
    plt.imshow(equalize(img) * mask, interpolation='none')
    plt.show()
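
equalize maps every pixel through the cumulative histogram of the image, which flattens the intensity distribution and makes weak features visible next to bright ones. A quick usage sketch on synthetic data:

img = np.random.gamma(2.0, 2.0, size=(64, 64))  # synthetic stand-in image
eq = equalize(img, M=256)
plt.imshow(eq, interpolation='none')
plt.show()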
Example #7
def run(config):
    print config
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    # get the total intensity please
    tot_ints = h5_io.h5_file_pointer(fname=params.data.filename,
                                     what=params.data.int_field,
                                     path=params.data.int_path).value
    histogram, bins = np.histogram(tot_ints, bins=50)
    peaks = scipy.signal.find_peaks_cwt(histogram, np.arange(3, 6))
    heights = histogram[peaks]
    norma = 1.0 * np.sum(heights)
    heights = heights / norma
    sel = heights > params.intensity_selection.minimum_peak_height
    peaks = np.array(peaks)[sel]
    heights = heights[sel]
    this_peak = peaks[-1]
    this_intensity = bins[this_peak]
    if params.intensity_selection.intensity_selection == 'auto':
        print "We will focus on images with intensity of %4.3e" % (
            this_intensity)
        print " +/- %4.3e" % (params.intensity_selection.delta_i_ratio *
                              this_intensity)

    else:
        that_intensity = this_intensity * 1.0
        this_intensity = float(params.intensity_selection.intensity_selection)
        print "The intensity bin selected by the user is %4.3e" % (
            this_intensity)
        print " +/- %4.3e" % (params.intensity_selection.delta_i_ratio *
                              this_intensity)
        print "    The auto-selection would give %4.3e" % that_intensity
        print "    user supplied / auto selection = %4.3e" (this_intensity /
                                                            that_intensity)

    delta_i = params.intensity_selection.delta_i_ratio * this_intensity
    int_sel = (tot_ints > this_intensity - delta_i) & (
        tot_ints < this_intensity + delta_i)

    # read in the template match scores
    template_scores = np.load(params.data.template_matches)

    cc_sel = (template_scores > params.cc_selection.cc_low) & (
        template_scores < params.cc_selection.cc_high)

    combo_sel = int_sel & cc_sel
    these_ccs = template_scores[combo_sel]
    these_ints = tot_ints[combo_sel]
    indices = np.arange(0, len(tot_ints))
    indices = indices[combo_sel]

    print "Exporting %i images with a mean score of %4.3f" % (
        len(indices), np.mean(these_ccs))

    # make a new file please
    data_path = params.data.data_path
    data_field = params.data.data_field
    f_out = h5py.File(params.output.filename, 'w')

    # we need provenance fields to be copied

    exp_id = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what='exp_id',
                                   path='provenance').value
    time_points = h5_io.h5_file_pointer(fname=params.data.filename,
                                        what='event_time',
                                        path='provenance').value
    fiducials = h5_io.h5_file_pointer(fname=params.data.filename,
                                      what='event_fiducials',
                                      path='provenance').value
    time_points = time_points[combo_sel]
    fiducials = fiducials[combo_sel]

    prov = f_out.create_group('provenance')
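    # variable-length byte strings, so string fields of arbitrary length can be stored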
    dt = h5py.special_dtype(vlen=bytes)
    prov.create_dataset('exp_id', data=exp_id, dtype=dt)
    prov.create_dataset('event_time', data=time_points, dtype='uint64')
    prov.create_dataset('event_fiducials', data=fiducials, dtype='uint64')

    # make a field that will contain the data
    data_group = f_out.create_group(data_path)

    export_data = data_group.create_dataset(params.data.data_field,
                                            (len(indices), 1024, 1024),
                                            dtype='float32')

    # get a pointer to the data
    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)

    for jj, this_index in enumerate(indices):
        print jj, this_index
        export_data[jj, :, :] = data_f[this_index, :, :]

    # I want to export the total intensities as well, and the template scores
    data_group.create_dataset('mask',
                              data=np.load(params.data.mask),
                              dtype='float32')
    summary = f_out.create_group('summary')
    summary.create_dataset('tot_int', data=these_ints, dtype='float32')
    summary.create_dataset('template_scores', data=these_ccs, dtype='float32')

    f_out.close()
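
The index bookkeeping above is plain boolean-mask composition; a tiny illustration with synthetic numbers:

tot_ints = np.array([1.0, 5.0, 5.2, 9.0])
template_scores = np.array([0.2, 0.8, 0.9, 0.1])
int_sel = (tot_ints > 4.0) & (tot_ints < 6.0)
cc_sel = (template_scores > 0.5) & (template_scores < 1.0)
combo_sel = int_sel & cc_sel
indices = np.arange(len(tot_ints))[combo_sel]  # -> array([1, 2])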
Example #8
def run(config):
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    h5_fname = params.output.filename
    exp_name = params.data.experiment
    run_number = str(params.data.run)
    max_events = params.data.index_max
    start_index = params.data.index_start
    event_stride = params.data.index_stride

    exprun = 'exp=' + exp_name + ':run=' + run_number + ':idx'
    ds = psana.DataSource(exprun)
    env = ds.env()
    run = ds.runs().next()
    times = run.times()
    print 'total number of events in XTC file %d' % (len(times))
    max_events = min((len(times) - start_index), max_events)
    print 'We will only export %i of these events' % max_events
    print '   starting at %i with a stride of %i' % (start_index, event_stride)

    stop_index = start_index + max_events
    my_event_indices = np.array(range(start_index, stop_index, event_stride))
    event_list = []

    fiducial_record = []
    time_record = []

    for this_index in my_event_indices:
        event_list.append(times[this_index])
        print(times[this_index].fiducial(), times[this_index].time())
        fiducial_record.append(times[this_index].fiducial())
        time_record.append(times[this_index].time())
    fiducial_record = np.array(fiducial_record)
    time_record = np.array(time_record)

    # (use ds.events().next().keys() to get src and alias)
    srcList = ['Camp.0.pnCCD.0', 'Camp.0.pnCCD.1']
    aliasList = ['pnccdFront', 'pnccdBack']
    detList = [psana.Detector(src, env) for src in aliasList]

    # names of the h5 group and data fields
    grp_name = 'data'
    adu_front_datafield = 'adu_front'
    mask_front_datafield = 'mask_front'
    adu_back_datafield = 'adu_back'
    mask_back_datafield = 'mask_back'

    if os.path.isfile(h5_fname):
        f = h5py.File(h5_fname, 'r+')

        if grp_name in f:
            del f[grp_name]

        if mask_front_datafield in f:
            del f[mask_front_datafield]

        if mask_back_datafield in f:
            del f[mask_back_datafield]

        if adu_front_datafield in f:
            del f[adu_front_datafield]

        if adu_back_datafield in f:
            del f[adu_back_datafield]

        f.close()

    # parallel h5py file processing
    f = h5py.File(h5_fname, 'w')
    grp = f.create_group(grp_name)

    # let's get the common mode parameters
    front_comm_mode_pars = params.data.common_mode_front
    back_comm_mode_pars = params.data.common_mode_back

    # let's make a provenance field that contains some info on what this data is
    dt = h5py.special_dtype(vlen=bytes)
    prov = f.create_group('provenance')
    prov.create_dataset('exp_id',
                        data='exp=' + exp_name + ':run=' + run_number,
                        dtype=dt)
    prov.create_dataset('common_mode_front',
                        data=front_comm_mode_pars,
                        dtype='uint8')
    prov.create_dataset('common_mode_back',
                        data=back_comm_mode_pars,
                        dtype='uint8')
    prov.create_dataset('event_fiducials',
                        data=fiducial_record,
                        dtype='uint64')
    prov.create_dataset('event_time', data=time_record, dtype='uint64')
    prov.create_dataset('user_comments', data=params.output.comments, dtype=dt)

    adu_front_ds = None
    mask_front = None
    adu_back_ds = None
    mask_back = None

    # make two median objects
    front_median_object = fast_median_calculator.Fast_Median_Image()
    back_median_object = fast_median_calculator.Fast_Median_Image()
    front_sum_int = []
    back_sum_int = []
    faults = 0
    for n, evt in enumerate(event_list):
        print n, evt.time()
        evt = run.event(evt)
        sec, ns, fid, phot_en = getEventID(evt)
        adu_back = np.zeros((1024, 1024))
        adu_front = np.zeros((1024, 1024))
        if sec is not None:
            if n == 0:
                mask_front = panel_tools.slab_data(
                    getMask(evt, detList[0], assemble=False))
                mask_back = panel_tools.slab_data(
                    getMask(evt, detList[1], assemble=False))
            adu_front, ok = getAssembledImg(evt,
                                            detList[0],
                                            cmpars=front_comm_mode_pars,
                                            assemble=False)
            if not ok:
                faults += 1

            adu_front = panel_tools.slab_data(adu_front).astype('float32')

            #adu_back, ok   = getAssembledImg(evt,detList[1],cmpars=back_comm_mode_pars,assemble=False)
            #adu_back       = panel_tools.slab_data(adu_back).astype('float32')
        else:
            faults += 1
        if adu_front_ds is None:
            adu_front_ds = grp.create_dataset(adu_front_datafield,
                                              (len(event_list), 1024, 1024),
                                              dtype='float32')
        if adu_back_ds is None:
            adu_back_ds = grp.create_dataset(adu_back_datafield,
                                             (len(event_list), 1024, 1024),
                                             dtype='float32')
        adu_front_ds[n, :, :] = adu_front
        adu_back_ds[n, :, :] = adu_back
        front_sum_int.append(np.sum(adu_front.flatten()))
        back_sum_int.append(np.sum(adu_back.flatten()))
        # update median estimates
        front_median_object.update(adu_front)
        back_median_object.update(adu_back)

    grp.create_dataset(mask_front_datafield, data=mask_front, dtype='uint8')
    grp.create_dataset(mask_back_datafield, data=mask_back, dtype='uint8')

    median = f.create_group('summary')
    median.create_dataset('front_median',
                          data=front_median_object.current_median(),
                          dtype='float32')
    median.create_dataset('back_median',
                          data=back_median_object.current_median(),
                          dtype='float32')
    median.create_dataset('front_sum_int', data=front_sum_int, dtype='float32')
    median.create_dataset('back_sum_int', data=back_sum_int, dtype='float32')

    f.close()
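
Fast_Median_Image's internals are not shown in these examples; one common way to track a running median without storing all frames is to nudge a per-pixel estimate toward each new frame by a fixed step. A sketch of that idea (an assumption about the class, not its actual implementation):

class IncrementalMedianImage(object):
    def __init__(self, step=1.0):
        self.step = step
        self.median = None

    def update(self, img):
        if self.median is None:
            self.median = img.astype('float64').copy()
        else:
            # move the estimate up where the frame is brighter, down where darker
            self.median += self.step * np.sign(img - self.median)

    def current_median(self):
        return self.median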