def run(config):
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    ff = h5_io.h5_file_pointer(fname=params.data.filename,
                               what=params.data.data_field,
                               path=params.data.data_path)
    N, Nx, Ny = ff.shape
    print N, Nx, Ny

    imgs = ff.value
    mean_img = np.mean(imgs, axis=0)
    eq_mean = panel_tools.equalize(mean_img)
    #plt.imshow(eq_mean);plt.show()

    mask = mask_tools.quick_mask((Nx, Ny), params.mask.mask_def)
    #plt.imshow(eq_mean*mask);plt.show()

    # detector geometry
    center = (params.geometry.cxcydz[0][0], params.geometry.cxcydz[0][1])
    distance = params.geometry.cxcydz[0][2]
    wavelength = 12398.0 / params.geometry.energy
    det_panel = geometrized_panel.detector_panel(
        (Nx, Ny), params.geometry.pixel, distance, wavelength, center, 2)

    # refine the beam center by minimizing the azimuthal variance
    dxdy = det_panel.mimimum_variance_saxs_center(mean_img, mask, power=0.33)

    saxs_curves = []
    q, mean_polar = det_panel.all_rings(mean_img, mask, dxdy[0], dxdy[1], 129)

    # write the q axis
    f = open(params.data.filename + '_q.dat', 'w')
    for qq in q:
        print >> f, qq
    f.close()

    for ii in range(N):
        img = imgs[ii, :, :]
        s = det_panel.get_saxs(img, mask, False, dxdy[0], dxdy[1])
        Rg, LnI0 = saxs_tools.rg_estimate(s)
        if not np.isnan(Rg):
            print ii, Rg, LnI0
            saxs_curves.append(s.I)
            q, ring = det_panel.all_rings(img, mask, dxdy[0], dxdy[1], 129)
            write_img(ring, params.data.filename + '_%i.dat' % ii)
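# write_img is called above and in the ring-extraction script further down,
# but is not defined in this section. Below is a minimal sketch of what it
# plausibly does -- dumping a 2D ring array to a whitespace-delimited text
# file, matching the '*.dat' outputs written above. The signature
# write_img(img, fname) is an assumption, not the author's definition.
import numpy as np  # module-level in the real code

def write_img(img, fname):
    # hypothetical helper: one row of the polar ring stack per line;
    # '%.6e' preserves the dynamic range of the detector counts
    np.savetxt(fname, np.asarray(img), fmt='%.6e')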
def run(config, interactive=False):
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    # first get all stuff needed for correlation calculations
    tot_ints = h5_io.h5_file_pointer(fname=params.template.filename,
                                     what=params.template.ints,
                                     path='').value

    # this is an array of indices of frames that we subject to selection.
    # it will change while we remove outliers etc
    master_indices = np.arange(len(tot_ints))

    tot_scores = h5_io.h5_file_pointer(fname=params.template.filename,
                                       what=params.template.scores,
                                       path='').value
    mask = h5_io.h5_file_pointer(fname=params.template.filename,
                                 what=params.template.mask,
                                 path='').value
    template = h5_io.h5_file_pointer(fname=params.template.filename,
                                     what=params.template.template,
                                     path='').value
    offset = h5_io.h5_file_pointer(fname=params.template.filename,
                                   what=params.template.offset,
                                   path='').value
    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)
    N_imgs, Nx, Ny = data_f.shape

    # display the template
    equalized_template = panel_tools.equalize(template)  #+offset )

    #-------------------------------------------------
    # Here we do some selection stuff
    #
    print "---- Starting Selection Procedure ---- \n\n"

    # first make a 2d histogram of the data
    errors = np.geterr()
    np.seterr(invalid='ignore')  # suppress negative logs
    log_ints = np.log(tot_ints)
    np.seterr(invalid=errors['invalid'])  # go back to previous settings

    sel = np.isnan(log_ints)
    log_ints = log_ints[~sel]
    these_scores = tot_scores[~sel]
    master_indices = master_indices[~sel]

    sel = these_scores > 0.5  # if worse than 0.5 we have an issue (I guess)
    log_ints = log_ints[sel]
    these_scores = these_scores[sel]
    master_indices = master_indices[sel]

    hist = plt.hist2d(log_ints, these_scores, bins=[500, 500])
    plt.clf()  # no need for the figure to hang around
    dens = hist[0]

    # run a median filter to smooth things a bit
    dens = mf2d(dens, 3)
    log_int_axis = hist[1]
    cc_axis = hist[2]
    log_int_axis = log_int_axis[0:-1]
    cc_axis = cc_axis[0:-1]
    LI, CC = np.meshgrid(log_int_axis, cc_axis)
    ind = np.argmax(dens)

    # normalize the histogram such that the maximum value is 1
    trunc_dens = dens / np.max(dens)

    # select according to specifics
    sel = trunc_dens > params.selection.peak_fraction
    trunc_dens[~sel] = 0
    trunc_dens[sel] = 1.0
    these_log_ints = LI[sel].flatten()
    these_ccs = CC[sel].flatten()

    # calculate which images are within the decent area
    selection_array = in_area(trunc_dens, log_int_axis, cc_axis, log_ints,
                              these_scores)
    leftover_master_indices = master_indices[selection_array]
    print '%i frames left after selection' % len(leftover_master_indices)
    print 'Writing selection array as numpy array, with filename %s' % \
        params.selection.selected_frames
    np.save(params.selection.selected_frames, leftover_master_indices)

    # now that we have selected frames, let's extract rings.
    # build a c2_prep object:
    # this guy does the polar transformation and static mask polar conversion etc
    c2_prep_obj = correlation.c2_prep(mask, params.geometry)
    p_template = c2_prep_obj.to_polar(template)

    # now build a c2 accumulator
    c2_obj = correlation.correlation_accumulator(c2_prep_obj.geom_def.nq,
                                                 c2_prep_obj.geom_def.nphi,
                                                 c2_prep_obj.final_polar_mask)

    # now we can compute C2's
    for this_image_index in leftover_master_indices:
        print this_image_index
        img = data_f[this_image_index, :, :]
        p_img = c2_prep_obj.to_polar(img)
        c2_obj.update_ac_only(p_img)
    ac = c2_obj.finish_up_ac()
    np.save(params.output.filename, ac)
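# in_area is used above but not defined in this section. Below is a
# plausible reimplementation of the intended logic, kept as a sketch: map
# each (log-intensity, cc-score) pair back onto the 2D-histogram grid and
# keep the frames that land in a nonzero bin of the thresholded density.
# The name, signature, and edge handling are assumptions.
import numpy as np

def in_area(binary_dens, log_int_axis, cc_axis, log_ints, scores):
    # locate the histogram bin of every frame; the axes hold left bin edges
    ix = np.clip(np.searchsorted(log_int_axis, log_ints, side='right') - 1,
                 0, binary_dens.shape[0] - 1)
    iy = np.clip(np.searchsorted(cc_axis, scores, side='right') - 1,
                 0, binary_dens.shape[1] - 1)
    # True where the frame falls inside the accepted (density == 1) region
    return binary_dens[ix, iy] > 0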
def run(config):
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    # read the selection file (index, Rg, LnI0 per line)
    f = open(params.selection.filename, 'r')
    index = []
    Rgs = []
    I0s = []
    for line in f:
        keys = line[:-1].split()
        ii = int(keys[0])
        Rg = float(keys[1])
        LnI0 = float(keys[2])
        Rgs.append(Rg)
        I0s.append(LnI0)
        index.append(ii)
    f.close()

    index = np.array(index).astype(np.int)
    scales = np.array(I0s)
    scales = np.exp(scales)
    scales = scales / np.max(scales)

    ff = h5_io.h5_file_pointer(fname=params.data.filename,
                               what=params.data.data_field,
                               path=params.data.data_path)
    imgs = ff[index, :, :]
    N, Nx, Ny = imgs.shape
    mean_img = np.mean(imgs, axis=0)
    eq_mean = panel_tools.equalize(mean_img)
    #plt.imshow(eq_mean);plt.show()

    mask = mask_tools.quick_mask((Nx, Ny), params.mask.mask_def)
    #plt.imshow(eq_mean*mask);plt.show()

    # detector geometry
    center = (params.geometry.cxcydz[0][0], params.geometry.cxcydz[0][1])
    distance = params.geometry.cxcydz[0][2]
    wavelength = 12398.0 / params.geometry.energy
    det_panel = geometrized_panel.detector_panel(
        (Nx, Ny), params.geometry.pixel, distance, wavelength, center, 0,
        params.geometry.n_q)

    # refine the beam center by minimizing the azimuthal variance
    dxdy = det_panel.mimimum_variance_saxs_center(mean_img, mask, power=0.33)

    saxs_curves = []
    q, mean_polar = det_panel.all_rings(mean_img, mask, dxdy[0], dxdy[1],
                                        params.geometry.n_phi)

    # get the mask in polar coordinates
    q, mask_polar = det_panel.all_rings(mask, mask, dxdy[0], dxdy[1],
                                        params.geometry.n_phi)
    sel = mask_polar < 0.9
    mask_polar[sel] = 0

    # write the q axis
    f = open(params.data.output_base + '_q.dat', 'w')
    for qq in q:
        print >> f, qq
    f.close()

    c2_obj = correlation.correlation_accumulator(params.geometry.n_q,
                                                 params.geometry.n_phi,
                                                 mask_polar)
    for ii in range(N):
        print ii, scales[ii]
        img = imgs[ii, :, :]
        s = det_panel.get_saxs(img, mask, False, dxdy[0], dxdy[1])
        saxs_curves.append(s.I)
        q, ring = det_panel.all_rings(img, mask, dxdy[0], dxdy[1],
                                      params.geometry.n_phi)
        scale = 1  #scales[ii]
        write_img(ring, params.data.output_base + '_img_%i.dat' % index[ii])
        c2_obj.update(ring * scale)

    c2_final = c2_obj.finish_up()
    np.save(params.data.output_base + '_c2.npy', c2_final)
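# The internals of correlation.correlation_accumulator are not shown in
# this section. The sketch below illustrates the underlying C2 technique it
# presumably implements (minus the polar-mask bookkeeping of the real
# class): for each q-ring, the angular autocorrelation
#     C2(q, dphi) = < I(q, phi) * I(q, phi + dphi) >_phi
# is computed per frame via the Wiener-Khinchin theorem and averaged over
# frames. Class and method names mirror the calls above but are assumptions.
import numpy as np

class correlation_accumulator_sketch(object):
    def __init__(self, nq, nphi):
        self.sum_c2 = np.zeros((nq, nphi))
        self.n_frames = 0

    def update(self, ring):
        # ring: (nq, nphi) polar image; an FFT along the phi axis gives the
        # circular autocorrelation of every q-ring at once
        ft = np.fft.fft(ring, axis=1)
        c2 = np.fft.ifft(ft * np.conj(ft), axis=1).real / ring.shape[1]
        self.sum_c2 += c2
        self.n_frames += 1

    def finish_up(self):
        # average over all accumulated frames
        return self.sum_c2 / max(self.n_frames, 1)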
def run(config, interactive=False):
    print config
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    # get the total intensity please
    tot_ints = h5_io.h5_file_pointer(fname=params.data.filename,
                                     what=params.data.int_field,
                                     path=params.data.int_path).value

    # make a histogram please
    histogram, bins = np.histogram(tot_ints, bins=50)
    peaks = scipy.signal.find_peaks_cwt(histogram, np.arange(3, 6))
    heights = histogram[peaks]
    norma = 1.0 * np.sum(heights)
    heights = heights / norma
    sel = heights > params.data.minimum_peak_height
    peaks = np.array(peaks)[sel]
    heights = heights[sel]
    this_peak = peaks[-1]
    this_intensity = bins[this_peak]

    if params.data.intensity_selection == 'auto':
        print "We will focus on images with intensity of %4.3e" % this_intensity
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)
    else:
        that_intensity = this_intensity * 1.0
        this_intensity = float(params.data.intensity_selection)
        print "The intensity bin selected by the user is %4.3e" % this_intensity
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)
        print " The auto-selection would give %4.3e" % that_intensity
        print " user supplied / auto selection = %4.3e" % (this_intensity /
                                                           that_intensity)

    delta_i = params.data.delta_i_ratio * this_intensity
    sel = (tot_ints > this_intensity - delta_i) & \
          (tot_ints < this_intensity + delta_i)
    indices = np.arange(0, len(tot_ints))
    indices = indices[sel]
    M_indices = len(indices)
    print "In total, %i images have been selected for template construction" % M_indices

    plotters.intensity_histogram(tot_ints, 50, 'Integrated Intensity',
                                 'Occurrence', '%s' % params.data.filename,
                                 params.data.filename + '_int_hist.png',
                                 (this_intensity, delta_i), interactive)

    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)
    N_imgs, Nx, Ny = data_f.shape

    # streaming estimators for the per-pixel median and sigma
    median_image = fast_median_calculator.Fast_Median_Image(100, 0)
    var_image = fast_median_calculator.ReservoirSampler(
        (Nx, Ny), min(500, len(indices) // 2))
    for nn in indices:
        print nn
        img = data_f[nn, :, :]
        median_image.update(img)
        var_image.update(img)
    median_img = median_image.current_median()
    sig_image = var_image.sigma()

    # histogram-equalize the median image for display: build the cumulative
    # histogram (an approximate CDF) and map pixel values through it
    n_bins = 1000
    image_histogram, image_bins = np.histogram(median_img.flatten(),
                                               bins=n_bins, normed=True)
    for ii in range(1, n_bins):
        image_histogram[ii] += image_histogram[ii - 1]
    bin_centers = image_bins[0:-1] + image_bins[1:]
    bin_centers = bin_centers / 2.0
    equalized_image = np.interp(median_img, bin_centers, image_histogram)
    low_lim = np.percentile(median_img.flatten(), 10.0)
    high_lim = np.percentile(median_img.flatten(), 85.0)

    np.save(params.output.filename_base + '_equalized', equalized_image)
    np.save(params.output.filename_base + '_median', median_img)
    np.save(params.output.filename_base + '_sigma', sig_image)
    plotters.plot_equalized_template(equalized_image,
                                     params.data.filename + '_eq_template.png',
                                     interactive)
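# fast_median_calculator.ReservoirSampler is used above without its source.
# The sketch below shows the reservoir-sampling idea it presumably builds on
# (Vitter's algorithm R): keep a fixed-size uniform random sample of the
# frames seen so far and estimate the per-pixel sigma from that sample.
# The class name and exact estimator are assumptions.
import numpy as np

class reservoir_sigma_sketch(object):
    def __init__(self, shape, capacity):
        self.capacity = capacity
        self.buffer = np.zeros((capacity,) + tuple(shape))
        self.seen = 0

    def update(self, img):
        if self.seen < self.capacity:
            # fill the reservoir first
            self.buffer[self.seen] = img
        else:
            # replace a random slot with probability capacity / (seen + 1),
            # which keeps every frame equally likely to be in the sample
            j = np.random.randint(0, self.seen + 1)
            if j < self.capacity:
                self.buffer[j] = img
        self.seen += 1

    def sigma(self):
        # per-pixel standard deviation over the sampled frames
        n = min(self.seen, self.capacity)
        return np.std(self.buffer[:n], axis=0)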
def run(config, interactive=False):
    print config
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    # get the total intensity please
    tot_ints = h5_io.h5_file_pointer(fname=params.data.filename,
                                     what=params.data.int_field,
                                     path=params.data.int_path).value

    # make a histogram please
    histogram, bins = np.histogram(tot_ints, bins=50)
    peaks = scipy.signal.find_peaks_cwt(histogram, np.arange(3, 6))
    heights = histogram[peaks]
    norma = 1.0 * np.sum(heights)
    heights = heights / norma
    sel = heights > params.data.minimum_peak_height
    peaks = np.array(peaks)[sel]
    heights = heights[sel]
    this_peak = peaks[-1]
    this_intensity = bins[this_peak]

    if params.data.intensity_selection == 'auto':
        print "We will focus on images with intensity of %4.3e" % this_intensity
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)
    else:
        that_intensity = this_intensity * 1.0
        this_intensity = float(params.data.intensity_selection)
        print "The intensity bin selected by the user is %4.3e" % this_intensity
        print " +/- %4.3e" % (params.data.delta_i_ratio * this_intensity)
        print " The auto-selection would give %4.3e" % that_intensity
        print " user supplied / auto selection = %4.3e" % (this_intensity /
                                                           that_intensity)

    delta_i = params.data.delta_i_ratio * this_intensity
    sel = (tot_ints > this_intensity - delta_i) & \
          (tot_ints < this_intensity + delta_i)
    indices = np.arange(0, len(tot_ints))
    indices = indices[sel]
    M_indices = len(indices)
    print "In total, %i images have been selected for template construction" % M_indices

    plotters.intensity_histogram(tot_ints, 50, 'Integrated Intensity',
                                 'Occurrence', '%s' % params.data.filename,
                                 params.data.filename + '_int_hist.png',
                                 (this_intensity, delta_i), interactive)

    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)
    N_imgs, Nx, Ny = data_f.shape

    # streaming estimators for the per-pixel median and sigma
    median_image = fast_median_calculator.Fast_Median_Image(100, 0)
    var_image = fast_median_calculator.ReservoirSampler(
        (Nx, Ny), min(500, len(indices) // 2))
    for nn in indices:
        print 'Processing image ', nn
        img = data_f[nn, :, :]
        median_image.update(img)
        var_image.update(img)
    median_img = median_image.current_median()
    sig_image = var_image.sigma()

    equalized_image = panel_tools.equalize(median_img)
    equalized_sigma = panel_tools.equalize(sig_image)
    #np.save(params.output.filename_base+'_equalized',equalized_image)
    #np.save(params.output.filename_base+'_median',median_img)
    #np.save(params.output.filename_base+'_sigma',sig_image)
    plotters.plot_equalized_template(equalized_image,
                                     params.data.filename + '_eq_median.png',
                                     interactive)
    plotters.plot_equalized_template(equalized_sigma,
                                     params.data.filename + '_eq_sigma.png',
                                     interactive)

    # store the template and its sigma in a new h5 file
    template_h5 = h5py.File(params.output.filename, 'w')
    grp = template_h5.create_group('template')
    grp.create_dataset('median', data=median_img, dtype=median_img.dtype)
    grp.create_dataset('sigma', data=sig_image, dtype=sig_image.dtype)
    int_range_sel = np.array(
        [this_intensity - delta_i, this_intensity + delta_i])
    grp.create_dataset('intensity_selection_range', data=int_range_sel,
                       dtype=int_range_sel.dtype)

    # we now need the mask
    mask = mask_tools.quick_mask(sig_image.shape, params.mask.mask_def)
    grp.create_dataset('mask', data=mask, dtype=mask.dtype)

    # now we can do scoring of all images in the file
    z_scores = []
    indices = np.arange(0, len(tot_ints))
    for ii in indices:
        img = data_f[ii]
        # now we need to score these images
        score, zimg = z_score(img, median_img, sig_image, mask, True)
        print ii, '--->', score
        z_scores.append(score)
        #plt.imshow( zimg, vmin=0, vmax=6, interpolation='none'); plt.colorbar(); plt.show()
    z_scores = np.array(z_scores)
    grp.create_dataset('z_scores', data=z_scores, dtype=z_scores.dtype)
    template_h5.close()
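# z_score is called in the scoring loop above but not defined in this
# section. Below is a minimal sketch of a scoring consistent with that call
# site: per-pixel absolute deviation from the template median in units of
# sigma, averaged over unmasked pixels. The signature and the eps guard
# against zero sigma are assumptions.
import numpy as np

def z_score(img, median_img, sig_image, mask, return_zimg=False, eps=1e-8):
    # z-image: how many sigmas each pixel deviates from the template
    zimg = np.abs(img - median_img) / (sig_image + eps)
    # mean z over the valid (mask == 1) pixels
    score = np.sum(zimg * mask) / max(np.sum(mask), 1.0)
    if return_zimg:
        return score, zimg
    return score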
def run(config):
    print config
    params = basic_parser.read_and_parse(config, default_parameters)
    params.show()

    # get the total intensity please
    tot_ints = h5_io.h5_file_pointer(fname=params.data.filename,
                                     what=params.data.int_field,
                                     path=params.data.int_path).value
    histogram, bins = np.histogram(tot_ints, bins=50)
    peaks = scipy.signal.find_peaks_cwt(histogram, np.arange(3, 6))
    heights = histogram[peaks]
    norma = 1.0 * np.sum(heights)
    heights = heights / norma
    sel = heights > params.intensity_selection.minimum_peak_height
    peaks = np.array(peaks)[sel]
    heights = heights[sel]
    this_peak = peaks[-1]
    this_intensity = bins[this_peak]

    if params.intensity_selection.intensity_selection == 'auto':
        print "We will focus on images with intensity of %4.3e" % this_intensity
        print " +/- %4.3e" % (params.intensity_selection.delta_i_ratio *
                              this_intensity)
    else:
        that_intensity = this_intensity * 1.0
        this_intensity = float(params.intensity_selection.intensity_selection)
        print "The intensity bin selected by the user is %4.3e" % this_intensity
        print " +/- %4.3e" % (params.intensity_selection.delta_i_ratio *
                              this_intensity)
        print " The auto-selection would give %4.3e" % that_intensity
        print " user supplied / auto selection = %4.3e" % (this_intensity /
                                                           that_intensity)

    delta_i = params.intensity_selection.delta_i_ratio * this_intensity
    int_sel = (tot_ints > this_intensity - delta_i) & \
              (tot_ints < this_intensity + delta_i)

    # read in the template match scores
    template_scores = np.load(params.data.template_matches)
    cc_sel = (template_scores > params.cc_selection.cc_low) & \
             (template_scores < params.cc_selection.cc_high)

    combo_sel = int_sel & cc_sel
    these_ccs = template_scores[combo_sel]
    these_ints = tot_ints[combo_sel]
    indices = np.arange(0, len(tot_ints))
    indices = indices[combo_sel]
    print "Exporting %i images with a mean score of %4.3f" % (
        len(indices), np.mean(these_ccs))

    # make a new file please
    data_path = params.data.data_path
    data_field = params.data.data_field
    f_out = h5py.File(params.output.filename, 'w')

    # we need provenance fields to be copied
    exp_id = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what='exp_id',
                                   path='provenance').value
    time_points = h5_io.h5_file_pointer(fname=params.data.filename,
                                        what='event_time',
                                        path='provenance').value
    fiducials = h5_io.h5_file_pointer(fname=params.data.filename,
                                      what='event_fiducials',
                                      path='provenance').value
    time_points = time_points[combo_sel]
    fiducials = fiducials[combo_sel]

    prov = f_out.create_group('provenance')
    dt = h5py.special_dtype(vlen=bytes)
    prov.create_dataset('exp_id', data=exp_id, dtype=dt)
    prov.create_dataset('event_time', data=time_points, dtype='uint64')
    prov.create_dataset('event_fiducials', data=fiducials, dtype='uint64')

    # make a field that will contain the data
    data_group = f_out.create_group(data_path)
    export_data = data_group.create_dataset(params.data.data_field,
                                            (len(indices), 1024, 1024),
                                            dtype='float32')

    # get a pointer to the data
    data_f = h5_io.h5_file_pointer(fname=params.data.filename,
                                   what=params.data.data_field,
                                   path=params.data.data_path)
    for jj, this_index in enumerate(indices):
        print jj, this_index
        export_data[jj, :, :] = data_f[this_index, :, :]

    # I want to export the total intensities as well, and the template scores
    data_group.create_dataset('mask', data=np.load(params.data.mask),
                              dtype='float32')
    summary = f_out.create_group('summary')
    summary.create_dataset('tot_int', data=these_ints, dtype='float32')
    summary.create_dataset('template_scores', data=these_ccs, dtype='float32')
    f_out.close()
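# A quick sanity check of the exported file with plain h5py; the dataset
# names match those created above, while the filename is a placeholder.
import h5py

f = h5py.File('exported_subset.h5', 'r')
print f['provenance/exp_id'].value
print f['provenance/event_fiducials'].shape
print f['summary/tot_int'].shape, f['summary/template_scores'].shape
f.close()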