def __run_script__(fns):
    """Reduce each input dataset named in fns.

    Pipeline per dataset: optional normalisation, optional vertical tube
    correction, optional detector-efficiency correction, optional tube-gain
    ('regain') refinement with diagnostic plots, then output in the chosen
    file formats (CIF / xyd / fxye).

    Relies on GUI-bound globals supplied by the enclosing scripting
    environment (df, norm_apply, norm_reference, norm_target, norm_table,
    eff_apply, eff_map, eff_map_cache, vtc_apply, vtc_file, regain_* values,
    vig_*_boundary, out_folder, output_stem, output_* flags, Plot1, and the
    declared Plot4/Plot5/Plot6) plus helpers overlap, iterate_data, Dataset,
    array and arange — none of which are defined in this file.
    """
    global Plot4,Plot5,Plot6
    from Reduction import reduction,AddCifMetadata
    from os.path import basename
    from os.path import join
    import time #how fast are we going?
    from Formats import output
    # Timing origin for the progress messages printed below.
    # NOTE(review): time.clock() is Python-2-era and deprecated elsewhere;
    # presumably fine in this Jython/Gumtree environment — confirm.
    elapsed = time.clock()
    print 'Started working at %f' % (time.clock()-elapsed)
    df.datasets.clear()
    # check input
    if (fns is None or len(fns) == 0) :
        print 'no input datasets'
        return
    # check if input needs to be normalized
    if norm_apply.value:
        # norm_ref is the source of information for normalisation
        # norm_tar is the value norm_ref should become, by multiplication.
        # If 'auto', the maximum value of norm_ref for the first dataset is
        # used, otherwise any number may be entered.
        norm_ref = str(norm_reference.value)
        norm_tar = str(norm_target.value).lower()
        # check if normalization target needs to be determined
        if len(norm_tar) == 0:
            # No target given: disable normalisation entirely.
            norm_ref = None
            norm_tar = None
            print 'WARNING: no reference for normalization was specified'
        elif norm_tar == 'auto':
            # set flag: -1 tells applyNormalization to pick the target
            # from the first dataset (it returns the resolved value).
            norm_tar = -1
            # iterate through input datasets
            # NOTE(review): 'location' is assigned but never used below,
            # and this prints the flag value (-1), not an actual reference
            # value — looks like leftover/incomplete code; confirm.
            location = norm_table[norm_ref]
            print 'utilized reference value for "' + norm_ref + '" is:', norm_tar
        # use provided reference value
        else:
            norm_tar = float(norm_tar)
    else:
        norm_ref = None
        norm_tar = None
    # check if eff-map needs to be loaded
    if eff_apply.value:
        if not eff_map.value:
            eff = None
            print 'WARNING: no eff-map was specified'
        else:
            # Canonicalise to a 'file:' URL and cache parsed efficiency
            # maps keyed by that URL, so repeated runs skip the CIF parse.
            eff_map_canonical = str(eff_map.value)
            if eff_map_canonical[0:5] != 'file:':
                eff_map_canonical = 'file:' + eff_map_canonical
            if not eff_map_canonical in eff_map_cache:
                eff_map_cache[eff_map_canonical] = reduction.read_efficiency_cif(eff_map_canonical)
            else:
                print 'Found in cache ' + `eff_map_canonical`
            eff = eff_map_cache[eff_map_canonical]
    else:
        eff = None
    # check if vertical tube correction needs to be loaded
    if vtc_apply.value:
        if not vtc_file.value:
            vtc = None
            print 'WARNING: no vtc-file was specified'
        else:
            vtc = str(vtc_file.value)
    else:
        vtc = None
    # iterate through input datasets
    # note that the normalisation target (an arbitrary number) is set by
    # the first dataset unless it has already been specified.
    for fn in fns:
        # load dataset
        ds = df[fn]
        # extract basic metadata
        ds = reduction.AddCifMetadata.extract_metadata(ds)
        # remove redundant dimensions
        rs = ds.get_reduced()
        rs.copy_cif_metadata(ds)
        # check if normalized is required
        # NOTE(review): the normalised result replaces ds, but when
        # normalisation is off ds stays the *unreduced* dataset while rs
        # (the reduced copy) is dropped — verify this asymmetry is intended.
        if norm_ref:
            ds,norm_tar = reduction.applyNormalization(rs, reference=norm_table[norm_ref], target=norm_tar)
        print 'Finished normalisation at %f' % (time.clock()-elapsed)
        # check if vertical tube correction is required
        if vtc:
            ds = reduction.getVerticallyCorrected(ds, vtc)
        print 'Finished vertical offset correction at %f' % (time.clock()-elapsed)
        # check if efficiency correction is required
        if eff:
            ds = reduction.getEfficiencyCorrected(ds, eff)
        print 'Finished efficiency correction at %f' % (time.clock()-elapsed)
        # check if we are recalculating gain
        if regain_apply.value:
            b = ds.intg(axis=1).get_reduced() #reduce dimension
            ignore = regain_ignore.value #Ignore first two tubes
            # Determine pixels per tube interval: tube separation divided
            # by scan step size gives the number of scan steps before one
            # tube overlaps the next; bin_size is then re-idealised.
            tube_pos = ds.axes[-1]
            tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)
            tube_steps = ds.axes[0]
            bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)
            pixel_step = int(round(tubesep/bin_size))
            bin_size = tubesep/pixel_step
            print '%f tube separation, %d steps before overlap, ideal binsize %f' % (tubesep,pixel_step,bin_size)
            # Reshape with individual sections summed
            # NOTE(review): assumes b.shape[0] is an exact multiple of
            # pixel_step — confirm upstream guarantees this.
            c = b.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])
            print `b.shape` + "->" + `c.shape`
            # sum the individual unoverlapped sections
            d = c.intg(axis=1)
            e = d.transpose()
            # we skip the first tubes' data as it is all zero
            # Get an initial average to start with, restricted to the
            # requested vertical integration limits.
            bottom = vig_lower_boundary.value
            top = vig_upper_boundary.value
            resummed = ds[:,bottom:top,:]
            resummed = resummed.intg(axis=1).get_reduced()
            # Unit gains give the pre-refinement ("Before scaling") average.
            first_gain = array.ones(len(b.transpose())-ignore)
            first_ave,x,first_var = overlap.apply_gain(resummed.transpose()[ignore:,:],1.0/resummed.transpose().var[ignore:,:],pixel_step,first_gain, calc_var=True)
            if regain_unit_weights.value is True:
                weights = array.ones_like(e[ignore:])
            else:
                weights = 1.0/e[ignore:].var
            # q holds the iteration results; by the indexing below:
            # q[0]=gains, q[3]=residual map, q[4]=interim result,
            # q[5]=old esds — presumably matching iterate_data's return
            # tuple; confirm against its definition.
            q= iterate_data(e[ignore:],weights,pixel_step=1,iter_no=int(regain_iterno.value))
            # Now we actually apply the vertical limits requested
            f,x, varf = overlap.apply_gain(resummed.transpose()[ignore:,:],1.0/resummed.transpose().var[ignore:,:],pixel_step,q[0],calc_var=True)
            # Get error for full dataset
            esds = overlap.calc_error_new(b.transpose()[ignore:,:],f,q[0],pixel_step)
            f = Dataset(f)
            f.title = "After scaling"
            f.var = varf
            # construct the ideal axes
            axis = arange(len(f))
            f.axes[0] = axis*bin_size + ds.axes[0][0] + ignore*pixel_step*bin_size
            f.copy_cif_metadata(ds)
            print `f.shape` + ' ' + `x.shape`
            Plot1.set_dataset(f)
            # Overlay the unscaled average for comparison.
            first_ave = Dataset(first_ave)
            first_ave.var = first_var
            first_ave.title = "Before scaling"
            first_ave.axes[0] = f.axes[0]
            Plot1.add_dataset(Dataset(first_ave))
            Plot4.set_dataset(Dataset(q[4]))
            fg = Dataset(q[0])
            fg.var = esds
            Plot5.set_dataset(fg)
            # show old esds
            fgold = Dataset(q[0])
            fgold.var = q[5]
            Plot5.add_dataset(fgold)
            residual_map = Dataset(q[3])
            # Best-effort plot: Plot6 may be unavailable in some sessions.
            try:
                Plot6.set_dataset(residual_map)
            except:
                pass
            print 'Finished regain calculation at %f' % (time.clock() - elapsed)
        # Output datasets
        # NOTE(review): f is only assigned inside the regain branch above;
        # with regain_apply off, the writers below would raise NameError.
        # [:-7] presumably strips a '.nx.hdf'-style extension — confirm.
        filename_base = join(str(out_folder.value),str(output_stem.value) + basename(str(fn))[:-7])
        if output_cif.value:
            output.write_cif_data(f,filename_base)
        if output_xyd.value:
            output.write_xyd_data(f,filename_base)
        if output_fxye.value:
            output.write_fxye_data(f,filename_base)
        print 'Finished writing data at %f' % (time.clock()-elapsed)
def do_overlap(ds,iterno,algo="FordRollett"):
    """Refine per-tube gains for overlapping detector tubes and apply them.

    Integrates ds over the vertical axis, groups the scan steps into the
    non-overlapping sections implied by the tube separation, iteratively
    refines a gain per tube (iterate_data), then scales every tube of the
    full 3-D dataset by its refined gain with error propagation, and records
    the gains in the dataset's CIF metadata.

    Parameters:
        ds     -- input dataset (gumpy Dataset, scan x vertical x tube).
        iterno -- number of gain-refinement iterations to run.
        algo   -- NOTE(review): accepted but never used in this body;
                  presumably intended to select the refinement algorithm.

    Returns (cs, gain, esds, chisquared): the gain-corrected dataset, the
    refined gains, their estimated standard deviations, and the chi-squared
    history from the refinement.

    Depends on module-level names not defined here: iterate_data, array, copy.
    """
    import time
    from Reduction import overlap
    b = ds.intg(axis=1).get_reduced()
    # Determine pixels per tube interval: tube separation divided by the
    # scan step size gives the number of scan steps before overlap.
    tube_pos = ds.axes[-1]
    tubesep = abs(tube_pos[0]-tube_pos[-1])/(len(tube_pos)-1)
    tube_steps = ds.axes[0]
    bin_size = abs(tube_steps[0]-tube_steps[-1])/(len(tube_steps)-1)
    pixel_step = int(round(tubesep/bin_size))
    print '%d steps before overlap' % pixel_step
    # Reshape with individual sections summed
    # NOTE(review): assumes b.shape[0] is an exact multiple of pixel_step.
    c = b.reshape([b.shape[0]/pixel_step,pixel_step,b.shape[-1]])
    print `b.shape` + "->" + `c.shape`
    # sum the individual unoverlapped sections
    d = c.intg(axis=1)
    e = d.transpose()
    # we skip the first two tubes' data as it is all zero
    # NOTE(review): the slice skips *three* entries (e[3:]) while the
    # comment says two and the gain insertion below starts at index 2
    # (final_gains[2:]) — verify which offset is correct.
    gain,dd,interim_result,residual_map,chisquared,oldesds,first_ave = \
        iterate_data(e[3:],pixel_step=1,iter_no=iterno)
    print 'Have gains at %f' % time.clock()
    # calculate errors based on full dataset
    # First get a full model
    model,wd,mv = overlap.apply_gain(b.transpose()[3:],b.var.transpose()[3:],pixel_step,gain)
    esds = overlap.calc_error_new(b.transpose()[3:],model,gain,pixel_step)
    print 'Have full model and errors at %f' % time.clock()
    """ The following lines are intended to improve efficiency by a factor of
    about 10, by using Arrays instead of datasets and avoiding the [] operator,
    which currently involves too many lines of Python code per invocation.
    Note that ArraySectionIter.next() is also code heavy, so calculate the
    sections ourselves."""
    # Skipped tubes keep gain 1.0 and error 0.0.
    final_gains = array.ones(ds.shape[-1])
    final_gains[2:] = gain
    final_errors = array.zeros(ds.shape[-1])
    final_errors[2:] = esds
    ds_as_array = ds.storage
    rs = array.zeros_like(ds)
    rs_var = array.zeros_like(ds)
    gain_iter = final_gains.__iter__()
    gain_var_iter = final_errors.__iter__()
    print 'RS shape: ' + `rs.shape`
    print 'Gain shape: ' + `final_gains.shape`
    # One tube-wide section at a time: scale counts by the tube's gain and
    # propagate variance in place via the += on the section views.
    target_shape = [rs.shape[0],rs.shape[1],1]
    for atubeno in range(len(final_gains)):
        rta = rs.get_section([0,0,atubeno],target_shape)
        dta = ds_as_array.get_section([0,0,atubeno],target_shape)
        fgn = gain_iter.next()
        fgvn = gain_var_iter.next()
        rta += dta * fgn
        rtav = rs_var.get_section([0,0,atubeno],target_shape)
        # sigma^2(a*b) = a^2 sigma^2(b) + b^2 sigma^2(a)
        rtav += ds.var.storage.get_section([0,0,atubeno],target_shape)*fgn*fgn + \
            fgvn * dta**2
    # Now build up the important information: copy the input dataset and
    # substitute the scaled counts and propagated variances.
    cs = copy(ds)
    cs.storage = rs
    cs.var = rs_var
    cs.copy_cif_metadata(ds)
    # prepare info for CIF file: one row per tube with refined gain and esd.
    # NOTE(review): math.sqrt implies final_errors holds *variances*;
    # confirm calc_error_new returns variances rather than esds.
    import math
    detno = map(lambda a:"%d" % a,range(len(final_gains)))
    gain_as_strings = map(lambda a:"%.4f" % a,final_gains)
    gain_esd = map(lambda a:"%.4f" % math.sqrt(a),final_errors)
    cs.harvest_metadata("CIF").AddCifItem((
        (("_[local]_detector_number","_[local]_refined_gain","_[local]_refined_gain_esd"),),
        ((detno,gain_as_strings,gain_esd),))
        )
    return cs,gain,esds,chisquared